From 358c7ea168038784d0b08618ace23240ce0d68df Mon Sep 17 00:00:00 2001 From: zramsay Date: Mon, 3 Apr 2023 16:41:07 -0400 Subject: [PATCH 01/17] key missing line --- app/data/stacks/build-support/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/app/data/stacks/build-support/README.md b/app/data/stacks/build-support/README.md index 72001518..e3993402 100644 --- a/app/data/stacks/build-support/README.md +++ b/app/data/stacks/build-support/README.md @@ -23,6 +23,7 @@ $ laconic-so --stack build-support build-containers --exclude cerc/builder-gerbi ``` $ laconic-so --stack package-registry setup-repositories +$ laconic-so --stack package-registry build-containers $ laconic-so --stack package-registry deploy up [+] Running 3/3 ⠿ Network laconic-aecc4a21d3a502b14522db97d427e850_gitea Created 0.0s -- 2.45.2 From 2515878eeb3456147d4e50e1b39c98f618d1a3d3 Mon Sep 17 00:00:00 2001 From: prathamesh0 <42446521+prathamesh0@users.noreply.github.com> Date: Tue, 4 Apr 2023 14:53:28 +0530 Subject: [PATCH 02/17] Add ability to run Optimism fixturenet with external L1 endpoint (#273) * Remove unnecessary todos * Set option to log commands in shell scripts * Replace fixturenet-eth dependency with wait on endpoint * Skip lighthouse node dependency check * Update all services in the stack * Use debug flag to enable shell commands logging * Add bash in op-batcher container * Update mobymask-v2 instructions * Update fixturenet-optimism instructions * Add descriptions for services * Move ts files to container-build * Take L1 RPC endpoint from the env file * Add dev mode restriction for editing env file --- .../compose/docker-compose-fixturenet-eth.yml | 6 - .../docker-compose-fixturenet-optimism.yml | 60 +++--- .../docker-compose-watcher-mobymask-v2.yml | 10 +- .../fixturenet-optimism/generate-l2-config.sh | 3 + .../config/fixturenet-optimism/l1-params.env | 9 + .../optimism-contracts/run.sh | 45 +++-- .../fixturenet-optimism/run-op-batcher.sh | 3 + 
.../config/fixturenet-optimism/run-op-geth.sh | 3 + .../config/fixturenet-optimism/run-op-node.sh | 3 + app/data/config/wait-for-it.sh | 182 ++++++++++++++++++ ...nvite.sh => deploy-and-generate-invite.sh} | 0 .../cerc-optimism-contracts/Dockerfile | 1 - .../hardhat-tasks}/rekey-json.ts | 0 .../hardhat-tasks}/send-balance.ts | 0 .../cerc-optimism-op-batcher/Dockerfile | 2 +- app/data/stacks/fixturenet-optimism/README.md | 42 +++- app/data/stacks/fixturenet-optimism/stack.yml | 2 +- app/data/stacks/mobymask-v2/README.md | 41 ++-- app/data/stacks/mobymask-v2/demo.md | 36 ++-- 19 files changed, 349 insertions(+), 99 deletions(-) create mode 100644 app/data/config/fixturenet-optimism/l1-params.env create mode 100755 app/data/config/wait-for-it.sh rename app/data/config/watcher-mobymask-v2/{deploy-invite.sh => deploy-and-generate-invite.sh} (100%) rename app/data/{config/fixturenet-optimism/optimism-contracts => container-build/cerc-optimism-contracts/hardhat-tasks}/rekey-json.ts (100%) rename app/data/{config/fixturenet-optimism/optimism-contracts => container-build/cerc-optimism-contracts/hardhat-tasks}/send-balance.ts (100%) diff --git a/app/data/compose/docker-compose-fixturenet-eth.yml b/app/data/compose/docker-compose-fixturenet-eth.yml index 2eaad084..508543e4 100644 --- a/app/data/compose/docker-compose-fixturenet-eth.yml +++ b/app/data/compose/docker-compose-fixturenet-eth.yml @@ -58,12 +58,6 @@ services: environment: RUN_BOOTNODE: "true" image: cerc/fixturenet-eth-lighthouse:local - healthcheck: - test: ["CMD", "/scripts/status-internal.sh"] - interval: 10s - timeout: 100s - retries: 3 - start_period: 15s fixturenet-eth-lighthouse-1: hostname: fixturenet-eth-lighthouse-1 diff --git a/app/data/compose/docker-compose-fixturenet-optimism.yml b/app/data/compose/docker-compose-fixturenet-optimism.yml index 6dcde5c9..ee7d3cde 100644 --- a/app/data/compose/docker-compose-fixturenet-optimism.yml +++ b/app/data/compose/docker-compose-fixturenet-optimism.yml @@ -1,40 
+1,46 @@ version: '3.7' services: + # Generates and funds the accounts required when setting up the L2 chain (outputs to volume l2_accounts) + # Creates / updates the configuration for L1 contracts deployment + # Deploys the L1 smart contracts (outputs to volume l1_deployment) fixturenet-optimism-contracts: hostname: fixturenet-optimism-contracts image: cerc/optimism-contracts:local - depends_on: - fixturenet-eth-geth-1: - condition: service_healthy - fixturenet-eth-bootnode-lighthouse: - condition: service_healthy - environment: - CHAIN_ID: 1212 - L1_RPC: "http://fixturenet-eth-geth-1:8545" - command: "./run.sh" + env_file: + - ../config/fixturenet-optimism/l1-params.env + # Waits for L1 endpoint to be up before running the script + command: | + "./wait-for-it.sh -h $${L1_HOST} -p $${L1_PORT} -s -t 60 -- ./run.sh" volumes: - - ../config/fixturenet-optimism/optimism-contracts/rekey-json.ts:/app/packages/contracts-bedrock/tasks/rekey-json.ts - - ../config/fixturenet-optimism/optimism-contracts/send-balance.ts:/app/packages/contracts-bedrock/tasks/send-balance.ts + - ../config/wait-for-it.sh:/app/packages/contracts-bedrock/wait-for-it.sh + - ../container-build/cerc-optimism-contracts/hardhat-tasks/rekey-json.ts:/app/packages/contracts-bedrock/tasks/rekey-json.ts + - ../container-build/cerc-optimism-contracts/hardhat-tasks/send-balance.ts:/app/packages/contracts-bedrock/tasks/send-balance.ts - ../config/fixturenet-optimism/optimism-contracts/update-config.js:/app/packages/contracts-bedrock/update-config.js - ../config/fixturenet-optimism/optimism-contracts/run.sh:/app/packages/contracts-bedrock/run.sh - fixturenet_geth_accounts:/geth-accounts:ro - l2_accounts:/l2-accounts - l1_deployment:/app/packages/contracts-bedrock + extra_hosts: + - "host.docker.internal:host-gateway" + # Generates the config files required for L2 (outputs to volume op_node_data) op-node-l2-config-gen: image: cerc/optimism-op-node:local depends_on: fixturenet-optimism-contracts: condition: 
service_completed_successfully - environment: - L1_RPC: "http://fixturenet-eth-geth-1:8545" + env_file: + - ../config/fixturenet-optimism/l1-params.env volumes: - ../config/fixturenet-optimism/generate-l2-config.sh:/app/generate-l2-config.sh - l1_deployment:/contracts-bedrock:ro - op_node_data:/app command: ["sh", "/app/generate-l2-config.sh"] + extra_hosts: + - "host.docker.internal:host-gateway" + # Initializes and runs the L2 execution client op-geth: image: cerc/optimism-l2geth:local depends_on: @@ -55,9 +61,10 @@ services: retries: 10 start_period: 10s + # Runs the L2 consensus client (Sequencer node) op-node: - environment: - L1_RPC: "http://fixturenet-eth-geth-1:8545" + env_file: + - ../config/fixturenet-optimism/l1-params.env depends_on: op-geth: condition: service_healthy @@ -75,25 +82,32 @@ services: timeout: 10s retries: 10 start_period: 10s + extra_hosts: + - "host.docker.internal:host-gateway" + # Runs the batcher (takes transactions from the Sequencer and publishes them to L1) op-batcher: - environment: - L1_RPC: "http://fixturenet-eth-geth-1:8545" + env_file: + - ../config/fixturenet-optimism/l1-params.env depends_on: - fixturenet-eth-geth-1: - condition: service_healthy op-node: condition: service_healthy op-geth: condition: service_healthy image: cerc/optimism-op-batcher:local volumes: + - ../config/wait-for-it.sh:/wait-for-it.sh - ../config/fixturenet-optimism/run-op-batcher.sh:/run-op-batcher.sh - l2_accounts:/l2-accounts:ro - entrypoint: "sh" - command: "/run-op-batcher.sh" + entrypoint: ["sh", "-c"] + # Waits for L1 endpoint to be up before running the batcher + command: | + "/wait-for-it.sh -h $${L1_HOST} -p $${L1_PORT} -s -t 60 -- /run-op-batcher.sh" + extra_hosts: + - "host.docker.internal:host-gateway" volumes: - op_node_data: - l2_accounts: + fixturenet_geth_accounts: l1_deployment: + l2_accounts: + op_node_data: diff --git a/app/data/compose/docker-compose-watcher-mobymask-v2.yml b/app/data/compose/docker-compose-watcher-mobymask-v2.yml 
index ed5c203a..8478a4d9 100644 --- a/app/data/compose/docker-compose-watcher-mobymask-v2.yml +++ b/app/data/compose/docker-compose-watcher-mobymask-v2.yml @@ -33,12 +33,12 @@ services: # TODO: Configure env file for ETH RPC URL & private key environment: - ENV=PROD - command: ["sh", "./deploy-invite.sh"] + command: ["sh", "./deploy-and-generate-invite.sh"] volumes: - # TODO: add a script to set rpc endpoint from env + # TODO: add a script to set RPC endpoint from env # add manually if running seperately - ../config/watcher-mobymask-v2/secrets-template.json:/app/packages/server/secrets-template.json - - ../config/watcher-mobymask-v2/deploy-invite.sh:/app/packages/server/deploy-invite.sh + - ../config/watcher-mobymask-v2/deploy-and-generate-invite.sh:/app/packages/server/deploy-and-generate-invite.sh - moby_data_server:/app/packages/server - fixturenet_geth_accounts:/geth-accounts:ro healthcheck: @@ -49,7 +49,7 @@ services: start_period: 10s mobymask-watcher-server: - # TODO: pass optimism rpc endpoint + # TODO: pass optimism RPC endpoint restart: unless-stopped depends_on: mobymask-watcher-db: @@ -59,7 +59,7 @@ services: image: cerc/watcher-mobymask-v2:local command: ["sh", "server-start.sh"] volumes: - # TODO: add a script to set rpc endpoint from env + # TODO: add a script to set RPC endpoint from env # add manually if running seperately - ../config/watcher-mobymask-v2/watcher-config-template.toml:/app/packages/mobymask-v2-watcher/environments/watcher-config-template.toml - ../config/watcher-mobymask-v2/peer.env:/app/packages/peer/.env diff --git a/app/data/config/fixturenet-optimism/generate-l2-config.sh b/app/data/config/fixturenet-optimism/generate-l2-config.sh index 25ed2378..0ec60a08 100755 --- a/app/data/config/fixturenet-optimism/generate-l2-config.sh +++ b/app/data/config/fixturenet-optimism/generate-l2-config.sh @@ -1,5 +1,8 @@ #!/bin/sh set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi op-node genesis l2 \ --deploy-config 
/contracts-bedrock/deploy-config/getting-started.json \ diff --git a/app/data/config/fixturenet-optimism/l1-params.env b/app/data/config/fixturenet-optimism/l1-params.env new file mode 100644 index 00000000..31876523 --- /dev/null +++ b/app/data/config/fixturenet-optimism/l1-params.env @@ -0,0 +1,9 @@ +# Change if pointing to an external L1 endpoint +L1_RPC="http://fixturenet-eth-geth-1:8545" +L1_CHAIN_ID=1212 +L1_HOST="fixturenet-eth-geth-1" +L1_PORT=8545 +L1_ADDRESS= +L1_PRIV_KEY= +L1_ADDRESS_2= +L1_PRIV_KEY_2= diff --git a/app/data/config/fixturenet-optimism/optimism-contracts/run.sh b/app/data/config/fixturenet-optimism/optimism-contracts/run.sh index 473b8743..e10c45e1 100755 --- a/app/data/config/fixturenet-optimism/optimism-contracts/run.sh +++ b/app/data/config/fixturenet-optimism/optimism-contracts/run.sh @@ -1,5 +1,8 @@ #!/bin/bash set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi # TODO Support restarts; fixturenet-eth-geth currently starts fresh on a restart # Exit if a deployment already exists (on restarts) @@ -8,12 +11,14 @@ set -e # exit 0 # fi +echo "Using L1 RPC endpoint ${L1_RPC}" + # Append tasks/index.ts file echo "import './rekey-json'" >> tasks/index.ts echo "import './send-balance'" >> tasks/index.ts # Update the chainId in the hardhat config -sed -i "/getting-started/ {n; s/.*chainId.*/ chainId: $CHAIN_ID,/}" hardhat.config.ts +sed -i "/getting-started/ {n; s/.*chainId.*/ chainId: $L1_CHAIN_ID,/}" hardhat.config.ts # Generate the L2 account addresses yarn hardhat rekey-json --output /l2-accounts/keys.json @@ -29,11 +34,27 @@ BATCHER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Batcher.address') SEQUENCER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Sequencer.address') # Read the private key of L1 accounts -# TODO: Take from env if /geth-accounts volume doesn't exist to allow using separately running L1 -L1_ADDRESS=$(head -n 1 /geth-accounts/accounts.csv | cut -d ',' -f 2) -L1_PRIV_KEY=$(head -n 1 /geth-accounts/accounts.csv | cut -d ',' -f 3) 
-L1_ADDRESS_2=$(awk -F, 'NR==2{print $(NF-1)}' /geth-accounts/accounts.csv) -L1_PRIV_KEY_2=$(awk -F, 'NR==2{print $NF}' /geth-accounts/accounts.csv) +if [ -f /geth-accounts/accounts.csv ]; then + echo "Using L1 account credentials from the mounted volume" + L1_ADDRESS=$(head -n 1 /geth-accounts/accounts.csv | cut -d ',' -f 2) + L1_PRIV_KEY=$(head -n 1 /geth-accounts/accounts.csv | cut -d ',' -f 3) + L1_ADDRESS_2=$(awk -F, 'NR==2{print $(NF-1)}' /geth-accounts/accounts.csv) + L1_PRIV_KEY_2=$(awk -F, 'NR==2{print $NF}' /geth-accounts/accounts.csv) +else + echo "Using L1 account credentials from env" +fi + +# Select a finalized L1 block as the starting point for roll ups +until FINALIZED_BLOCK=$(cast block finalized --rpc-url "$L1_RPC"); do + echo "Waiting for a finalized L1 block to exist, retrying after 10s" + sleep 10 +done + +L1_BLOCKNUMBER=$(echo "$FINALIZED_BLOCK" | awk '/number/{print $2}') +L1_BLOCKHASH=$(echo "$FINALIZED_BLOCK" | awk '/hash/{print $2}') +L1_BLOCKTIMESTAMP=$(echo "$FINALIZED_BLOCK" | awk '/timestamp/{print $2}') + +echo "Selected L1 block ${L1_BLOCKNUMBER} as the starting block for roll ups" # Send balances to the above L2 addresses yarn hardhat send-balance --to "${ADMIN_ADDRESS}" --amount 2 --private-key "${L1_PRIV_KEY}" --network getting-started @@ -42,19 +63,9 @@ yarn hardhat send-balance --to "${BATCHER_ADDRESS}" --amount 1000 --private-key echo "Balances sent to L2 accounts" -# Select a finalized L1 block as the starting point for roll ups -# TODO Use web3.js to get the latest finalized block -until CAST_OUTPUT=$(cast block finalized --rpc-url "$L1_RPC"); do - echo "Waiting for a finalized L1 block to exist, retrying after 10s" - sleep 10 -done - -L1_BLOCKHASH=$(echo "$CAST_OUTPUT" | awk '/hash/{print $2}') -L1_BLOCKTIMESTAMP=$(echo "$CAST_OUTPUT" | awk '/timestamp/{print $2}') - # Update the deployment config sed -i 's/"l2OutputOracleStartingTimestamp": TIMESTAMP/"l2OutputOracleStartingTimestamp": '"$L1_BLOCKTIMESTAMP"'/g' 
deploy-config/getting-started.json -jq --arg chainid "$CHAIN_ID" '.l1ChainID = ($chainid | tonumber)' deploy-config/getting-started.json > tmp.json && mv tmp.json deploy-config/getting-started.json +jq --arg chainid "$L1_CHAIN_ID" '.l1ChainID = ($chainid | tonumber)' deploy-config/getting-started.json > tmp.json && mv tmp.json deploy-config/getting-started.json node update-config.js deploy-config/getting-started.json "$ADMIN_ADDRESS" "$PROPOSER_ADDRESS" "$BATCHER_ADDRESS" "$SEQUENCER_ADDRESS" "$L1_BLOCKHASH" diff --git a/app/data/config/fixturenet-optimism/run-op-batcher.sh b/app/data/config/fixturenet-optimism/run-op-batcher.sh index 38ac3ab9..3cab3e94 100755 --- a/app/data/config/fixturenet-optimism/run-op-batcher.sh +++ b/app/data/config/fixturenet-optimism/run-op-batcher.sh @@ -1,5 +1,8 @@ #!/bin/sh set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi # Get BACTHER_KEY from keys.json BATCHER_KEY=$(jq -r '.Batcher.privateKey' /l2-accounts/keys.json | tr -d '"') diff --git a/app/data/config/fixturenet-optimism/run-op-geth.sh b/app/data/config/fixturenet-optimism/run-op-geth.sh index 1b4e2b0f..cb180065 100755 --- a/app/data/config/fixturenet-optimism/run-op-geth.sh +++ b/app/data/config/fixturenet-optimism/run-op-geth.sh @@ -1,5 +1,8 @@ #!/bin/sh set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi mkdir datadir diff --git a/app/data/config/fixturenet-optimism/run-op-node.sh b/app/data/config/fixturenet-optimism/run-op-node.sh index 3be417d0..f495aeef 100755 --- a/app/data/config/fixturenet-optimism/run-op-node.sh +++ b/app/data/config/fixturenet-optimism/run-op-node.sh @@ -1,5 +1,8 @@ #!/bin/sh set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi # Get SEQUENCER KEY from keys.json SEQUENCER_KEY=$(jq -r '.Sequencer.privateKey' /l2-accounts/keys.json | tr -d '"') diff --git a/app/data/config/wait-for-it.sh b/app/data/config/wait-for-it.sh new file mode 100755 index 00000000..d990e0d3 --- /dev/null +++ b/app/data/config/wait-for-it.sh @@ -0,0 +1,182 
@@ +#!/usr/bin/env bash +# Use this script to test if a given TCP host/port are available + +WAITFORIT_cmdname=${0##*/} + +echoerr() { if [[ $WAITFORIT_QUIET -ne 1 ]]; then echo "$@" 1>&2; fi } + +usage() +{ + cat << USAGE >&2 +Usage: + $WAITFORIT_cmdname host:port [-s] [-t timeout] [-- command args] + -h HOST | --host=HOST Host or IP under test + -p PORT | --port=PORT TCP port under test + Alternatively, you specify the host and port as host:port + -s | --strict Only execute subcommand if the test succeeds + -q | --quiet Don't output any status messages + -t TIMEOUT | --timeout=TIMEOUT + Timeout in seconds, zero for no timeout + -- COMMAND ARGS Execute command with args after the test finishes +USAGE + exit 1 +} + +wait_for() +{ + if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then + echoerr "$WAITFORIT_cmdname: waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT" + else + echoerr "$WAITFORIT_cmdname: waiting for $WAITFORIT_HOST:$WAITFORIT_PORT without a timeout" + fi + WAITFORIT_start_ts=$(date +%s) + while : + do + if [[ $WAITFORIT_ISBUSY -eq 1 ]]; then + nc -z $WAITFORIT_HOST $WAITFORIT_PORT + WAITFORIT_result=$? + else + (echo -n > /dev/tcp/$WAITFORIT_HOST/$WAITFORIT_PORT) >/dev/null 2>&1 + WAITFORIT_result=$? + fi + if [[ $WAITFORIT_result -eq 0 ]]; then + WAITFORIT_end_ts=$(date +%s) + echoerr "$WAITFORIT_cmdname: $WAITFORIT_HOST:$WAITFORIT_PORT is available after $((WAITFORIT_end_ts - WAITFORIT_start_ts)) seconds" + break + fi + sleep 1 + done + return $WAITFORIT_result +} + +wait_for_wrapper() +{ + # In order to support SIGINT during timeout: http://unix.stackexchange.com/a/57692 + if [[ $WAITFORIT_QUIET -eq 1 ]]; then + timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --quiet --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT & + else + timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT & + fi + WAITFORIT_PID=$! 
+ trap "kill -INT -$WAITFORIT_PID" INT + wait $WAITFORIT_PID + WAITFORIT_RESULT=$? + if [[ $WAITFORIT_RESULT -ne 0 ]]; then + echoerr "$WAITFORIT_cmdname: timeout occurred after waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT" + fi + return $WAITFORIT_RESULT +} + +# process arguments +while [[ $# -gt 0 ]] +do + case "$1" in + *:* ) + WAITFORIT_hostport=(${1//:/ }) + WAITFORIT_HOST=${WAITFORIT_hostport[0]} + WAITFORIT_PORT=${WAITFORIT_hostport[1]} + shift 1 + ;; + --child) + WAITFORIT_CHILD=1 + shift 1 + ;; + -q | --quiet) + WAITFORIT_QUIET=1 + shift 1 + ;; + -s | --strict) + WAITFORIT_STRICT=1 + shift 1 + ;; + -h) + WAITFORIT_HOST="$2" + if [[ $WAITFORIT_HOST == "" ]]; then break; fi + shift 2 + ;; + --host=*) + WAITFORIT_HOST="${1#*=}" + shift 1 + ;; + -p) + WAITFORIT_PORT="$2" + if [[ $WAITFORIT_PORT == "" ]]; then break; fi + shift 2 + ;; + --port=*) + WAITFORIT_PORT="${1#*=}" + shift 1 + ;; + -t) + WAITFORIT_TIMEOUT="$2" + if [[ $WAITFORIT_TIMEOUT == "" ]]; then break; fi + shift 2 + ;; + --timeout=*) + WAITFORIT_TIMEOUT="${1#*=}" + shift 1 + ;; + --) + shift + WAITFORIT_CLI=("$@") + break + ;; + --help) + usage + ;; + *) + echoerr "Unknown argument: $1" + usage + ;; + esac +done + +if [[ "$WAITFORIT_HOST" == "" || "$WAITFORIT_PORT" == "" ]]; then + echoerr "Error: you need to provide a host and port to test." + usage +fi + +WAITFORIT_TIMEOUT=${WAITFORIT_TIMEOUT:-15} +WAITFORIT_STRICT=${WAITFORIT_STRICT:-0} +WAITFORIT_CHILD=${WAITFORIT_CHILD:-0} +WAITFORIT_QUIET=${WAITFORIT_QUIET:-0} + +# Check to see if timeout is from busybox? 
+WAITFORIT_TIMEOUT_PATH=$(type -p timeout) +WAITFORIT_TIMEOUT_PATH=$(realpath $WAITFORIT_TIMEOUT_PATH 2>/dev/null || readlink -f $WAITFORIT_TIMEOUT_PATH) + +WAITFORIT_BUSYTIMEFLAG="" +if [[ $WAITFORIT_TIMEOUT_PATH =~ "busybox" ]]; then + WAITFORIT_ISBUSY=1 + # Check if busybox timeout uses -t flag + # (recent Alpine versions don't support -t anymore) + if timeout &>/dev/stdout | grep -q -e '-t '; then + WAITFORIT_BUSYTIMEFLAG="-t" + fi +else + WAITFORIT_ISBUSY=0 +fi + +if [[ $WAITFORIT_CHILD -gt 0 ]]; then + wait_for + WAITFORIT_RESULT=$? + exit $WAITFORIT_RESULT +else + if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then + wait_for_wrapper + WAITFORIT_RESULT=$? + else + wait_for + WAITFORIT_RESULT=$? + fi +fi + +if [[ $WAITFORIT_CLI != "" ]]; then + if [[ $WAITFORIT_RESULT -ne 0 && $WAITFORIT_STRICT -eq 1 ]]; then + echoerr "$WAITFORIT_cmdname: strict mode, refusing to execute subprocess" + exit $WAITFORIT_RESULT + fi + exec "${WAITFORIT_CLI[@]}" +else + exit $WAITFORIT_RESULT +fi diff --git a/app/data/config/watcher-mobymask-v2/deploy-invite.sh b/app/data/config/watcher-mobymask-v2/deploy-and-generate-invite.sh similarity index 100% rename from app/data/config/watcher-mobymask-v2/deploy-invite.sh rename to app/data/config/watcher-mobymask-v2/deploy-and-generate-invite.sh diff --git a/app/data/container-build/cerc-optimism-contracts/Dockerfile b/app/data/container-build/cerc-optimism-contracts/Dockerfile index 4f244bab..cdc98b08 100644 --- a/app/data/container-build/cerc-optimism-contracts/Dockerfile +++ b/app/data/container-build/cerc-optimism-contracts/Dockerfile @@ -1,4 +1,3 @@ -# TODO: Use a node alpine image FROM cerc/foundry:local # Install node (local foundry is a debian based image) diff --git a/app/data/config/fixturenet-optimism/optimism-contracts/rekey-json.ts b/app/data/container-build/cerc-optimism-contracts/hardhat-tasks/rekey-json.ts similarity index 100% rename from app/data/config/fixturenet-optimism/optimism-contracts/rekey-json.ts rename to 
app/data/container-build/cerc-optimism-contracts/hardhat-tasks/rekey-json.ts diff --git a/app/data/config/fixturenet-optimism/optimism-contracts/send-balance.ts b/app/data/container-build/cerc-optimism-contracts/hardhat-tasks/send-balance.ts similarity index 100% rename from app/data/config/fixturenet-optimism/optimism-contracts/send-balance.ts rename to app/data/container-build/cerc-optimism-contracts/hardhat-tasks/send-balance.ts diff --git a/app/data/container-build/cerc-optimism-op-batcher/Dockerfile b/app/data/container-build/cerc-optimism-op-batcher/Dockerfile index 542a075f..23d6b629 100644 --- a/app/data/container-build/cerc-optimism-op-batcher/Dockerfile +++ b/app/data/container-build/cerc-optimism-op-batcher/Dockerfile @@ -25,7 +25,7 @@ RUN make op-batcher VERSION="$VERSION" GOOS=$TARGETOS GOARCH=$TARGETARCH FROM alpine:3.15 -RUN apk add --no-cache jq +RUN apk add --no-cache jq bash COPY --from=builder /app/op-batcher/bin/op-batcher /usr/local/bin diff --git a/app/data/stacks/fixturenet-optimism/README.md b/app/data/stacks/fixturenet-optimism/README.md index 5b9dd51b..2f13ec88 100644 --- a/app/data/stacks/fixturenet-optimism/README.md +++ b/app/data/stacks/fixturenet-optimism/README.md @@ -8,12 +8,15 @@ Clone required repositories: ```bash laconic-so --stack fixturenet-optimism setup-repositories + +# Exclude cerc-io/go-ethereum repository if running L1 separately +laconic-so --stack fixturenet-optimism setup-repositories --exclude cerc-io/go-ethereum ``` Checkout to the required versions and branches in repos: ```bash -# optimism +# Optimism cd ~/cerc/optimism git checkout @eth-optimism/sdk@0.0.0-20230329025055 ``` @@ -22,6 +25,9 @@ Build the container images: ```bash laconic-so --stack fixturenet-optimism build-containers + +# Only build containers required for L2 if running L1 separately +laconic-so --stack fixturenet-optimism build-containers --include 
cerc/foundry,cerc/optimism-contracts,cerc/optimism-op-node,cerc/optimism-l2geth,cerc/optimism-op-batcher ``` This should create the required docker images in the local image registry: @@ -37,19 +43,36 @@ This should create the required docker images in the local image registry: ## Deploy +(Optional) Update the [l1-params.env](../../config/fixturenet-optimism/l1-params.env) file with L1 endpoint (`L1_RPC`, `L1_HOST` and `L1_PORT`) and other params if running L1 separately + +* NOTE: + * Stack Orchestrator needs to be run in [`dev`](/docs/CONTRIBUTING.md#install-developer-mode) mode to be able to edit the env file + * If L1 is running on the host machine, use `host.docker.internal` as the hostname to access the host port + Deploy the stack: ```bash laconic-so --stack fixturenet-optimism deploy up + +# Only start fixturenet-optimism pod (L2) if running L1 separately +laconic-so --stack fixturenet-optimism deploy up --include fixturenet-optimism ``` -To list down the running containers: +The `fixturenet-optimism-contracts` service may take a while (`~15 mins`) to complete running as it: +1. waits for the 'Merge' to happen on L1 +2. waits for a finalized block to exist on L1 (so that it can be taken as a starting block for roll ups) +3. 
deploys the L1 contracts + +To list down and monitor the running containers: ```bash laconic-so --stack fixturenet-optimism deploy ps # With status docker ps + +# Check logs for a container +docker logs -f ``` ## Clean up @@ -58,23 +81,24 @@ Stop all services running in the background: ```bash laconic-so --stack fixturenet-optimism deploy down + +# If only ran fixturenet-optimism pod (L2) +laconic-so --stack fixturenet-optimism deploy down --include fixturenet-optimism ``` -Remove volumes created by this stack: +Clear volumes created by this stack: ```bash -docker volume ls +# List all relevant volumes +docker volume ls -q --filter name=laconic* -docker volume rm laconic-d527651bba3cb61886b36a7400bd2a38_fixturenet-geth-accounts -docker volume rm laconic-d527651bba3cb61886b36a7400bd2a38_l1-deployment -docker volume rm laconic-d527651bba3cb61886b36a7400bd2a38_l2-accounts -docker volume rm laconic-d527651bba3cb61886b36a7400bd2a38_op_node_data +# Remove all the listed volumes +docker volume rm $(docker volume ls -q --filter name=laconic*) ``` ## Known Issues * Currently not supported: * Stopping and restarting the stack from where it left off; currently starts fresh on a restart - * Pointing Optimism (L2) to external L1 endpoint to allow running only L2 services * Resource requirements (memory + time) for building `cerc/foundry` image are on the higher side * `cerc/optimism-contracts` image is currently based on `cerc/foundry` (Optimism requires foundry installation) diff --git a/app/data/stacks/fixturenet-optimism/stack.yml b/app/data/stacks/fixturenet-optimism/stack.yml index 299b1367..e53623ca 100644 --- a/app/data/stacks/fixturenet-optimism/stack.yml +++ b/app/data/stacks/fixturenet-optimism/stack.yml @@ -13,9 +13,9 @@ containers: - cerc/fixturenet-eth-lighthouse - cerc/foundry - cerc/optimism-contracts + - cerc/optimism-op-node - cerc/optimism-l2geth - cerc/optimism-op-batcher - - cerc/optimism-op-node pods: - fixturenet-eth - fixturenet-optimism diff --git 
a/app/data/stacks/mobymask-v2/README.md b/app/data/stacks/mobymask-v2/README.md index 18af5b2b..68b9b61c 100644 --- a/app/data/stacks/mobymask-v2/README.md +++ b/app/data/stacks/mobymask-v2/README.md @@ -31,6 +31,10 @@ git checkout laconic # MobyMask cd ~/cerc/MobyMask git checkout v0.1.1 + +# Optimism +cd ~/cerc/optimism +git checkout @eth-optimism/sdk@0.0.0-20230329025055 ``` Build the container images: @@ -43,29 +47,33 @@ This should create the required docker images in the local image registry. Deploy the stack: -* Deploy the containers +* Deploy the containers: ```bash laconic-so --stack mobymask-v2 deploy-system up ``` -* Check that all containers are healthy using `docker ps` +* List and check the health status of all the containers using `docker ps` and wait for them to be `healthy` - NOTE: The `mobymask-ui` container might not start. If mobymask-app is not running at http://localhost:3002, run command again to start the container + NOTE: The `mobymask-app` container might not start; if the app is not running at http://localhost:3002, restart the container using it's id: ```bash - laconic-so --stack mobymask-v2 deploy-system up + docker ps -a | grep "mobymask-app" + + docker restart ``` ## Tests -Find the watcher container's id: +Find the watcher container's id and export it for later use: ```bash laconic-so --stack mobymask-v2 deploy-system ps | grep "mobymask-watcher-server" + +export CONTAINER_ID= ``` -Example output +Example output: ``` id: 5d3aae4b22039fcd1c9b18feeb91318ede1100581e75bb5ac54f9e436066b02c, name: laconic-bfb01caf98b1b8f7c8db4d33f11b905a-mobymask-watcher-server-1, ports: 0.0.0.0:3001->3001/tcp, 0.0.0.0:9001->9001/tcp, 0.0.0.0:9090->9090/tcp @@ -73,12 +81,6 @@ id: 5d3aae4b22039fcd1c9b18feeb91318ede1100581e75bb5ac54f9e436066b02c, name: laco In above output the container ID is `5d3aae4b22039fcd1c9b18feeb91318ede1100581e75bb5ac54f9e436066b02c` -Export it for later use: - -```bash -export CONTAINER_ID= -``` - Run the peer tests: ```bash @@ 
-87,7 +89,11 @@ docker exec -w /app/packages/peer $CONTAINER_ID yarn test ## Web Apps -Check that the status for web-app containers are healthy by using `docker ps` +Check that the web-app containers are healthy: + +```bash +docker ps | grep -E 'mobymask-app|peer-test-app' +``` ### mobymask-app @@ -119,15 +125,14 @@ laconic-so --stack mobymask-v2 deploy-system down Clear volumes: -* List all volumes +* List all relevant volumes: ```bash - docker volume ls + docker volume ls -q --filter name=laconic* ``` -* Remove volumes created by this stack +* Remove all the listed volumes: - Example: ```bash - docker volume rm laconic-bfb01caf98b1b8f7c8db4d33f11b905a_moby_data_server + docker volume rm $(docker volume ls -q --filter name=laconic*) ``` diff --git a/app/data/stacks/mobymask-v2/demo.md b/app/data/stacks/mobymask-v2/demo.md index 35416780..8a5049c4 100644 --- a/app/data/stacks/mobymask-v2/demo.md +++ b/app/data/stacks/mobymask-v2/demo.md @@ -1,22 +1,22 @@ # Demo -* Get the root invite link URL for mobymask-app +* Get the root invite link URL for mobymask-app: - ``` + ```bash laconic-so --stack mobymask-v2 deploy-system logs mobymask ``` - The invite link is seen at the end of the logs - Example: - ``` + The invite link is seen at the end of the logs. 
Example log: + + ```bash laconic-bfb01caf98b1b8f7c8db4d33f11b905a-mobymask-1 | http://127.0.0.1:3002/#/members?invitation=%7B%22v%22%3A1%2C%22signedDelegations%22%3A%5B%7B%22signature%22%3A%220x7559bd412f02677d60820e38243acf61547f79339395a34f7d4e1630e645aeb30535fc219f79b6fbd3af0ce3bd05132ad46d2b274a9fbc4c36bc71edd09850891b%22%2C%22delegation%22%3A%7B%22delegate%22%3A%220xc0838c92B2b71756E0eAD5B3C1e1F186baeEAAac%22%2C%22authority%22%3A%220x0000000000000000000000000000000000000000000000000000000000000000%22%2C%22caveats%22%3A%5B%7B%22enforcer%22%3A%220x558024C7d593B840E1BfD83E9B287a5CDad4db15%22%2C%22terms%22%3A%220x0000000000000000000000000000000000000000000000000000000000000000%22%7D%5D%7D%7D%5D%2C%22key%22%3A%220x98da9805821f1802196443e578fd32af567bababa0a249c07c82df01ecaa7d8d%22%7D ``` -* Open the invite link in browser to use the mobymask-app. +* Open the invite link in a browser to use the mobymask-app. NOTE: Before opening the invite link, clear the browser cache (local storage) for http://127.0.0.1:3002 to remove old invitations -* In the debug panel, check if it is connected to the p2p network (It should be connected to atleast one other peer for pubsub to work). +* In the debug panel, check if it is connected to the p2p network (it should be connected to at least one other peer for pubsub to work). * Create an invite link in the app by clicking on `Create new invite link` button. @@ -31,24 +31,24 @@ * In a terminal check logs from the watcher peer container. - * Get the container id + * Get the container id: ```bash laconic-so --stack mobymask-v2 deploy-system ps | grep mobymask-watcher-server ``` - * Check logs + * Check logs: ```bash - docker logs -f CONTAINER_ID + docker logs -f ``` -* It should have received the message, sent transaction to L2 chain and received a transaction receipt with block details. +* It should have received the message, sent transaction to L2 chain and received a transaction receipt for an `invoke` message with block details. 
Example log: - ``` - 2023-03-23T10:25:19.771Z vulcanize:peer-listener [10:25:19] Received a message on mobymask P2P network from peer: PeerId(12D3KooWAVNswtcrX12iDYukEoxdQwD34kJyRWcQTfZ4unGg2xjd) + ```bash + 2023-03-23T10:25:19.771Z vulcanize:peer-listener [10:25:19] Received a message on mobymask P2P network from peer: 12D3KooWAVNswtcrX12iDYukEoxdQwD34kJyRWcQTfZ4unGg2xjd 2023-03-23T10:25:24.143Z laconic:libp2p-utils Transaction receipt for invoke message { to: '0x558024C7d593B840E1BfD83E9B287a5CDad4db15', blockNumber: 1996, @@ -60,7 +60,7 @@ ``` * Check the phisher in watcher GQL: http://localhost:3001/graphql - * Use the blockHash from transaction receipt details or query for latest block + * Use the blockHash from transaction receipt details or query for latest block: ```gql query { @@ -71,7 +71,7 @@ } ``` - * Get the deployed contract address + * Get the deployed contract address: ```bash laconic-so --stack mobymask-v2 deploy-system exec mobymask-app "cat src/config.json" @@ -94,7 +94,7 @@ } ``` - It should return true for reported phisher names. + It should return `true` for reported phisher names. * Watcher internally is using L2 chain `eth_getStorageAt` method. @@ -107,7 +107,7 @@ * Revocation messages can be seen in the debug panel `MESSAGES` tab of other browsers. -* Check the watcher peer logs. It should receive a message and log the transaction receipt for revoke message. +* Check the watcher peer logs. It should receive a message and log the transaction receipt for a `revoke` message. * Try reporting a phisher from the revoked invitee's browser. @@ -129,4 +129,4 @@ } ``` - It should return false as the invitation/delegation used for reporting phishers has been revoked. + It should return `false` as the invitation/delegation used for reporting phishers has been revoked. 
-- 2.45.2 From 9e4240df0763dc963ae85196c62e0362bcedb897 Mon Sep 17 00:00:00 2001 From: David Boreham Date: Tue, 4 Apr 2023 11:29:16 -0600 Subject: [PATCH 03/17] Update version --- app/data/version.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/data/version.txt b/app/data/version.txt index 7a0035e1..0c1b3362 100644 --- a/app/data/version.txt +++ b/app/data/version.txt @@ -1,2 +1,2 @@ # This file should be re-generated running: scripts/update-version-file.sh script -v1.0.35-df23476 +v1.0.36-2515878 -- 2.45.2 From 11375fed0c9309f98693320db98c2dad3157fbd6 Mon Sep 17 00:00:00 2001 From: David Boreham Date: Tue, 4 Apr 2023 20:26:19 -0600 Subject: [PATCH 04/17] Fail on error installing package --- .../container-build/cerc-builder-gerbil/install-dependencies.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/data/container-build/cerc-builder-gerbil/install-dependencies.sh b/app/data/container-build/cerc-builder-gerbil/install-dependencies.sh index dbbbfe84..36855a9b 100755 --- a/app/data/container-build/cerc-builder-gerbil/install-dependencies.sh +++ b/app/data/container-build/cerc-builder-gerbil/install-dependencies.sh @@ -11,6 +11,6 @@ DEPS=(github.com/fare/gerbil-utils ) ; for i in ${DEPS[@]} ; do echo "Installing gerbil package: $i" - gxpkg install $i && + gxpkg install $i gxpkg build $i done -- 2.45.2 From 9ffa9bb5a97b454da76c8cdeb1722109ce430b53 Mon Sep 17 00:00:00 2001 From: prathamesh0 <42446521+prathamesh0@users.noreply.github.com> Date: Wed, 5 Apr 2023 10:25:50 +0530 Subject: [PATCH 05/17] Handle restarts for services in `fixturenet-optimism` stack (#282) * Check existing L1 contracts deployment * Rename volume used for generated L2 config * Check for existing L2 geth data directory * Cross check existing L2 config against L1 deployment config * Verify sequencer key in existing L2 geth data directory * Add instructions to troubleshoot corrupt L2 geth dir * Separate out instructions to run L2 with external L1 * 
Update docs --- .../docker-compose-fixturenet-optimism.yml | 15 ++-- .../fixturenet-optimism/generate-l2-config.sh | 21 +++++ .../optimism-contracts/run.sh | 41 ++++++--- .../config/fixturenet-optimism/run-op-geth.sh | 52 +++++++---- .../verify-contract-deployment.ts | 30 +++++++ .../stacks/fixturenet-optimism/L2-ONLY.md | 87 +++++++++++++++++++ app/data/stacks/fixturenet-optimism/README.md | 56 +++++++----- 7 files changed, 247 insertions(+), 55 deletions(-) create mode 100644 app/data/container-build/cerc-optimism-contracts/hardhat-tasks/verify-contract-deployment.ts create mode 100644 app/data/stacks/fixturenet-optimism/L2-ONLY.md diff --git a/app/data/compose/docker-compose-fixturenet-optimism.yml b/app/data/compose/docker-compose-fixturenet-optimism.yml index ee7d3cde..d2e96d3f 100644 --- a/app/data/compose/docker-compose-fixturenet-optimism.yml +++ b/app/data/compose/docker-compose-fixturenet-optimism.yml @@ -14,6 +14,7 @@ services: "./wait-for-it.sh -h $${L1_HOST} -p $${L1_PORT} -s -t 60 -- ./run.sh" volumes: - ../config/wait-for-it.sh:/app/packages/contracts-bedrock/wait-for-it.sh + - ../container-build/cerc-optimism-contracts/hardhat-tasks/verify-contract-deployment.ts:/app/packages/contracts-bedrock/tasks/verify-contract-deployment.ts - ../container-build/cerc-optimism-contracts/hardhat-tasks/rekey-json.ts:/app/packages/contracts-bedrock/tasks/rekey-json.ts - ../container-build/cerc-optimism-contracts/hardhat-tasks/send-balance.ts:/app/packages/contracts-bedrock/tasks/send-balance.ts - ../config/fixturenet-optimism/optimism-contracts/update-config.js:/app/packages/contracts-bedrock/update-config.js @@ -24,7 +25,7 @@ services: extra_hosts: - "host.docker.internal:host-gateway" - # Generates the config files required for L2 (outputs to volume op_node_data) + # Generates the config files required for L2 (outputs to volume l2_config) op-node-l2-config-gen: image: cerc/optimism-op-node:local depends_on: @@ -35,12 +36,12 @@ services: volumes: - 
../config/fixturenet-optimism/generate-l2-config.sh:/app/generate-l2-config.sh - l1_deployment:/contracts-bedrock:ro - - op_node_data:/app + - l2_config:/app command: ["sh", "/app/generate-l2-config.sh"] extra_hosts: - "host.docker.internal:host-gateway" - # Initializes and runs the L2 execution client + # Initializes and runs the L2 execution client (outputs to volume l2_geth_data) op-geth: image: cerc/optimism-l2geth:local depends_on: @@ -48,8 +49,9 @@ services: condition: service_started volumes: - ../config/fixturenet-optimism/run-op-geth.sh:/run-op-geth.sh - - op_node_data:/op-node:ro + - l2_config:/op-node:ro - l2_accounts:/l2-accounts:ro + - l2_geth_data:/datadir entrypoint: "sh" command: "/run-op-geth.sh" ports: @@ -71,7 +73,7 @@ services: image: cerc/optimism-op-node:local volumes: - ../config/fixturenet-optimism/run-op-node.sh:/app/run-op-node.sh - - op_node_data:/op-node-data:ro + - l2_config:/op-node-data:ro - l2_accounts:/l2-accounts:ro command: ["sh", "/app/run-op-node.sh"] ports: @@ -110,4 +112,5 @@ volumes: fixturenet_geth_accounts: l1_deployment: l2_accounts: - op_node_data: + l2_config: + l2_geth_data: diff --git a/app/data/config/fixturenet-optimism/generate-l2-config.sh b/app/data/config/fixturenet-optimism/generate-l2-config.sh index 0ec60a08..9c439f32 100755 --- a/app/data/config/fixturenet-optimism/generate-l2-config.sh +++ b/app/data/config/fixturenet-optimism/generate-l2-config.sh @@ -4,6 +4,27 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then set -x fi +# Check existing config if it exists +if [ -f /app/jwt.txt ] && [ -f /app/rollup.json ]; then + echo "Found existing L2 config, cross-checking with L1 deployment config" + + SOURCE_L1_CONF=$(cat /contracts-bedrock/deploy-config/getting-started.json) + EXP_L1_BLOCKHASH=$(echo "$SOURCE_L1_CONF" | jq -r '.l1StartingBlockTag') + EXP_BATCHER=$(echo "$SOURCE_L1_CONF" | jq -r '.batchSenderAddress') + + GEN_L2_CONF=$(cat /app/rollup.json) + GEN_L1_BLOCKHASH=$(echo "$GEN_L2_CONF" | jq -r '.genesis.l1.hash') + 
GEN_BATCHER=$(echo "$GEN_L2_CONF" | jq -r '.genesis.system_config.batcherAddr') + + if [ "$EXP_L1_BLOCKHASH" = "$GEN_L1_BLOCKHASH" ] && [ "$EXP_BATCHER" = "$GEN_BATCHER" ]; then + echo "Config cross-checked, exiting" + exit 0 + fi + + echo "Existing L2 config doesn't match the L1 deployment config, please clear L2 config volume before starting" + exit 1 +fi + op-node genesis l2 \ --deploy-config /contracts-bedrock/deploy-config/getting-started.json \ --deployment-dir /contracts-bedrock/deployments/getting-started/ \ diff --git a/app/data/config/fixturenet-optimism/optimism-contracts/run.sh b/app/data/config/fixturenet-optimism/optimism-contracts/run.sh index e10c45e1..d4585d4a 100755 --- a/app/data/config/fixturenet-optimism/optimism-contracts/run.sh +++ b/app/data/config/fixturenet-optimism/optimism-contracts/run.sh @@ -4,22 +4,43 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then set -x fi -# TODO Support restarts; fixturenet-eth-geth currently starts fresh on a restart -# Exit if a deployment already exists (on restarts) -# if [ -d "deployments/getting-started" ]; then -# echo "Deployment directory deployments/getting-started already exists, exiting" -# exit 0 -# fi - echo "Using L1 RPC endpoint ${L1_RPC}" -# Append tasks/index.ts file -echo "import './rekey-json'" >> tasks/index.ts -echo "import './send-balance'" >> tasks/index.ts +IMPORT_1="import './verify-contract-deployment'" +IMPORT_2="import './rekey-json'" +IMPORT_3="import './send-balance'" + +# Append mounted tasks to tasks/index.ts file if not present +if ! 
grep -Fxq "$IMPORT_1" tasks/index.ts; then + echo "$IMPORT_1" >> tasks/index.ts + echo "$IMPORT_2" >> tasks/index.ts + echo "$IMPORT_3" >> tasks/index.ts +fi # Update the chainId in the hardhat config sed -i "/getting-started/ {n; s/.*chainId.*/ chainId: $L1_CHAIN_ID,/}" hardhat.config.ts +# Exit if a deployment already exists (on restarts) +# Note: fixturenet-eth-geth currently starts fresh on a restart +if [ -d "deployments/getting-started" ]; then + echo "Deployment directory deployments/getting-started found, checking SystemDictator deployment" + + # Read JSON file into variable + SYSTEM_DICTATOR_DETAILS=$(cat deployments/getting-started/SystemDictator.json) + + # Parse JSON into variables + SYSTEM_DICTATOR_ADDRESS=$(echo "$SYSTEM_DICTATOR_DETAILS" | jq -r '.address') + SYSTEM_DICTATOR_TXHASH=$(echo "$SYSTEM_DICTATOR_DETAILS" | jq -r '.transactionHash') + + if yarn hardhat verify-contract-deployment --contract "${SYSTEM_DICTATOR_ADDRESS}" --transaction-hash "${SYSTEM_DICTATOR_TXHASH}"; then + echo "Deployment verification successful, exiting" + exit 0 + else + echo "Deployment verification failed, please clear L1 deployment volume before starting" + exit 1 + fi +fi + # Generate the L2 account addresses yarn hardhat rekey-json --output /l2-accounts/keys.json diff --git a/app/data/config/fixturenet-optimism/run-op-geth.sh b/app/data/config/fixturenet-optimism/run-op-geth.sh index cb180065..68f6c5c5 100755 --- a/app/data/config/fixturenet-optimism/run-op-geth.sh +++ b/app/data/config/fixturenet-optimism/run-op-geth.sh @@ -4,34 +4,50 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then set -x fi -mkdir datadir - -echo "pwd" > datadir/password - # TODO: Add in container build or use other tool -echo "installing jq" +echo "Installing jq" apk update && apk add jq -# Get SEQUENCER KEY from keys.json +# Get SEQUENCER key from keys.json SEQUENCER_KEY=$(jq -r '.Sequencer.privateKey' /l2-accounts/keys.json | tr -d '"') -echo $SEQUENCER_KEY > datadir/block-signer-key -geth account import 
--datadir=datadir --password=datadir/password datadir/block-signer-key +# Initialize op-geth if datadir/geth not found +if [ -f /op-node/jwt.txt ] && [ -d datadir/geth ]; then + echo "Found existing datadir, checking block signer key" -while [ ! -f "/op-node/jwt.txt" ] -do - echo "Config files not created. Checking after 5 seconds." - sleep 5 -done + BLOCK_SIGNER_KEY=$(cat datadir/block-signer-key) -echo "Config files created by op-node, proceeding with script..." + if [ "$SEQUENCER_KEY" = "$BLOCK_SIGNER_KEY" ]; then + echo "Sequencer and block signer keys match, skipping initialization" + else + echo "Sequencer and block signer keys don't match, please clear L2 geth data volume before starting" + exit 1 + fi +else + echo "Initializing op-geth" -cp /op-node/genesis.json ./ -geth init --datadir=datadir genesis.json + mkdir -p datadir + echo "pwd" > datadir/password + echo $SEQUENCER_KEY > datadir/block-signer-key + + geth account import --datadir=datadir --password=datadir/password datadir/block-signer-key + + while [ ! -f "/op-node/jwt.txt" ] + do + echo "Config files not created. Checking after 5 seconds." + sleep 5 + done + + echo "Config files created by op-node, proceeding with the initialization..." 
+ + geth init --datadir=datadir /op-node/genesis.json + echo "Node Initialized" +fi SEQUENCER_ADDRESS=$(jq -r '.Sequencer.address' /l2-accounts/keys.json | tr -d '"') echo "SEQUENCER_ADDRESS: ${SEQUENCER_ADDRESS}" -cp /op-node/jwt.txt ./ + +# Run op-geth geth \ --datadir ./datadir \ --http \ @@ -52,7 +68,7 @@ geth \ --authrpc.vhosts="*" \ --authrpc.addr=0.0.0.0 \ --authrpc.port=8551 \ - --authrpc.jwtsecret=./jwt.txt \ + --authrpc.jwtsecret=/op-node/jwt.txt \ --rollup.disabletxpoolgossip=true \ --password=./datadir/password \ --allow-insecure-unlock \ diff --git a/app/data/container-build/cerc-optimism-contracts/hardhat-tasks/verify-contract-deployment.ts b/app/data/container-build/cerc-optimism-contracts/hardhat-tasks/verify-contract-deployment.ts new file mode 100644 index 00000000..5b673ca1 --- /dev/null +++ b/app/data/container-build/cerc-optimism-contracts/hardhat-tasks/verify-contract-deployment.ts @@ -0,0 +1,30 @@ +import { task } from 'hardhat/config' +import '@nomiclabs/hardhat-ethers' + +task( + 'verify-contract-deployment', + 'Verifies the given contract deployment transaction' +) + .addParam('contract', 'Address of the contract deployed') + .addParam('transactionHash', 'Hash of the deployment transaction') + .setAction(async ({ contract, transactionHash }, { ethers }) => { + const provider = new ethers.providers.JsonRpcProvider( + `${process.env.L1_RPC}` + ) + + // Get the deployment tx receipt + const receipt = await provider.getTransactionReceipt(transactionHash) + if ( + receipt && + receipt.contractAddress && + receipt.contractAddress === contract + ) { + console.log( + `Deployment for contract ${contract} in transaction ${transactionHash} verified` + ) + process.exit(0) + } else { + console.log(`Contract ${contract} deployment verification failed`) + process.exit(1) + } + }) diff --git a/app/data/stacks/fixturenet-optimism/L2-ONLY.md b/app/data/stacks/fixturenet-optimism/L2-ONLY.md new file mode 100644 index 00000000..55eab385 --- /dev/null +++ 
b/app/data/stacks/fixturenet-optimism/L2-ONLY.md @@ -0,0 +1,87 @@ +# fixturenet-optimism + +Instructions to setup and deploy L2 fixturenet using [Optimism](https://stack.optimism.io) + +## Setup + +Prerequisite: An L1 Ethereum RPC endpoint + +Clone required repositories: + +```bash +laconic-so --stack fixturenet-optimism setup-repositories --exclude cerc-io/go-ethereum +``` + +Checkout to the required versions and branches in repos: + +```bash +# Optimism +cd ~/cerc/optimism +git checkout @eth-optimism/sdk@0.0.0-20230329025055 +``` + +Build the container images: + +```bash +laconic-so --stack fixturenet-optimism build-containers --include cerc/foundry,cerc/optimism-contracts,cerc/optimism-op-node,cerc/optimism-l2geth,cerc/optimism-op-batcher +``` + +This should create the required docker images in the local image registry: +* `cerc/foundry` +* `cerc/optimism-contracts` +* `cerc/optimism-l2geth` +* `cerc/optimism-op-batcher` +* `cerc/optimism-op-node` + +## Deploy + +Update the [l1-params.env](../../config/fixturenet-optimism/l1-params.env) file with L1 endpoint (`L1_RPC`, `L1_HOST` and `L1_PORT`) and other params + +* NOTE: + * Stack Orchestrator needs to be run in [`dev`](/docs/CONTRIBUTING.md#install-developer-mode) mode to be able to edit the env file + * If L1 is running on the host machine, use `host.docker.internal` as the hostname to access the host port + +Deploy the stack: + +```bash +laconic-so --stack fixturenet-optimism deploy up --include fixturenet-optimism +``` + +The `fixturenet-optimism-contracts` service may take a while (`~15 mins`) to complete running as it: +1. waits for the 'Merge' to happen on L1 +2. waits for a finalized block to exist on L1 (so that it can be taken as a starting block for roll ups) +3. 
deploys the L1 contracts + +To list down and monitor the running containers: + +```bash +laconic-so --stack fixturenet-optimism deploy ps + +# With status +docker ps + +# Check logs for a container +docker logs -f +``` + +## Clean up + +Stop all services running in the background: + +```bash +laconic-so --stack fixturenet-optimism deploy down --include fixturenet-optimism +``` + +Clear volumes created by this stack: + +```bash +# List all relevant volumes +docker volume ls -q --filter name=laconic* + +# Remove all the listed volumes +docker volume rm $(docker volume ls -q --filter name=laconic*) +``` + +## Troubleshooting + +See [Troubleshooting](./README.md#troubleshooting) \ No newline at end of file diff --git a/app/data/stacks/fixturenet-optimism/README.md b/app/data/stacks/fixturenet-optimism/README.md index 2f13ec88..65839e70 100644 --- a/app/data/stacks/fixturenet-optimism/README.md +++ b/app/data/stacks/fixturenet-optimism/README.md @@ -2,15 +2,14 @@ Instructions to setup and deploy an end-to-end L1+L2 stack with [fixturenet-eth](../fixturenet-eth/) (L1) and [Optimism](https://stack.optimism.io) (L2) +We support running just the L2 part of stack, given an external L1 endpoint. Follow [L2-ONLY](./L2-ONLY.md) for the same. 
+ ## Setup Clone required repositories: ```bash laconic-so --stack fixturenet-optimism setup-repositories - -# Exclude cerc-io/go-ethereum repository if running L1 separately -laconic-so --stack fixturenet-optimism setup-repositories --exclude cerc-io/go-ethereum ``` Checkout to the required versions and branches in repos: @@ -25,9 +24,6 @@ Build the container images: ```bash laconic-so --stack fixturenet-optimism build-containers - -# Only build containers required for L2 if running L1 separately -laconic-so --stack fixturenet-optimism build-containers --include cerc/foundry,cerc/optimism-contracts,cerc/optimism-op-node,cerc/optimism-l2geth,cerc/optimism-op-batcher ``` This should create the required docker images in the local image registry: @@ -43,19 +39,10 @@ This should create the required docker images in the local image registry: ## Deploy -(Optional) Update the [l1-params.env](../../config/fixturenet-optimism/l1-params.env) file with L1 endpoint (`L1_RPC`, `L1_HOST` and `L1_PORT`) and other params if running L1 separately - -* NOTE: - * Stack Orchestrator needs to be run in [`dev`](/docs/CONTRIBUTING.md#install-developer-mode) mode to be able to edit the env file - * If L1 is running on the host machine, use `host.docker.internal` as the hostname to access the host port - Deploy the stack: ```bash laconic-so --stack fixturenet-optimism deploy up - -# Only start fixturenet-optimism pod (L2) if running L1 separately -laconic-so --stack fixturenet-optimism deploy up --include fixturenet-optimism ``` The `fixturenet-optimism-contracts` service may take a while (`~15 mins`) to complete running as it: @@ -81,9 +68,6 @@ Stop all services running in the background: ```bash laconic-so --stack fixturenet-optimism deploy down - -# If only ran fixturenet-optimism pod (L2) -laconic-so --stack fixturenet-optimism deploy down --include fixturenet-optimism ``` Clear volumes created by this stack: @@ -96,9 +80,39 @@ docker volume ls -q --filter name=laconic* docker volume 
rm $(docker volume ls -q --filter name=laconic*) ``` +## Troubleshooting + +* If `op-geth` service aborts or is restarted, the following error might occur in the `op-node` service: + + ```bash + WARN [02-16|21:22:02.868] Derivation process temporary error attempts=14 err="stage 0 failed resetting: temp: failed to find the L2 Heads to start from: failed to fetch L2 block by hash 0x0000000000000000000000000000000000000000000000000000000000000000: failed to determine block-hash of hash 0x0000000000000000000000000000000000000000000000000000000000000000, could not get payload: not found" + ``` + +* This means that the data directory that `op-geth` is using is corrupted and needs to be reinitialized; the containers `op-geth`, `op-node` and `op-batcher` need to be started afresh: + * Stop and remove the concerned containers: + + ```bash + # List the containers + docker ps -f "name=op-geth|op-node|op-batcher" + + # Force stop and remove the listed containers + docker rm -f $(docker ps -qf "name=op-geth|op-node|op-batcher") + ``` + + * Remove the concerned volume: + + ```bash + # List the volume + docker volume ls -q --filter name=l2_geth_data + + # Remove the listed volume + docker volume rm $(docker volume ls -q --filter name=l2_geth_data) + ``` + + * Reuse the deployment command used in [Deploy](#deploy) to restart the stopped containers + ## Known Issues -* Currently not supported: - * Stopping and restarting the stack from where it left off; currently starts fresh on a restart -* Resource requirements (memory + time) for building `cerc/foundry` image are on the higher side +* `fixturenet-eth` currently starts fresh on a restart +* Resource requirements (memory + time) for building the `cerc/foundry` image are on the higher side * `cerc/optimism-contracts` image is currently based on `cerc/foundry` (Optimism requires foundry installation) -- 2.45.2 From 94e38ceabaa89a3ec54e25f4a03cf253d5fae5f9 Mon Sep 17 00:00:00 2001 From: Nabarun Gogoi Date: Wed, 5 Apr 2023 17:26:38 
+0530 Subject: [PATCH 06/17] Add ability to run mobymask-v2 stack with external optimism endpoint (#279) * Set optimism geth endpoint from env file * Set L1 account private keys from env * Only deploy contract and generate invite in mobymask container * Add readme for running mobymask v2 stack independently * Modify mobymask container to stop running server and update readmes * Check deployer account balance before deploying contract * Fix for checking account balance before deploying * Update readme description * Update MobyMask repo tag in readme --- .../docker-compose-fixturenet-optimism.yml | 4 +- .../docker-compose-mobymask-laconicd.yml | 25 ------ .../docker-compose-watcher-mobymask-v2.yml | 39 ++++----- .../config/fixturenet-optimism/l1-params.env | 7 +- .../deploy-and-generate-invite.sh | 38 +++++++-- .../watcher-mobymask-v2/mobymask-app-start.sh | 3 + .../watcher-mobymask-v2/optimism-params.env | 12 +++ .../watcher-mobymask-v2/secrets-template.json | 2 +- .../watcher-mobymask-v2/server-start.sh | 18 +++- .../watcher-config-template.toml | 2 +- .../container-build/cerc-mobymask/Dockerfile | 2 +- app/data/pod-list.txt | 3 +- app/data/stacks/fixturenet-optimism/README.md | 6 +- .../{L2-ONLY.md => l2-only.md} | 6 +- app/data/stacks/mobymask-v2/README.md | 23 ++--- app/data/stacks/mobymask-v2/mobymask-only.md | 85 +++++++++++++++++++ 16 files changed, 194 insertions(+), 81 deletions(-) delete mode 100644 app/data/compose/docker-compose-mobymask-laconicd.yml mode change 100644 => 100755 app/data/config/watcher-mobymask-v2/deploy-and-generate-invite.sh create mode 100644 app/data/config/watcher-mobymask-v2/optimism-params.env rename app/data/stacks/fixturenet-optimism/{L2-ONLY.md => l2-only.md} (86%) create mode 100644 app/data/stacks/mobymask-v2/mobymask-only.md diff --git a/app/data/compose/docker-compose-fixturenet-optimism.yml b/app/data/compose/docker-compose-fixturenet-optimism.yml index d2e96d3f..142a2251 100644 --- 
a/app/data/compose/docker-compose-fixturenet-optimism.yml +++ b/app/data/compose/docker-compose-fixturenet-optimism.yml @@ -55,7 +55,7 @@ services: entrypoint: "sh" command: "/run-op-geth.sh" ports: - - "8545" + - "0.0.0.0:8545:8545" healthcheck: test: ["CMD", "nc", "-vz", "localhost:8545"] interval: 30s @@ -77,7 +77,7 @@ services: - l2_accounts:/l2-accounts:ro command: ["sh", "/app/run-op-node.sh"] ports: - - "8547" + - "0.0.0.0:8547:8547" healthcheck: test: ["CMD", "nc", "-vz", "localhost:8547"] interval: 30s diff --git a/app/data/compose/docker-compose-mobymask-laconicd.yml b/app/data/compose/docker-compose-mobymask-laconicd.yml deleted file mode 100644 index 4847aeb3..00000000 --- a/app/data/compose/docker-compose-mobymask-laconicd.yml +++ /dev/null @@ -1,25 +0,0 @@ -version: "3.2" - -services: - laconicd: - restart: unless-stopped - image: cerc/laconicd:local - command: ["sh", "/docker-entrypoint-scripts.d/create-fixturenet.sh"] - volumes: - - ../config/fixturenet-laconicd/create-fixturenet.sh:/docker-entrypoint-scripts.d/create-fixturenet.sh - ports: - - "9473" - - "8545" - - "8546" - - "1317" - healthcheck: - test: ["CMD", "nc", "-v", "localhost", "8545"] - interval: 20s - timeout: 5s - retries: 15 - start_period: 10s - -networks: - # https://docs.docker.com/compose/networking/#configure-the-default-network - default: - name: mobymask-v2-network diff --git a/app/data/compose/docker-compose-watcher-mobymask-v2.yml b/app/data/compose/docker-compose-watcher-mobymask-v2.yml index 8478a4d9..667191a1 100644 --- a/app/data/compose/docker-compose-watcher-mobymask-v2.yml +++ b/app/data/compose/docker-compose-watcher-mobymask-v2.yml @@ -22,45 +22,40 @@ services: start_period: 10s mobymask: - restart: unless-stopped image: cerc/mobymask:local working_dir: /app/packages/server - depends_on: - op-node: - condition: service_healthy - op-geth: - condition: service_healthy - # TODO: Configure env file for ETH RPC URL & private key + env_file: + - 
../config/watcher-mobymask-v2/optimism-params.env environment: - ENV=PROD - command: ["sh", "./deploy-and-generate-invite.sh"] + command: + - sh + - -c + - | + ./wait-for-it.sh -h $${L2_GETH_HOST} -p $${L2_GETH_PORT} -s -t 0 && \ + ./wait-for-it.sh -h $${L2_NODE_HOST} -p $${L2_NODE_PORT} -s -t 0 && \ + ./deploy-and-generate-invite.sh volumes: - # TODO: add a script to set RPC endpoint from env - # add manually if running seperately + - ../config/wait-for-it.sh:/app/packages/server/wait-for-it.sh - ../config/watcher-mobymask-v2/secrets-template.json:/app/packages/server/secrets-template.json - ../config/watcher-mobymask-v2/deploy-and-generate-invite.sh:/app/packages/server/deploy-and-generate-invite.sh - moby_data_server:/app/packages/server - fixturenet_geth_accounts:/geth-accounts:ro - healthcheck: - test: ["CMD", "nc", "-v", "localhost", "3330"] - interval: 20s - timeout: 5s - retries: 15 - start_period: 10s + extra_hosts: + - "host.docker.internal:host-gateway" mobymask-watcher-server: - # TODO: pass optimism RPC endpoint restart: unless-stopped depends_on: mobymask-watcher-db: condition: service_healthy mobymask: - condition: service_healthy + condition: service_completed_successfully image: cerc/watcher-mobymask-v2:local + env_file: + - ../config/watcher-mobymask-v2/optimism-params.env command: ["sh", "server-start.sh"] volumes: - # TODO: add a script to set RPC endpoint from env - # add manually if running seperately - ../config/watcher-mobymask-v2/watcher-config-template.toml:/app/packages/mobymask-v2-watcher/environments/watcher-config-template.toml - ../config/watcher-mobymask-v2/peer.env:/app/packages/peer/.env - ../config/watcher-mobymask-v2/relay-id.json:/app/packages/mobymask-v2-watcher/relay-id.json @@ -78,6 +73,8 @@ services: timeout: 5s retries: 15 start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" # TODO: Move to a separate pod mobymask-app: @@ -85,7 +82,7 @@ services: mobymask-watcher-server: condition: service_healthy 
mobymask: - condition: service_healthy + condition: service_completed_successfully image: cerc/mobymask-ui:local command: ["sh", "mobymask-app-start.sh"] volumes: diff --git a/app/data/config/fixturenet-optimism/l1-params.env b/app/data/config/fixturenet-optimism/l1-params.env index 31876523..93c58805 100644 --- a/app/data/config/fixturenet-optimism/l1-params.env +++ b/app/data/config/fixturenet-optimism/l1-params.env @@ -1,8 +1,13 @@ # Change if pointing to an external L1 endpoint -L1_RPC="http://fixturenet-eth-geth-1:8545" + +# L1 endpoint L1_CHAIN_ID=1212 +L1_RPC="http://fixturenet-eth-geth-1:8545" L1_HOST="fixturenet-eth-geth-1" L1_PORT=8545 + +# Credentials for accounts on L1 to send balance to Optimism Proxy contract from +# (enables them to do transactions on L2) L1_ADDRESS= L1_PRIV_KEY= L1_ADDRESS_2= diff --git a/app/data/config/watcher-mobymask-v2/deploy-and-generate-invite.sh b/app/data/config/watcher-mobymask-v2/deploy-and-generate-invite.sh old mode 100644 new mode 100755 index 2a8e0eb5..b53b6b56 --- a/app/data/config/watcher-mobymask-v2/deploy-and-generate-invite.sh +++ b/app/data/config/watcher-mobymask-v2/deploy-and-generate-invite.sh @@ -1,11 +1,39 @@ #!/bin/sh set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi -# Read the private key of L1 account to deploy contract -# TODO: Take from env if /geth-accounts volume doesn't exist to allow using separately running L1 -L1_PRIV_KEY=$(head -n 1 /geth-accounts/accounts.csv | cut -d ',' -f 3) + +if [ -f /geth-accounts/accounts.csv ]; then + echo "Using L1 private key from the mounted volume" + # Read the private key of L1 account to deploy contract + PRIVATE_KEY_DEPLOYER=$(head -n 1 /geth-accounts/accounts.csv | cut -d ',' -f 3) +else + echo "Using PRIVATE_KEY_DEPLOYER from env" +fi # Set the private key -jq --arg privateKey "$L1_PRIV_KEY" '.privateKey = ($privateKey)' secrets-template.json > secrets.json +jq --arg privateKey "$PRIVATE_KEY_DEPLOYER" '.privateKey = $privateKey' secrets-template.json > 
secrets.json -npm start +export L2_GETH_URL="http://${L2_GETH_HOST}:${L2_GETH_PORT}" +jq --arg rpcUrl "$L2_GETH_URL" '.rpcUrl = $rpcUrl' secrets.json > secrets_updated.json && mv secrets_updated.json secrets.json + +cd ../hardhat +export RPC_URL="${L2_GETH_URL}" + +while true; do + ACCOUNT_BALANCE=$(yarn hardhat --network optimism balance $PRIVATE_KEY_DEPLOYER | grep ETH) + + if [ "$ACCOUNT_BALANCE" != "0.0 ETH" ]; then + echo "Account balance updated: $ACCOUNT_BALANCE" + break # exit the loop + fi + + echo "Account balance not updated: $ACCOUNT_BALANCE" + echo "Checking after 2 seconds" + sleep 2 +done + +cd ../server +npm run deployAndGenerateInvite diff --git a/app/data/config/watcher-mobymask-v2/mobymask-app-start.sh b/app/data/config/watcher-mobymask-v2/mobymask-app-start.sh index fa3870cf..efc4ed96 100755 --- a/app/data/config/watcher-mobymask-v2/mobymask-app-start.sh +++ b/app/data/config/watcher-mobymask-v2/mobymask-app-start.sh @@ -1,5 +1,8 @@ #!/bin/sh set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi # Merging config files to get deployed contract address jq -s '.[0] * .[1]' /app/src/mobymask-app-config.json /server/config.json > /app/src/config.json diff --git a/app/data/config/watcher-mobymask-v2/optimism-params.env b/app/data/config/watcher-mobymask-v2/optimism-params.env new file mode 100644 index 00000000..bafb37a8 --- /dev/null +++ b/app/data/config/watcher-mobymask-v2/optimism-params.env @@ -0,0 +1,12 @@ +# Change if pointing to an external optimism geth endpoint + +# L2 endpoints +# TODO: Add another env for complete URL to handle https +L2_GETH_HOST="op-geth" +L2_GETH_PORT=8545 +L2_NODE_HOST="op-node" +L2_NODE_PORT=8547 + +# Credentials for accounts to perform txs on L2 +PRIVATE_KEY_DEPLOYER= +PRIVATE_KEY_PEER= diff --git a/app/data/config/watcher-mobymask-v2/secrets-template.json b/app/data/config/watcher-mobymask-v2/secrets-template.json index 220efc8e..1397bbf1 100644 --- a/app/data/config/watcher-mobymask-v2/secrets-template.json +++ 
b/app/data/config/watcher-mobymask-v2/secrets-template.json @@ -1,5 +1,5 @@ { - "rpcUrl": "http://op-geth:8545", + "rpcUrl": "", "privateKey": "", "baseURI": "http://127.0.0.1:3002/#" } diff --git a/app/data/config/watcher-mobymask-v2/server-start.sh b/app/data/config/watcher-mobymask-v2/server-start.sh index 6b4cd345..5fe9ab84 100755 --- a/app/data/config/watcher-mobymask-v2/server-start.sh +++ b/app/data/config/watcher-mobymask-v2/server-start.sh @@ -1,11 +1,25 @@ #!/bin/sh +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi # Assign deployed contract address from server config CONTRACT_ADDRESS=$(jq -r '.address' /server/config.json | tr -d '"') -L1_PRIV_KEY_2=$(awk -F, 'NR==2{print $NF}' /geth-accounts/accounts.csv) -sed "s/REPLACE_WITH_PRIVATE_KEY/${L1_PRIV_KEY_2}/" environments/watcher-config-template.toml > environments/local.toml +if [ -f /geth-accounts/accounts.csv ]; then + echo "Using L1 private key from the mounted volume" + # Read the private key of L1 account for sending txs from peer + PRIVATE_KEY_PEER=$(awk -F, 'NR==2{print $NF}' /geth-accounts/accounts.csv) +else + echo "Using PRIVATE_KEY_PEER from env" +fi + +sed "s/REPLACE_WITH_PRIVATE_KEY/${PRIVATE_KEY_PEER}/" environments/watcher-config-template.toml > environments/local.toml sed -i "s/REPLACE_WITH_CONTRACT_ADDRESS/${CONTRACT_ADDRESS}/" environments/local.toml +export L2_GETH_URL="http://${L2_GETH_HOST}:${L2_GETH_PORT}" +sed -i 's|REPLACE_WITH_L2_GETH_URL|'"${L2_GETH_URL}"'|' environments/local.toml + echo 'yarn server' yarn server diff --git a/app/data/config/watcher-mobymask-v2/watcher-config-template.toml b/app/data/config/watcher-mobymask-v2/watcher-config-template.toml index ff937678..05554e3a 100644 --- a/app/data/config/watcher-mobymask-v2/watcher-config-template.toml +++ b/app/data/config/watcher-mobymask-v2/watcher-config-template.toml @@ -59,7 +59,7 @@ [upstream] [upstream.ethServer] gqlApiEndpoint = "http://ipld-eth-server:8083/graphql" - rpcProviderEndpoint = 
"http://op-geth:8545" + rpcProviderEndpoint = "REPLACE_WITH_L2_GETH_URL" blockDelayInMilliSecs = 60000 [upstream.cache] diff --git a/app/data/container-build/cerc-mobymask/Dockerfile b/app/data/container-build/cerc-mobymask/Dockerfile index c94a1cea..caa3e6b5 100644 --- a/app/data/container-build/cerc-mobymask/Dockerfile +++ b/app/data/container-build/cerc-mobymask/Dockerfile @@ -1,6 +1,6 @@ FROM node:16.17.1-alpine3.16 -RUN apk --update --no-cache add python3 alpine-sdk jq +RUN apk --update --no-cache add python3 alpine-sdk jq bash WORKDIR /app diff --git a/app/data/pod-list.txt b/app/data/pod-list.txt index 79c4171a..f24c9ed0 100644 --- a/app/data/pod-list.txt +++ b/app/data/pod-list.txt @@ -15,7 +15,8 @@ watcher-erc20 watcher-erc721 watcher-uniswap-v3 watcher-mobymask-v2 -mobymask-laconicd +mobymask-app +peer-test-app test eth-probe keycloak diff --git a/app/data/stacks/fixturenet-optimism/README.md b/app/data/stacks/fixturenet-optimism/README.md index 65839e70..61ca97b3 100644 --- a/app/data/stacks/fixturenet-optimism/README.md +++ b/app/data/stacks/fixturenet-optimism/README.md @@ -2,7 +2,7 @@ Instructions to setup and deploy an end-to-end L1+L2 stack with [fixturenet-eth](../fixturenet-eth/) (L1) and [Optimism](https://stack.optimism.io) (L2) -We support running just the L2 part of stack, given an external L1 endpoint. Follow [L2-ONLY](./L2-ONLY.md) for the same. +We support running just the L2 part of stack, given an external L1 endpoint. Follow [l2-only](./l2-only.md) for the same. 
## Setup @@ -74,10 +74,10 @@ Clear volumes created by this stack: ```bash # List all relevant volumes -docker volume ls -q --filter name=laconic* +docker volume ls -q --filter "name=.*fixturenet_geth_accounts|.*l1_deployment|.*l2_accounts|.*l2_config|.*l2_geth_data" # Remove all the listed volumes -docker volume rm $(docker volume ls -q --filter name=laconic*) +docker volume rm $(docker volume ls -q --filter "name=.*fixturenet_geth_accounts|.*l1_deployment|.*l2_accounts|.*l2_config|.*l2_geth_data") ``` ## Troubleshooting diff --git a/app/data/stacks/fixturenet-optimism/L2-ONLY.md b/app/data/stacks/fixturenet-optimism/l2-only.md similarity index 86% rename from app/data/stacks/fixturenet-optimism/L2-ONLY.md rename to app/data/stacks/fixturenet-optimism/l2-only.md index 55eab385..a1c33717 100644 --- a/app/data/stacks/fixturenet-optimism/L2-ONLY.md +++ b/app/data/stacks/fixturenet-optimism/l2-only.md @@ -55,7 +55,7 @@ The `fixturenet-optimism-contracts` service may take a while (`~15 mins`) to com To list down and monitor the running containers: ```bash -laconic-so --stack fixturenet-optimism deploy ps +laconic-so --stack fixturenet-optimism deploy --include fixturenet-optimism ps # With status docker ps @@ -76,10 +76,10 @@ Clear volumes created by this stack: ```bash # List all relevant volumes -docker volume ls -q --filter name=laconic* +docker volume ls -q --filter "name=.*fixturenet_geth_accounts|.*l1_deployment|.*l2_accounts|.*l2_config|.*l2_geth_data" # Remove all the listed volumes -docker volume rm $(docker volume ls -q --filter name=laconic*) +docker volume rm $(docker volume ls -q --filter "name=.*fixturenet_geth_accounts|.*l1_deployment|.*l2_accounts|.*l2_config|.*l2_geth_data") ``` ## Troubleshooting diff --git a/app/data/stacks/mobymask-v2/README.md b/app/data/stacks/mobymask-v2/README.md index 68b9b61c..513bd431 100644 --- a/app/data/stacks/mobymask-v2/README.md +++ b/app/data/stacks/mobymask-v2/README.md @@ -1,6 +1,9 @@ # MobyMask v2 watcher 
-Instructions to deploy MobyMask v2 watcher stack using [laconic-stack-orchestrator](/README.md#install) +Instructions to setup and deploy an end-to-end MobyMask v2 stack ([L1](../fixturenet-eth/) + [L2](../fixturenet-optimism/) chains + watcher) using [laconic-stack-orchestrator](/README.md#install) + +We support running just the watcher part of stack, given an external L2 Optimism endpoint. +Follow [mobymask-only](./mobymask-only.md) for the same. ## Setup @@ -30,7 +33,7 @@ git checkout laconic # MobyMask cd ~/cerc/MobyMask -git checkout v0.1.1 +git checkout v0.1.2 # Optimism cd ~/cerc/optimism @@ -68,19 +71,9 @@ Deploy the stack: Find the watcher container's id and export it for later use: ```bash -laconic-so --stack mobymask-v2 deploy-system ps | grep "mobymask-watcher-server" - -export CONTAINER_ID= +export CONTAINER_ID=$(docker ps -q --filter "name=mobymask-watcher-server") ``` -Example output: - -``` -id: 5d3aae4b22039fcd1c9b18feeb91318ede1100581e75bb5ac54f9e436066b02c, name: laconic-bfb01caf98b1b8f7c8db4d33f11b905a-mobymask-watcher-server-1, ports: 0.0.0.0:3001->3001/tcp, 0.0.0.0:9001->9001/tcp, 0.0.0.0:9090->9090/tcp -``` - -In above output the container ID is `5d3aae4b22039fcd1c9b18feeb91318ede1100581e75bb5ac54f9e436066b02c` - Run the peer tests: ```bash @@ -128,11 +121,11 @@ Clear volumes: * List all relevant volumes: ```bash - docker volume ls -q --filter name=laconic* + docker volume ls -q --filter "name=.*mobymask_watcher_db_data|.*moby_data_server|.*fixturenet_geth_accounts|.*l1_deployment|.*l2_accounts|.*l2_config|.*l2_geth_data" ``` * Remove all the listed volumes: ```bash - docker volume rm $(docker volume ls -q --filter name=laconic*) + docker volume rm $(docker volume ls -q --filter "name=.*mobymask_watcher_db_data|.*moby_data_server|.*fixturenet_geth_accounts|.*l1_deployment|.*l2_accounts|.*l2_config|.*l2_geth_data") ``` diff --git a/app/data/stacks/mobymask-v2/mobymask-only.md b/app/data/stacks/mobymask-v2/mobymask-only.md new file mode 100644 
index 00000000..843e7533 --- /dev/null +++ b/app/data/stacks/mobymask-v2/mobymask-only.md @@ -0,0 +1,85 @@ +# MobyMask v2 watcher + +Instructions to setup and deploy MobyMask v2 watcher independently + +## Setup + +Prerequisite: An L2 Optimism RPC endpoint + +Clone required repositories: + +```bash +laconic-so --stack mobymask-v2 setup-repositories --include cerc-io/MobyMask,cerc-io/watcher-ts,cerc-io/react-peer,cerc-io/mobymask-ui +``` + +Checkout to the required versions and branches in repos: + +```bash +```bash +# watcher-ts +cd ~/cerc/watcher-ts +git checkout v0.2.34 + +# react-peer +cd ~/cerc/react-peer +git checkout v0.2.31 + +# mobymask-ui +cd ~/cerc/mobymask-ui +git checkout laconic + +# MobyMask +cd ~/cerc/MobyMask +git checkout v0.1.2 +``` + +Build the container images: + +```bash +laconic-so --stack mobymask-v2 build-containers --include cerc/watcher-mobymask-v2,cerc/react-peer,cerc/mobymask-ui,cerc/mobymask +``` + +This should create the required docker images in the local image registry + +## Deploy + +Update the [optimism-params.env](../../config/watcher-mobymask-v2/optimism-params.env) file with Optimism endpoints and other params if running Optimism separately + +* NOTE: + * Stack Orchestrator needs to be run in [`dev`](/docs/CONTRIBUTING.md#install-developer-mode) mode to be able to edit the env file + * If Optimism is running on the host machine, use `host.docker.internal` as the hostname to access the host port + +Deploy the stack: + +```bash +laconic-so --stack mobymask-v2 deploy --include watcher-mobymask-v2 up +``` + +To list down and monitor the running containers: + +```bash +laconic-so --stack mobymask-v2 deploy --include watcher-mobymask-v2 ps +# With status +docker ps +# Check logs for a container +docker logs -f +``` + +See [Tests](./README.md#tests) and [Demo](./README.md#demo) to interact with stack + +## Clean up + +Stop all services running in the background: + +```bash +laconic-so --stack mobymask-v2 deploy down --include 
watcher-mobymask-v2 +``` + +Clear volumes created by this stack: + +```bash +# List all relevant volumes +docker volume ls -q --filter "name=.*mobymask_watcher_db_data|.*moby_data_server|.*fixturenet_geth_accounts" +# Remove all the listed volumes +docker volume rm $(docker volume ls -q --filter "name=.*mobymask_watcher_db_data|.*moby_data_server|.*fixturenet_geth_accounts") +``` -- 2.45.2 From 59fe9aae59e2622761fe28beba9e5cc9e6d2614f Mon Sep 17 00:00:00 2001 From: prathamesh0 <42446521+prathamesh0@users.noreply.github.com> Date: Wed, 5 Apr 2023 17:52:12 +0530 Subject: [PATCH 07/17] Handle restarts in mobymask-v2 stack (#286) * Verify existing contract deployment * Update mobymask-v2 demo instructions --- .../docker-compose-watcher-mobymask-v2.yml | 12 ++++----- .../deploy-and-generate-invite.sh | 25 ++++++++++++++++--- .../{server-start.sh => start-server.sh} | 0 app/data/stacks/mobymask-v2/README.md | 18 ++++++------- app/data/stacks/mobymask-v2/demo.md | 20 +++++---------- app/data/stacks/mobymask-v2/mobymask-only.md | 9 ++++--- 6 files changed, 47 insertions(+), 37 deletions(-) rename app/data/config/watcher-mobymask-v2/{server-start.sh => start-server.sh} (100%) diff --git a/app/data/compose/docker-compose-watcher-mobymask-v2.yml b/app/data/compose/docker-compose-watcher-mobymask-v2.yml index 667191a1..fd68212a 100644 --- a/app/data/compose/docker-compose-watcher-mobymask-v2.yml +++ b/app/data/compose/docker-compose-watcher-mobymask-v2.yml @@ -39,7 +39,7 @@ services: - ../config/wait-for-it.sh:/app/packages/server/wait-for-it.sh - ../config/watcher-mobymask-v2/secrets-template.json:/app/packages/server/secrets-template.json - ../config/watcher-mobymask-v2/deploy-and-generate-invite.sh:/app/packages/server/deploy-and-generate-invite.sh - - moby_data_server:/app/packages/server + - mobymask_deployment:/app/packages/server - fixturenet_geth_accounts:/geth-accounts:ro extra_hosts: - "host.docker.internal:host-gateway" @@ -54,14 +54,14 @@ services: image: 
cerc/watcher-mobymask-v2:local env_file: - ../config/watcher-mobymask-v2/optimism-params.env - command: ["sh", "server-start.sh"] + command: ["sh", "start-server.sh"] volumes: - ../config/watcher-mobymask-v2/watcher-config-template.toml:/app/packages/mobymask-v2-watcher/environments/watcher-config-template.toml - ../config/watcher-mobymask-v2/peer.env:/app/packages/peer/.env - ../config/watcher-mobymask-v2/relay-id.json:/app/packages/mobymask-v2-watcher/relay-id.json - ../config/watcher-mobymask-v2/peer-id.json:/app/packages/mobymask-v2-watcher/peer-id.json - - ../config/watcher-mobymask-v2/server-start.sh:/app/packages/mobymask-v2-watcher/server-start.sh - - moby_data_server:/server + - ../config/watcher-mobymask-v2/start-server.sh:/app/packages/mobymask-v2-watcher/start-server.sh + - mobymask_deployment:/server - fixturenet_geth_accounts:/geth-accounts:ro ports: - "0.0.0.0:3001:3001" @@ -89,7 +89,7 @@ services: - ../config/watcher-mobymask-v2/mobymask-app.env:/app/.env - ../config/watcher-mobymask-v2/mobymask-app-config.json:/app/src/mobymask-app-config.json - ../config/watcher-mobymask-v2/mobymask-app-start.sh:/app/mobymask-app-start.sh - - moby_data_server:/server + - mobymask_deployment:/server ports: - "0.0.0.0:3002:3000" healthcheck: @@ -120,5 +120,5 @@ services: volumes: mobymask_watcher_db_data: - moby_data_server: + mobymask_deployment: fixturenet_geth_accounts: diff --git a/app/data/config/watcher-mobymask-v2/deploy-and-generate-invite.sh b/app/data/config/watcher-mobymask-v2/deploy-and-generate-invite.sh index b53b6b56..2a3000ee 100755 --- a/app/data/config/watcher-mobymask-v2/deploy-and-generate-invite.sh +++ b/app/data/config/watcher-mobymask-v2/deploy-and-generate-invite.sh @@ -4,7 +4,6 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then set -x fi - if [ -f /geth-accounts/accounts.csv ]; then echo "Using L1 private key from the mounted volume" # Read the private key of L1 account to deploy contract @@ -16,14 +15,34 @@ fi # Set the private key jq --arg privateKey 
"$PRIVATE_KEY_DEPLOYER" '.privateKey = $privateKey' secrets-template.json > secrets.json +# Set the RPC URL export L2_GETH_URL="http://${L2_GETH_HOST}:${L2_GETH_PORT}" jq --arg rpcUrl "$L2_GETH_URL" '.rpcUrl = $rpcUrl' secrets.json > secrets_updated.json && mv secrets_updated.json secrets.json -cd ../hardhat export RPC_URL="${L2_GETH_URL}" +# Check and exit if a deployment already exists (on restarts) +if [ -f ./config.json ]; then + echo "config.json already exists, checking the contract deployment" + + # Read JSON file + DEPLOYMENT_DETAILS=$(cat config.json) + CONTRACT_ADDRESS=$(echo "$DEPLOYMENT_DETAILS" | jq -r '.address') + + cd ../hardhat + if yarn verifyDeployment --network optimism --contract "${CONTRACT_ADDRESS}"; then + echo "Deployment verification successful" + cd ../server + else + echo "Deployment verification failed, please clear MobyMask deployment volume before starting" + exit 1 + fi +fi + +# Wait until balance for deployer account is reflected +cd ../hardhat while true; do - ACCOUNT_BALANCE=$(yarn hardhat --network optimism balance $PRIVATE_KEY_DEPLOYER | grep ETH) + ACCOUNT_BALANCE=$(yarn balance --network optimism $PRIVATE_KEY_DEPLOYER | grep ETH) if [ "$ACCOUNT_BALANCE" != "0.0 ETH" ]; then echo "Account balance updated: $ACCOUNT_BALANCE" diff --git a/app/data/config/watcher-mobymask-v2/server-start.sh b/app/data/config/watcher-mobymask-v2/start-server.sh similarity index 100% rename from app/data/config/watcher-mobymask-v2/server-start.sh rename to app/data/config/watcher-mobymask-v2/start-server.sh diff --git a/app/data/stacks/mobymask-v2/README.md b/app/data/stacks/mobymask-v2/README.md index 513bd431..15652efe 100644 --- a/app/data/stacks/mobymask-v2/README.md +++ b/app/data/stacks/mobymask-v2/README.md @@ -116,16 +116,12 @@ Stop all the services running in background run: laconic-so --stack mobymask-v2 deploy-system down ``` -Clear volumes: +Clear volumes created by this stack: -* List all relevant volumes: +```bash +# List all relevant
volumes +docker volume ls -q --filter "name=.*mobymask_watcher_db_data|.*mobymask_deployment|.*fixturenet_geth_accounts|.*l1_deployment|.*l2_accounts|.*l2_config|.*l2_geth_data" - ```bash - docker volume ls -q --filter "name=.*mobymask_watcher_db_data|.*moby_data_server|.*fixturenet_geth_accounts|.*l1_deployment|.*l2_accounts|.*l2_config|.*l2_geth_data" - ``` - -* Remove all the listed volumes: - - ```bash - docker volume rm $(docker volume ls -q --filter "name=.*mobymask_watcher_db_data|.*moby_data_server|.*fixturenet_geth_accounts|.*l1_deployment|.*l2_accounts|.*l2_config|.*l2_geth_data") - ``` +# Remove all the listed volumes +docker volume rm $(docker volume ls -q --filter "name=.*mobymask_watcher_db_data|.*mobymask_deployment|.*fixturenet_geth_accounts|.*l1_deployment|.*l2_accounts|.*l2_config|.*l2_geth_data") +``` diff --git a/app/data/stacks/mobymask-v2/demo.md b/app/data/stacks/mobymask-v2/demo.md index 8a5049c4..329b554d 100644 --- a/app/data/stacks/mobymask-v2/demo.md +++ b/app/data/stacks/mobymask-v2/demo.md @@ -3,7 +3,7 @@ * Get the root invite link URL for mobymask-app: ```bash - laconic-so --stack mobymask-v2 deploy-system logs mobymask + docker logs -f $(docker ps -aq --filter name="mobymask-1") ``` The invite link is seen at the end of the logs. Example log: @@ -29,19 +29,11 @@ * In the `MESSAGES` tab of other browsers, a message can be seen with the signed invocations. -* In a terminal check logs from the watcher peer container. +* In a terminal, check logs from the watcher peer container: - * Get the container id: - - ```bash - laconic-so --stack mobymask-v2 deploy-system ps | grep mobymask-watcher-server - ``` - - * Check logs: - - ```bash - docker logs -f - ``` + ```bash + docker logs -f $(docker ps -aq --filter name="mobymask-watcher-server") + ``` * It should have received the message, sent transaction to L2 chain and received a transaction receipt for an `invoke` message with block details. 
@@ -74,7 +66,7 @@ * Get the deployed contract address: ```bash - laconic-so --stack mobymask-v2 deploy-system exec mobymask-app "cat src/config.json" + docker exec -it $(docker ps -aq --filter name="mobymask-app") cat src/config.json ``` The value of `address` field is the deployed contract address diff --git a/app/data/stacks/mobymask-v2/mobymask-only.md b/app/data/stacks/mobymask-v2/mobymask-only.md index 843e7533..69c2007b 100644 --- a/app/data/stacks/mobymask-v2/mobymask-only.md +++ b/app/data/stacks/mobymask-v2/mobymask-only.md @@ -59,8 +59,10 @@ To list down and monitor the running containers: ```bash laconic-so --stack mobymask-v2 deploy --include watcher-mobymask-v2 ps + # With status docker ps + # Check logs for a container docker logs -f ``` @@ -72,14 +74,15 @@ See [Tests](./README.md#tests) and [Demo](./README.md#demo) to interact with sta Stop all services running in the background: ```bash -laconic-so --stack mobymask-v2 deploy down --include watcher-mobymask-v2 +laconic-so --stack mobymask-v2 deploy --include watcher-mobymask-v2 down ``` Clear volumes created by this stack: ```bash # List all relevant volumes -docker volume ls -q --filter "name=.*mobymask_watcher_db_data|.*moby_data_server|.*fixturenet_geth_accounts" +docker volume ls -q --filter "name=.*mobymask_watcher_db_data|.*mobymask_deployment|.*fixturenet_geth_accounts" + # Remove all the listed volumes -docker volume rm $(docker volume ls -q --filter "name=.*mobymask_watcher_db_data|.*moby_data_server|.*fixturenet_geth_accounts") +docker volume rm $(docker volume ls -q --filter "name=.*mobymask_watcher_db_data|.*mobymask_deployment|.*fixturenet_geth_accounts") ``` -- 2.45.2 From 6f781ae303915d7612fbc670b4f5f9b521b48121 Mon Sep 17 00:00:00 2001 From: Nabarun Gogoi Date: Thu, 6 Apr 2023 15:17:00 +0530 Subject: [PATCH 08/17] Separate out watcher and web-apps in mobymask-v2 stack (#287) * Separate out watcher and web-apps in mobymask stack * Take L2 RPC endpoint from the env file * Changes to run 
watcher and mobymask web-app separately * Support running watcher without contract deployment and L2 txs * Remove duplicate mobymask params env * Add code comments * Add instructions for running web-apps separately * Self review fixes * Fix timeout for mobymask-app on watcher server --------- Co-authored-by: prathamesh0 --- .../compose/docker-compose-mobymask-app.yml | 31 +++++++ .../compose/docker-compose-peer-test-app.yml | 21 +++++ .../docker-compose-watcher-mobymask-v2.yml | 50 ++-------- .../deploy-and-generate-invite.sh | 18 +++- .../mobymask-app-config.json | 1 + .../watcher-mobymask-v2/mobymask-app-start.sh | 19 +++- .../watcher-mobymask-v2/mobymask-app.env | 1 - .../watcher-mobymask-v2/mobymask-params.env | 14 +++ .../watcher-mobymask-v2/optimism-params.env | 3 +- .../watcher-mobymask-v2/start-server.sh | 28 ++++-- .../watcher-mobymask-v2/test-app-start.sh | 14 +++ .../watcher-config-template.toml | 6 +- .../cerc-mobymask-ui/Dockerfile | 2 +- .../cerc-react-peer/Dockerfile | 2 +- app/data/stacks/mobymask-v2/mobymask-only.md | 27 +++--- app/data/stacks/mobymask-v2/stack.yml | 2 + app/data/stacks/mobymask-v2/web-apps.md | 91 +++++++++++++++++++ 17 files changed, 250 insertions(+), 80 deletions(-) create mode 100644 app/data/compose/docker-compose-mobymask-app.yml create mode 100644 app/data/compose/docker-compose-peer-test-app.yml delete mode 100644 app/data/config/watcher-mobymask-v2/mobymask-app.env create mode 100644 app/data/config/watcher-mobymask-v2/mobymask-params.env create mode 100755 app/data/config/watcher-mobymask-v2/test-app-start.sh create mode 100644 app/data/stacks/mobymask-v2/web-apps.md diff --git a/app/data/compose/docker-compose-mobymask-app.yml b/app/data/compose/docker-compose-mobymask-app.yml new file mode 100644 index 00000000..db95eeac --- /dev/null +++ b/app/data/compose/docker-compose-mobymask-app.yml @@ -0,0 +1,31 @@ +version: '3.2' + +services: + # Builds and serves the MobyMask react-app + mobymask-app: + image: 
cerc/mobymask-ui:local + env_file: + - ../config/watcher-mobymask-v2/mobymask-params.env + # Waits for watcher server to be up before app build + # Required when running with watcher stack to get deployed contract address + command: + - sh + - -c + - ./wait-for-it.sh -h $${WATCHER_HOST} -p $${WATCHER_PORT} -s -t 0 -- ./mobymask-app-start.sh + volumes: + - ../config/wait-for-it.sh:/app/wait-for-it.sh + - ../config/watcher-mobymask-v2/mobymask-app-config.json:/app/src/mobymask-app-config.json + - ../config/watcher-mobymask-v2/mobymask-app-start.sh:/app/mobymask-app-start.sh + - mobymask_deployment:/server + ports: + - "0.0.0.0:3002:3000" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "3000"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 10s + shm_size: '1GB' + +volumes: + mobymask_deployment: diff --git a/app/data/compose/docker-compose-peer-test-app.yml b/app/data/compose/docker-compose-peer-test-app.yml new file mode 100644 index 00000000..3ddbd308 --- /dev/null +++ b/app/data/compose/docker-compose-peer-test-app.yml @@ -0,0 +1,21 @@ +version: '3.2' + +services: + peer-test-app: + # Builds and serves the peer-test react-app + image: cerc/react-peer:local + working_dir: /app/packages/test-app + env_file: + - ../config/watcher-mobymask-v2/mobymask-params.env + command: ["sh", "./test-app-start.sh"] + volumes: + - ../config/watcher-mobymask-v2/test-app-config.json:/app/packages/test-app/src/test-app-config.json + - ../config/watcher-mobymask-v2/test-app-start.sh:/app/packages/test-app/test-app-start.sh + ports: + - "0.0.0.0:3003:3000" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "3000"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 10s diff --git a/app/data/compose/docker-compose-watcher-mobymask-v2.yml b/app/data/compose/docker-compose-watcher-mobymask-v2.yml index fd68212a..2e014471 100644 --- a/app/data/compose/docker-compose-watcher-mobymask-v2.yml +++ b/app/data/compose/docker-compose-watcher-mobymask-v2.yml @@ -1,6 
+1,7 @@ version: '3.2' services: + # Starts the PostgreSQL database for watcher mobymask-watcher-db: restart: unless-stopped image: postgres:14-alpine @@ -21,13 +22,17 @@ services: retries: 15 start_period: 10s + # Deploys MobyMask contract and generates an invite link + # Deployment is skipped if DEPLOYED_CONTRACT env is already set mobymask: image: cerc/mobymask:local working_dir: /app/packages/server env_file: - ../config/watcher-mobymask-v2/optimism-params.env + - ../config/watcher-mobymask-v2/mobymask-params.env environment: - ENV=PROD + # Waits for L2 Optimism Geth and Node servers to be up before deploying contract command: - sh - -c @@ -44,6 +49,7 @@ services: extra_hosts: - "host.docker.internal:host-gateway" + # Starts the mobymask-v2-watcher server mobymask-watcher-server: restart: unless-stopped depends_on: @@ -54,6 +60,7 @@ services: image: cerc/watcher-mobymask-v2:local env_file: - ../config/watcher-mobymask-v2/optimism-params.env + - ../config/watcher-mobymask-v2/mobymask-params.env command: ["sh", "start-server.sh"] volumes: - ../config/watcher-mobymask-v2/watcher-config-template.toml:/app/packages/mobymask-v2-watcher/environments/watcher-config-template.toml @@ -63,6 +70,7 @@ services: - ../config/watcher-mobymask-v2/start-server.sh:/app/packages/mobymask-v2-watcher/start-server.sh - mobymask_deployment:/server - fixturenet_geth_accounts:/geth-accounts:ro + # Expose GQL, metrics and relay node ports ports: - "0.0.0.0:3001:3001" - "0.0.0.0:9001:9001" @@ -76,48 +84,6 @@ services: extra_hosts: - "host.docker.internal:host-gateway" - # TODO: Move to a separate pod - mobymask-app: - depends_on: - mobymask-watcher-server: - condition: service_healthy - mobymask: - condition: service_completed_successfully - image: cerc/mobymask-ui:local - command: ["sh", "mobymask-app-start.sh"] - volumes: - - ../config/watcher-mobymask-v2/mobymask-app.env:/app/.env - - ../config/watcher-mobymask-v2/mobymask-app-config.json:/app/src/mobymask-app-config.json - - 
../config/watcher-mobymask-v2/mobymask-app-start.sh:/app/mobymask-app-start.sh - - mobymask_deployment:/server - ports: - - "0.0.0.0:3002:3000" - healthcheck: - test: ["CMD", "nc", "-v", "localhost", "3000"] - interval: 20s - timeout: 5s - retries: 15 - start_period: 10s - shm_size: '1GB' - - peer-test-app: - depends_on: - mobymask-watcher-server: - condition: service_healthy - image: cerc/react-peer:local - working_dir: /app/packages/test-app - command: ["sh", "-c", "yarn build && serve -s build"] - volumes: - - ../config/watcher-mobymask-v2/test-app-config.json:/app/packages/test-app/src/config.json - ports: - - "0.0.0.0:3003:3000" - healthcheck: - test: ["CMD", "nc", "-v", "localhost", "3000"] - interval: 20s - timeout: 5s - retries: 15 - start_period: 10s - volumes: mobymask_watcher_db_data: mobymask_deployment: diff --git a/app/data/config/watcher-mobymask-v2/deploy-and-generate-invite.sh b/app/data/config/watcher-mobymask-v2/deploy-and-generate-invite.sh index 2a3000ee..743b1525 100755 --- a/app/data/config/watcher-mobymask-v2/deploy-and-generate-invite.sh +++ b/app/data/config/watcher-mobymask-v2/deploy-and-generate-invite.sh @@ -4,6 +4,8 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then set -x fi +echo "Using L2 RPC endpoint ${L2_GETH_RPC}" + if [ -f /geth-accounts/accounts.csv ]; then echo "Using L1 private key from the mounted volume" # Read the private key of L1 account to deploy contract @@ -16,10 +18,16 @@ fi jq --arg privateKey "$PRIVATE_KEY_DEPLOYER" '.privateKey = $privateKey' secrets-template.json > secrets.json # Set the RPC URL -export L2_GETH_URL="http://${L2_GETH_HOST}:${L2_GETH_PORT}" -jq --arg rpcUrl "$L2_GETH_URL" '.rpcUrl = $rpcUrl' secrets.json > secrets_updated.json && mv secrets_updated.json secrets.json +jq --arg rpcUrl "$L2_GETH_RPC" '.rpcUrl = $rpcUrl' secrets.json > secrets_updated.json && mv secrets_updated.json secrets.json -export RPC_URL="${L2_GETH_URL}" +export RPC_URL="${L2_GETH_RPC}" + +# Check if DEPLOYED_CONTRACT environment variable 
set to skip contract deployment +if [[ -n "$DEPLOYED_CONTRACT" ]]; then + echo "DEPLOYED_CONTRACT is set to '$DEPLOYED_CONTRACT'" + echo "Exiting without deploying contract" + exit 0 +fi # Check and exit if a deployment already exists (on restarts) if [ -f ./config.json ]; then @@ -39,10 +47,10 @@ if [ -f ./config.json ]; then fi fi -# Wait until balance for deployer account is reflected +# Wait until balance for deployer account is updated cd ../hardhat while true; do - ACCOUNT_BALANCE=$(yarn balance --network optimism $PRIVATE_KEY_DEPLOYER | grep ETH) + ACCOUNT_BALANCE=$(yarn balance --network optimism "$PRIVATE_KEY_DEPLOYER" | grep ETH) if [ "$ACCOUNT_BALANCE" != "0.0 ETH" ]; then echo "Account balance updated: $ACCOUNT_BALANCE" diff --git a/app/data/config/watcher-mobymask-v2/mobymask-app-config.json b/app/data/config/watcher-mobymask-v2/mobymask-app-config.json index 3b00674f..2ccb816d 100644 --- a/app/data/config/watcher-mobymask-v2/mobymask-app-config.json +++ b/app/data/config/watcher-mobymask-v2/mobymask-app-config.json @@ -1,4 +1,5 @@ { + "name": "MobyMask", "relayNodes": [ "/ip4/127.0.0.1/tcp/9090/ws/p2p/12D3KooWSPCsVkHVyLQoCqhu2YRPvvM7o6r6NRYyLM5zeA6Uig5t" ], diff --git a/app/data/config/watcher-mobymask-v2/mobymask-app-start.sh b/app/data/config/watcher-mobymask-v2/mobymask-app-start.sh index efc4ed96..680e2e8a 100755 --- a/app/data/config/watcher-mobymask-v2/mobymask-app-start.sh +++ b/app/data/config/watcher-mobymask-v2/mobymask-app-start.sh @@ -4,9 +4,22 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then set -x fi -# Merging config files to get deployed contract address -jq -s '.[0] * .[1]' /app/src/mobymask-app-config.json /server/config.json > /app/src/config.json +# Use config from mounted volume if available (when running web-app along with watcher stack) +if [ -f /server/config.json ]; then + echo "Merging config for deployed contract from mounted volume" + # Merging config files to get deployed contract address + jq -s '.[0] * .[1]' 
/app/src/mobymask-app-config.json /server/config.json > /app/src/config.json +else + echo "Setting deployed contract details from env" -npm run build + # Set config values from environment variables + jq --arg address "$DEPLOYED_CONTRACT" \ + --argjson chainId $CHAIN_ID \ + --argjson relayNodes "$RELAY_NODES" \ + '.address = $address | .chainId = $chainId | .relayNodes = $relayNodes' \ + /app/src/mobymask-app-config.json > /app/src/config.json +fi + +REACT_APP_WATCHER_URI="$APP_WATCHER_URL/graphql" npm run build serve -s build diff --git a/app/data/config/watcher-mobymask-v2/mobymask-app.env b/app/data/config/watcher-mobymask-v2/mobymask-app.env deleted file mode 100644 index 698d3502..00000000 --- a/app/data/config/watcher-mobymask-v2/mobymask-app.env +++ /dev/null @@ -1 +0,0 @@ -REACT_APP_WATCHER_URI=http://localhost:3001/graphql diff --git a/app/data/config/watcher-mobymask-v2/mobymask-params.env b/app/data/config/watcher-mobymask-v2/mobymask-params.env new file mode 100644 index 00000000..890c183b --- /dev/null +++ b/app/data/config/watcher-mobymask-v2/mobymask-params.env @@ -0,0 +1,14 @@ +# Change if pointing web app to external watcher endpoint +WATCHER_HOST="mobymask-watcher-server" +WATCHER_PORT=3001 +APP_WATCHER_URL="http://localhost:3001" + +# Set deployed MobyMask contract address to avoid deploying contract in stack +# mobymask-app will use this contract address in config if run separately +DEPLOYED_CONTRACT= + +# Chain ID is used by mobymask web-app for txs +CHAIN_ID=42069 + +# Set relay nodes to be used by web-apps +RELAY_NODES=["/ip4/127.0.0.1/tcp/9090/ws/p2p/12D3KooWSPCsVkHVyLQoCqhu2YRPvvM7o6r6NRYyLM5zeA6Uig5t"] diff --git a/app/data/config/watcher-mobymask-v2/optimism-params.env b/app/data/config/watcher-mobymask-v2/optimism-params.env index bafb37a8..489dd879 100644 --- a/app/data/config/watcher-mobymask-v2/optimism-params.env +++ b/app/data/config/watcher-mobymask-v2/optimism-params.env @@ -1,9 +1,10 @@ # Change if pointing to an external 
optimism geth endpoint # L2 endpoints -# TODO: Add another env for complete URL to handle https +L2_GETH_RPC="http://op-geth:8545" L2_GETH_HOST="op-geth" L2_GETH_PORT=8545 + L2_NODE_HOST="op-node" L2_NODE_PORT=8547 diff --git a/app/data/config/watcher-mobymask-v2/start-server.sh b/app/data/config/watcher-mobymask-v2/start-server.sh index 5fe9ab84..4030e522 100755 --- a/app/data/config/watcher-mobymask-v2/start-server.sh +++ b/app/data/config/watcher-mobymask-v2/start-server.sh @@ -4,8 +4,15 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then set -x fi -# Assign deployed contract address from server config -CONTRACT_ADDRESS=$(jq -r '.address' /server/config.json | tr -d '"') +echo "Using L2 RPC endpoint ${L2_GETH_RPC}" + +# Use contract address from environment variable or set from config.json in mounted volume +if [ -n "$DEPLOYED_CONTRACT" ]; then + CONTRACT_ADDRESS="${DEPLOYED_CONTRACT}" +else + # Assign deployed contract address from server config (created by mobymask container after deploying contract) + CONTRACT_ADDRESS=$(jq -r '.address' /server/config.json | tr -d '"') +fi if [ -f /geth-accounts/accounts.csv ]; then echo "Using L1 private key from the mounted volume" @@ -15,11 +22,20 @@ else echo "Using PRIVATE_KEY_PEER from env" fi -sed "s/REPLACE_WITH_PRIVATE_KEY/${PRIVATE_KEY_PEER}/" environments/watcher-config-template.toml > environments/local.toml -sed -i "s/REPLACE_WITH_CONTRACT_ADDRESS/${CONTRACT_ADDRESS}/" environments/local.toml +# Set private key and contract address for watcher peer txs to L2 only if PRIVATE_KEY_PEER variable is set +if [ -n "$PRIVATE_KEY_PEER" ]; then + # Read in config template TOML file and modify it + CONTENT=$(cat environments/watcher-config-template.toml) + NEW_CONTENT=$(echo "$CONTENT" | sed -E "/\[metrics\]/i \\\n\n [server.p2p.peer.l2TxConfig]\n privateKey = \"${PRIVATE_KEY_PEER}\"\n contractAddress = \"${CONTRACT_ADDRESS}\"\n") -export L2_GETH_URL="http://${L2_GETH_HOST}:${L2_GETH_PORT}" -sed -i 
's|REPLACE_WITH_L2_GETH_URL|'"${L2_GETH_URL}"'|' environments/local.toml + # Write the modified content to a watcher config file + echo "$NEW_CONTENT" > environments/local.toml + + sed -i 's|REPLACE_WITH_L2_GETH_RPC_ENDPOINT|'"${L2_GETH_RPC}"'|' environments/local.toml +else + # Copy template config to watcher config without setting params for peer L2 txs + cp environments/watcher-config-template.toml environments/local.toml +fi echo 'yarn server' yarn server diff --git a/app/data/config/watcher-mobymask-v2/test-app-start.sh b/app/data/config/watcher-mobymask-v2/test-app-start.sh new file mode 100755 index 00000000..de6ccc7c --- /dev/null +++ b/app/data/config/watcher-mobymask-v2/test-app-start.sh @@ -0,0 +1,14 @@ +#!/bin/sh +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +# Set relay nodes in config from RELAY_NODES environment variable +jq --argjson relayNodes "$RELAY_NODES" \ + '.relayNodes = $relayNodes' \ + ./src/test-app-config.json > ./src/config.json + +yarn build + +serve -s build diff --git a/app/data/config/watcher-mobymask-v2/watcher-config-template.toml b/app/data/config/watcher-mobymask-v2/watcher-config-template.toml index 05554e3a..6b0223c7 100644 --- a/app/data/config/watcher-mobymask-v2/watcher-config-template.toml +++ b/app/data/config/watcher-mobymask-v2/watcher-config-template.toml @@ -36,10 +36,6 @@ peerIdFile = './peer-id.json' enableDebugInfo = true - [server.p2p.peer.l2TxConfig] - privateKey = 'REPLACE_WITH_PRIVATE_KEY' - contractAddress = 'REPLACE_WITH_CONTRACT_ADDRESS' - [metrics] host = "0.0.0.0" port = 9000 @@ -59,7 +55,7 @@ [upstream] [upstream.ethServer] gqlApiEndpoint = "http://ipld-eth-server:8083/graphql" - rpcProviderEndpoint = "REPLACE_WITH_L2_GETH_URL" + rpcProviderEndpoint = "REPLACE_WITH_L2_GETH_RPC_ENDPOINT" blockDelayInMilliSecs = 60000 [upstream.cache] diff --git a/app/data/container-build/cerc-mobymask-ui/Dockerfile b/app/data/container-build/cerc-mobymask-ui/Dockerfile index 68e3c77c..c04b001d 100644 --- 
a/app/data/container-build/cerc-mobymask-ui/Dockerfile +++ b/app/data/container-build/cerc-mobymask-ui/Dockerfile @@ -1,6 +1,6 @@ FROM node:18.15.0-alpine3.16 -RUN apk --update --no-cache add make git jq +RUN apk --update --no-cache add make git jq bash WORKDIR /app diff --git a/app/data/container-build/cerc-react-peer/Dockerfile b/app/data/container-build/cerc-react-peer/Dockerfile index cc852441..fcc198f3 100644 --- a/app/data/container-build/cerc-react-peer/Dockerfile +++ b/app/data/container-build/cerc-react-peer/Dockerfile @@ -1,6 +1,6 @@ FROM node:18.15.0-alpine3.16 -RUN apk --update --no-cache add make git python3 +RUN apk --update --no-cache add make git python3 jq WORKDIR /app diff --git a/app/data/stacks/mobymask-v2/mobymask-only.md b/app/data/stacks/mobymask-v2/mobymask-only.md index 69c2007b..246d9000 100644 --- a/app/data/stacks/mobymask-v2/mobymask-only.md +++ b/app/data/stacks/mobymask-v2/mobymask-only.md @@ -4,30 +4,21 @@ Instructions to setup and deploy MobyMask v2 watcher independently ## Setup -Prerequisite: An L2 Optimism RPC endpoint +Prerequisite: L2 Optimism Geth and Node RPC endpoints Clone required repositories: ```bash -laconic-so --stack mobymask-v2 setup-repositories --include cerc-io/MobyMask,cerc-io/watcher-ts,cerc-io/react-peer,cerc-io/mobymask-ui +laconic-so --stack mobymask-v2 setup-repositories --include cerc-io/MobyMask,cerc-io/watcher-ts ``` Checkout to the required versions and branches in repos: -```bash ```bash # watcher-ts cd ~/cerc/watcher-ts git checkout v0.2.34 -# react-peer -cd ~/cerc/react-peer -git checkout v0.2.31 - -# mobymask-ui -cd ~/cerc/mobymask-ui -git checkout laconic - # MobyMask cd ~/cerc/MobyMask git checkout v0.1.2 @@ -36,20 +27,24 @@ git checkout v0.1.2 Build the container images: ```bash -laconic-so --stack mobymask-v2 build-containers --include cerc/watcher-mobymask-v2,cerc/react-peer,cerc/mobymask-ui,cerc/mobymask +laconic-so --stack mobymask-v2 build-containers --include 
cerc/watcher-mobymask-v2,cerc/mobymask ``` This should create the required docker images in the local image registry ## Deploy -Update the [optimism-params.env](../../config/watcher-mobymask-v2/optimism-params.env) file with Optimism endpoints and other params if running Optimism separately +### Configuration +* In [mobymask-params.env](../../config/watcher-mobymask-v2/mobymask-params.env) file set `DEPLOYED_CONTRACT` to existing deployed mobymask contract address + * Setting `DEPLOYED_CONTRACT` will skip contract deployment when running stack +* Update the [optimism-params.env](../../config/watcher-mobymask-v2/optimism-params.env) file with Optimism endpoints and other params for the Optimism running separately + * If `PRIVATE_KEY_PEER` is not set the inline watcher peer will not send txs to L2 on receiving P2P network messages * NOTE: * Stack Orchestrator needs to be run in [`dev`](/docs/CONTRIBUTING.md#install-developer-mode) mode to be able to edit the env file * If Optimism is running on the host machine, use `host.docker.internal` as the hostname to access the host port -Deploy the stack: +### Deploy the stack ```bash laconic-so --stack mobymask-v2 deploy --include watcher-mobymask-v2 up @@ -67,7 +62,9 @@ docker ps docker logs -f ``` -See [Tests](./README.md#tests) and [Demo](./README.md#demo) to interact with stack +## Tests + +See [Tests](./README.md#tests) ## Clean up diff --git a/app/data/stacks/mobymask-v2/stack.yml b/app/data/stacks/mobymask-v2/stack.yml index cdcbd663..248351b1 100644 --- a/app/data/stacks/mobymask-v2/stack.yml +++ b/app/data/stacks/mobymask-v2/stack.yml @@ -27,3 +27,5 @@ pods: - fixturenet-eth - fixturenet-optimism - watcher-mobymask-v2 + - mobymask-app + - peer-test-app diff --git a/app/data/stacks/mobymask-v2/web-apps.md b/app/data/stacks/mobymask-v2/web-apps.md new file mode 100644 index 00000000..052a7cb7 --- /dev/null +++ b/app/data/stacks/mobymask-v2/web-apps.md @@ -0,0 +1,91 @@ +# Web Apps + +Instructions to setup and deploy 
MobyMask and Peer Test web apps + +## Setup + +Prerequisite: Watcher with GQL and relay node endpoints + +Clone required repositories: + +```bash +laconic-so --stack mobymask-v2 setup-repositories --include cerc-io/react-peer,cerc-io/mobymask-ui +``` + +Checkout to the required versions and branches in repos: + +```bash +# react-peer +cd ~/cerc/react-peer +git checkout v0.2.31 + +# mobymask-ui +cd ~/cerc/mobymask-ui +git checkout laconic +``` + +Build the container images: + +```bash +laconic-so --stack mobymask-v2 build-containers --include cerc/react-peer-v2,cerc/mobymask-ui +``` + +This should create the required docker images in the local image registry + +## Deploy + +### Configuration + +* Update the [mobymask-params.env](../../config/watcher-mobymask-v2/mobymask-params.env) file with watcher endpoints and other params required by the web-apps + * `WATCHER_HOST` and `WATCHER_PORT` is used to check if watcher is up before building and deploying mobymask-app + * `APP_WATCHER_URL` is used by mobymask-app to make GQL queries + * `DEPLOYED_CONTRACT` and `CHAIN_ID` is used by mobymask-app in app config when creating messgaes for L2 txs + * `RELAY_NODES` is used by the web-apps to connect to the relay nodes (run in watcher) +* NOTE: + * Stack Orchestrator needs to be run in [`dev`](/docs/CONTRIBUTING.md#install-developer-mode) mode to be able to edit the env file + * If watcher is running on the host machine, use `host.docker.internal` as the hostname to access the host port + +### Deploy the stack + +For running mobymask-app +```bash +laconic-so --stack mobymask-v2 deploy --include mobymask-app up +``` + +For running peer-test-app +```bash +laconic-so --stack mobymask-v2 deploy --include peer-test-app up +``` + +To list down and monitor the running containers: + +```bash +docker ps + +# Check logs for a container +docker logs -f +``` + +## Clean up + +Stop all services running in the background: + +For mobymask-app +```bash +laconic-so --stack mobymask-v2 deploy 
--include mobymask-app down +``` + +For peer-test-app +```bash +laconic-so --stack mobymask-v2 deploy --include peer-test-app down +``` + +Clear volumes created by this stack: + +```bash +# List all relevant volumes +docker volume ls -q --filter "name=.*mobymask_deployment" + +# Remove all the listed volumes +docker volume rm $(docker volume ls -q --filter "name=.*mobymask_deployment") +``` -- 2.45.2 From 0432a4bf29f7055b229d25352713699fe10972e4 Mon Sep 17 00:00:00 2001 From: David Boreham Date: Thu, 6 Apr 2023 07:19:08 -0600 Subject: [PATCH 09/17] Try to enable CI --- .gitea/workflows/test.yml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 .gitea/workflows/test.yml diff --git a/.gitea/workflows/test.yml b/.gitea/workflows/test.yml new file mode 100644 index 00000000..77dd4c86 --- /dev/null +++ b/.gitea/workflows/test.yml @@ -0,0 +1,14 @@ +name: Tests + +on: + pull_request: + branches: '*' + push: + branches: '*' + +jobs: + test: + runs-on: ubuntu-latest + steps: + - name: Clone project repository + uses: actions/checkout@v3 -- 2.45.2 From 7dd07beec60966e8a583cd277c82521816b9e874 Mon Sep 17 00:00:00 2001 From: David Boreham Date: Thu, 6 Apr 2023 07:28:13 -0600 Subject: [PATCH 10/17] Try adding job name --- .gitea/workflows/test.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitea/workflows/test.yml b/.gitea/workflows/test.yml index 77dd4c86..47443031 100644 --- a/.gitea/workflows/test.yml +++ b/.gitea/workflows/test.yml @@ -8,6 +8,7 @@ on: jobs: test: + name: "Run basic test suite" runs-on: ubuntu-latest steps: - name: Clone project repository -- 2.45.2 From 90210e439fa76bc0d0cfc39201c9169fd4f311b2 Mon Sep 17 00:00:00 2001 From: David Boreham Date: Thu, 6 Apr 2023 13:20:22 -0600 Subject: [PATCH 11/17] Add build --- .gitea/workflows/test.yml | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/.gitea/workflows/test.yml b/.gitea/workflows/test.yml index 47443031..e1a7c924 100644 --- 
a/.gitea/workflows/test.yml +++ b/.gitea/workflows/test.yml @@ -11,5 +11,15 @@ jobs: name: "Run basic test suite" runs-on: ubuntu-latest steps: - - name: Clone project repository + - name: "Clone project repository" uses: actions/checkout@v3 + - name: "Install Python" + uses: cerc-io/setup-python@v4 + with: + python-version: '3.8' + - name: "Check Python version" + run: python3 --version + - name: "Install shiv" + run: pip install shiv + - name: "Build local shiv bundle" + run: ./scripts/build_shiv_package.sh -- 2.45.2 From e7a4de594056e85542bc69d4451498d7266e9511 Mon Sep 17 00:00:00 2001 From: David Boreham Date: Thu, 6 Apr 2023 13:24:17 -0600 Subject: [PATCH 12/17] Bump CI --- .gitea/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitea/workflows/test.yml b/.gitea/workflows/test.yml index e1a7c924..e7e0e6dd 100644 --- a/.gitea/workflows/test.yml +++ b/.gitea/workflows/test.yml @@ -21,5 +21,5 @@ jobs: run: python3 --version - name: "Install shiv" run: pip install shiv - - name: "Build local shiv bundle" + - name: "Build local shiv package" run: ./scripts/build_shiv_package.sh -- 2.45.2 From b850322f57aaa1b789656d3ab1f8286b0893f581 Mon Sep 17 00:00:00 2001 From: David Boreham Date: Thu, 6 Apr 2023 13:32:56 -0600 Subject: [PATCH 13/17] Add the smoke test --- .gitea/workflows/test.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitea/workflows/test.yml b/.gitea/workflows/test.yml index e7e0e6dd..3615aee6 100644 --- a/.gitea/workflows/test.yml +++ b/.gitea/workflows/test.yml @@ -23,3 +23,5 @@ jobs: run: pip install shiv - name: "Build local shiv package" run: ./scripts/build_shiv_package.sh + - name: "Run smoke tests" + run: ./tests/run-smoke-test.sh -- 2.45.2 From 2e2927f7885f86690dd8017fdcd6e9c182dc9e3c Mon Sep 17 00:00:00 2001 From: David Boreham Date: Thu, 6 Apr 2023 13:35:16 -0600 Subject: [PATCH 14/17] Use correct path --- .gitea/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/.gitea/workflows/test.yml b/.gitea/workflows/test.yml index 3615aee6..97c4ecc3 100644 --- a/.gitea/workflows/test.yml +++ b/.gitea/workflows/test.yml @@ -24,4 +24,4 @@ jobs: - name: "Build local shiv package" run: ./scripts/build_shiv_package.sh - name: "Run smoke tests" - run: ./tests/run-smoke-test.sh + run: ./tests/smoke-test/run-smoke-test.sh -- 2.45.2 From 4f137915bb200a0cf8cacae2d8a6b0d09cf01276 Mon Sep 17 00:00:00 2001 From: David Boreham Date: Thu, 6 Apr 2023 13:36:58 -0600 Subject: [PATCH 15/17] Change step name --- .gitea/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitea/workflows/test.yml b/.gitea/workflows/test.yml index 97c4ecc3..e551240d 100644 --- a/.gitea/workflows/test.yml +++ b/.gitea/workflows/test.yml @@ -17,7 +17,7 @@ jobs: uses: cerc-io/setup-python@v4 with: python-version: '3.8' - - name: "Check Python version" + - name: "Print Python version" run: python3 --version - name: "Install shiv" run: pip install shiv -- 2.45.2 From 9e2b140e101b8545d248af62eb22e61f2e1235b9 Mon Sep 17 00:00:00 2001 From: David Boreham Date: Thu, 6 Apr 2023 13:50:50 -0600 Subject: [PATCH 16/17] Ubuntu install script (#289) * Copy of Zach's install script * Few small fixes after testing on DO droplet * Update scripts * Rename script * Add sudo notice * Fix typo * Fix another typo * Update docker instructions --- scripts/developer-mode-setup.sh | 11 +++ scripts/first_time_setup.sh | 8 --- scripts/quick-install-ubuntu.sh | 118 ++++++++++++++++++++++++++++++++ 3 files changed, 129 insertions(+), 8 deletions(-) create mode 100755 scripts/developer-mode-setup.sh delete mode 100755 scripts/first_time_setup.sh create mode 100755 scripts/quick-install-ubuntu.sh diff --git a/scripts/developer-mode-setup.sh b/scripts/developer-mode-setup.sh new file mode 100755 index 00000000..997c6173 --- /dev/null +++ b/scripts/developer-mode-setup.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash +# Script to automate the steps needed to make a cloned project 
repo runnable on the path +# (beware of PATH having some other file with the same name ahead of ours) +if [[ -n "$CERC_SCRIPT_DEBUG" ]]; then + set -x + echo PATH is $PATH +fi +python3 -m venv venv +source ./venv/bin/activate +pip install --editable . +pip install shiv diff --git a/scripts/first_time_setup.sh b/scripts/first_time_setup.sh deleted file mode 100755 index e8900ce7..00000000 --- a/scripts/first_time_setup.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -python3 -m venv venv -source ./venv/bin/activate -pip install --editable . -pip install shiv -shiv -c laconic-so -o laconic-so . -./laconic-so --verbose --local-stack setup-repositories diff --git a/scripts/quick-install-ubuntu.sh b/scripts/quick-install-ubuntu.sh new file mode 100755 index 00000000..3611abdf --- /dev/null +++ b/scripts/quick-install-ubuntu.sh @@ -0,0 +1,118 @@ +#!/usr/bin/env bash +if [[ -n "$CERC_SCRIPT_DEBUG" ]]; then + set -x +fi + +install_dir=~/bin + +# First display a reasonable warning to the user unless run with -y +if ! [[ $# -eq 1 && $1 == "-y" ]]; then + echo "**************************************************************************************" + echo "This script requires sudo privilege. It installs Laconic Stack Orchestrator" + echo "into: ${install_dir}. It also *removes* any existing docker installed on" + echo "this machine and then installs the latest docker release as well as other" + echo "required packages." + echo "Only proceed if you are sure you want to make those changes to this machine." + echo "**************************************************************************************" + read -p "Are you sure you want to proceed? " -n 1 -r + echo + if [[ ! 
$REPLY =~ ^[Yy]$ ]]; then + exit 1 + fi +fi + +# This script assumes root permissions on a fresh Ubuntu Digital Ocean droplet +# with these recommended specs: 16 GB Memory / 8 Intel vCPUs / 320 GB Disk + +# TODO: +# Check python3 is available +# Check machine resources are sufficient + +# dismiss the popups +export DEBIAN_FRONTEND=noninteractive + +## https://docs.docker.com/engine/install/ubuntu/ +## https://superuser.com/questions/518859/ignore-packages-that-are-not-currently-installed-when-using-apt-get-remove1 +packages_to_remove="docker docker-engine docker.io containerd runc" +installed_packages_to_remove="" +for package_to_remove in $(echo $packages_to_remove); do + $(dpkg --info $package_to_remove &> /dev/null) + if [[ $? -eq 0 ]]; then + installed_packages_to_remove="$installed_packages_to_remove $package_to_remove" + fi +done + +# Enable stop on error now, since we needed it off for the code above +set -euo pipefail ## https://vaneyckt.io/posts/safer_bash_scripts_with_set_euxo_pipefail/ + +if [[ -n "${installed_packages_to_remove}" ]]; then + echo "**************************************************************************************" + echo "Removing existing docker packages" + sudo apt -y remove $installed_packages_to_remove +fi + +echo "**************************************************************************************" +echo "Installing dependencies" +sudo apt -y update + +# laconic-so depends on jq +sudo apt -y install jq +# laconic-so depends on git +sudo apt -y install git +# curl used below +sudo apt -y install jq +# docker repo add depends on gnupg and updated ca-certificates +sudo apt -y install ca-certificates gnupg + +# Add dockerco package repository +sudo mkdir -m 0755 -p /etc/apt/keyrings +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg +echo \ + "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \ + "$(. 
/etc/os-release && echo "$VERSION_CODENAME")" stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + +# Penny in the update jar +sudo apt -y update + +echo "**************************************************************************************" +echo "Installing docker" +sudo apt -y install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin + +# Allow the current user to use Docker +sudo usermod -aG docker $USER + +echo "**************************************************************************************" +echo "Installing laconic-so" +# install latest `laconic-so` +install_filename=${install_dir}/laconic-so +mkdir -p ${install_dir} +curl -L -o ${install_filename} https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so +chmod +x ${install_filename} + +echo "**************************************************************************************" +# Check if our PATH line is already there +path_add_command="export PATH=\$PATH:${install_dir}" +if ! 
grep -q "${path_add_command}" ~/.profile +then + echo "Adding this line to the end of ~/.profile:" + echo ${path_add_command} + echo ${path_add_command} >> ~/.profile +fi + +echo "**************************************************************************************" +# PATH set here for commands run in this script +export PATH=$PATH:${install_dir} +echo Installed laconic-so version: $(laconic-so version) + +echo "**************************************************************************************" +echo "The Laconic Stack Orchestrator program laconic-so has been installed at ${install_filename}" +echo "The directory ${install_dir} has been added to PATH in *new* shells via ~/.profile" +echo "**************************************************************************************" +# Message the user to check docker is working for them +echo "Please log in again (docker will not work in this current shell) then:" +echo "test that docker is correctly installed and working for your user by running the" +echo "command below (it should print a message beginning \"Hello from Docker!\"):" +echo +echo "docker run hello-world" +echo -- 2.45.2 From 579fa6ff6632c6b73831e479e5642ddaa4610637 Mon Sep 17 00:00:00 2001 From: zramsay Date: Thu, 6 Apr 2023 19:28:27 -0400 Subject: [PATCH 17/17] more little updates --- app/data/stacks/build-support/README.md | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/app/data/stacks/build-support/README.md b/app/data/stacks/build-support/README.md index e3993402..b4090d76 100644 --- a/app/data/stacks/build-support/README.md +++ b/app/data/stacks/build-support/README.md @@ -12,10 +12,9 @@ To use a user-supplied registry set these environment variables: Leave `CERC_NPM_REGISTRY_URL` un-set to use the local gitea registry. ### 1. 
Build support containers -``` -$ laconic-so --stack build-support build-containers -``` -Note that the scheme/gerbil builder container can take a while to build so if you aren't going to build scheme projects it can be skipped with: + +Note: the scheme/gerbil container is excluded as it isn't currently required for the package registry. + ``` $ laconic-so --stack build-support build-containers --exclude cerc/builder-gerbil ``` @@ -37,6 +36,9 @@ Gitea was configured to use host name: gitea.local, ensure that this resolves to Success, gitea is properly initialized $ ``` + +Note: the above commands can take several minutes depending on the specs of your machine. + ### 3. Configure the hostname gitea.local How to do this is OS-dependent but usually involves editing a `hosts` file. For example on Linux add this line to the file `/etc/hosts` (needs sudo): ``` @@ -56,5 +58,5 @@ Now npm packages can be built: Ensure that `CERC_NPM_AUTH_TOKEN` is set with the token printed above when the package-registry stack was deployed (the actual token value will be different than shown in this example): ``` $ export CERC_NPM_AUTH_TOKEN=84fe66a73698bf11edbdccd0a338236b7d1d5c45 -$ laconic-so build-npms --include laconic-sdk +$ laconic-so build-npms --include laconic-sdk,laconic-registry-cli ``` -- 2.45.2