Add ability to run Optimism fixturenet with external L1 endpoint #273

Merged
prathamesh0 merged 13 commits from pm-composable-pods into main 2023-04-04 09:23:28 +00:00
19 changed files with 349 additions and 99 deletions

View File

@ -58,12 +58,6 @@ services:
environment:
RUN_BOOTNODE: "true"
image: cerc/fixturenet-eth-lighthouse:local
healthcheck:
test: ["CMD", "/scripts/status-internal.sh"]
interval: 10s
timeout: 100s
retries: 3
start_period: 15s
fixturenet-eth-lighthouse-1:
hostname: fixturenet-eth-lighthouse-1

View File

@ -1,40 +1,46 @@
version: '3.7'
services:
# Generates and funds the accounts required when setting up the L2 chain (outputs to volume l2_accounts)
# Creates / updates the configuration for L1 contracts deployment
# Deploys the L1 smart contracts (outputs to volume l1_deployment)
fixturenet-optimism-contracts:
hostname: fixturenet-optimism-contracts
image: cerc/optimism-contracts:local
depends_on:
fixturenet-eth-geth-1:
condition: service_healthy
fixturenet-eth-bootnode-lighthouse:
condition: service_healthy
environment:
CHAIN_ID: 1212
L1_RPC: "http://fixturenet-eth-geth-1:8545"
command: "./run.sh"
env_file:
- ../config/fixturenet-optimism/l1-params.env
# Waits for L1 endpoint to be up before running the script
command: |
"./wait-for-it.sh -h $${L1_HOST} -p $${L1_PORT} -s -t 60 -- ./run.sh"
volumes:
- ../config/fixturenet-optimism/optimism-contracts/rekey-json.ts:/app/packages/contracts-bedrock/tasks/rekey-json.ts
- ../config/fixturenet-optimism/optimism-contracts/send-balance.ts:/app/packages/contracts-bedrock/tasks/send-balance.ts
- ../config/wait-for-it.sh:/app/packages/contracts-bedrock/wait-for-it.sh
- ../container-build/cerc-optimism-contracts/hardhat-tasks/rekey-json.ts:/app/packages/contracts-bedrock/tasks/rekey-json.ts
- ../container-build/cerc-optimism-contracts/hardhat-tasks/send-balance.ts:/app/packages/contracts-bedrock/tasks/send-balance.ts
- ../config/fixturenet-optimism/optimism-contracts/update-config.js:/app/packages/contracts-bedrock/update-config.js
- ../config/fixturenet-optimism/optimism-contracts/run.sh:/app/packages/contracts-bedrock/run.sh
- fixturenet_geth_accounts:/geth-accounts:ro
- l2_accounts:/l2-accounts
- l1_deployment:/app/packages/contracts-bedrock
extra_hosts:
- "host.docker.internal:host-gateway"
# Generates the config files required for L2 (outputs to volume op_node_data)
op-node-l2-config-gen:
image: cerc/optimism-op-node:local
depends_on:
fixturenet-optimism-contracts:
condition: service_completed_successfully
environment:
L1_RPC: "http://fixturenet-eth-geth-1:8545"
env_file:
- ../config/fixturenet-optimism/l1-params.env
volumes:
- ../config/fixturenet-optimism/generate-l2-config.sh:/app/generate-l2-config.sh
- l1_deployment:/contracts-bedrock:ro
- op_node_data:/app
command: ["sh", "/app/generate-l2-config.sh"]
extra_hosts:
- "host.docker.internal:host-gateway"
# Initializes and runs the L2 execution client
op-geth:
image: cerc/optimism-l2geth:local
depends_on:
@ -55,9 +61,10 @@ services:
retries: 10
start_period: 10s
# Runs the L2 consensus client (Sequencer node)
op-node:
environment:
L1_RPC: "http://fixturenet-eth-geth-1:8545"
env_file:
- ../config/fixturenet-optimism/l1-params.env
depends_on:
op-geth:
condition: service_healthy
@ -75,25 +82,32 @@ services:
timeout: 10s
retries: 10
start_period: 10s
extra_hosts:
- "host.docker.internal:host-gateway"
# Runs the batcher (takes transactions from the Sequencer and publishes them to L1)
op-batcher:
environment:
L1_RPC: "http://fixturenet-eth-geth-1:8545"
env_file:
- ../config/fixturenet-optimism/l1-params.env
depends_on:
fixturenet-eth-geth-1:
condition: service_healthy
op-node:
condition: service_healthy
op-geth:
condition: service_healthy
image: cerc/optimism-op-batcher:local
volumes:
- ../config/wait-for-it.sh:/wait-for-it.sh
- ../config/fixturenet-optimism/run-op-batcher.sh:/run-op-batcher.sh
- l2_accounts:/l2-accounts:ro
entrypoint: "sh"
command: "/run-op-batcher.sh"
entrypoint: ["sh", "-c"]
# Waits for L1 endpoint to be up before running the batcher
command: |
"/wait-for-it.sh -h $${L1_HOST} -p $${L1_PORT} -s -t 60 -- /run-op-batcher.sh"
extra_hosts:
- "host.docker.internal:host-gateway"
volumes:
op_node_data:
l2_accounts:
fixturenet_geth_accounts:
l1_deployment:
l2_accounts:
op_node_data:

View File

@ -33,12 +33,12 @@ services:
# TODO: Configure env file for ETH RPC URL & private key
environment:
- ENV=PROD
command: ["sh", "./deploy-invite.sh"]
command: ["sh", "./deploy-and-generate-invite.sh"]
volumes:
# TODO: add a script to set rpc endpoint from env
# TODO: add a script to set RPC endpoint from env
# add manually if running separately
- ../config/watcher-mobymask-v2/secrets-template.json:/app/packages/server/secrets-template.json
- ../config/watcher-mobymask-v2/deploy-invite.sh:/app/packages/server/deploy-invite.sh
- ../config/watcher-mobymask-v2/deploy-and-generate-invite.sh:/app/packages/server/deploy-and-generate-invite.sh
- moby_data_server:/app/packages/server
- fixturenet_geth_accounts:/geth-accounts:ro
healthcheck:
@ -49,7 +49,7 @@ services:
start_period: 10s
mobymask-watcher-server:
# TODO: pass optimism rpc endpoint
# TODO: pass optimism RPC endpoint
restart: unless-stopped
depends_on:
mobymask-watcher-db:
@ -59,7 +59,7 @@ services:
image: cerc/watcher-mobymask-v2:local
command: ["sh", "server-start.sh"]
volumes:
# TODO: add a script to set rpc endpoint from env
# TODO: add a script to set RPC endpoint from env
# add manually if running separately
- ../config/watcher-mobymask-v2/watcher-config-template.toml:/app/packages/mobymask-v2-watcher/environments/watcher-config-template.toml
- ../config/watcher-mobymask-v2/peer.env:/app/packages/peer/.env

View File

@ -1,5 +1,8 @@
Review

Note that there is an existing convention for script debugging that may be useful here: https://github.com/cerc-io/stack-orchestrator/blob/main/app/data/container-build/cerc-laconic-console-host/start-serving-app.sh#L2 , and the associated Python code: https://github.com/cerc-io/stack-orchestrator/blob/main/app/deploy_system.py#L154. The idea is that you can run `laconic-so --debug <something>` and get `set -x` in all shell scripts without modifying the script files.

prathamesh0 commented 2023-04-03 13:32:12 +00:00 (Migrated from github.com)
Review

Ok. Made changes to use the suggested pattern.
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
op-node genesis l2 \
--deploy-config /contracts-bedrock/deploy-config/getting-started.json \


View File

@ -0,0 +1,9 @@
# Change if pointing to an external L1 endpoint
L1_RPC="http://fixturenet-eth-geth-1:8545"
L1_CHAIN_ID=1212
L1_HOST="fixturenet-eth-geth-1"
L1_PORT=8545
L1_ADDRESS=
L1_PRIV_KEY=
L1_ADDRESS_2=
L1_PRIV_KEY_2=

View File

@ -1,5 +1,8 @@
#!/bin/bash
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
# TODO Support restarts; fixturenet-eth-geth currently starts fresh on a restart
# Exit if a deployment already exists (on restarts)
@ -8,12 +11,14 @@ set -e
# exit 0
# fi
echo "Using L1 RPC endpoint ${L1_RPC}"
# Append tasks/index.ts file
echo "import './rekey-json'" >> tasks/index.ts
echo "import './send-balance'" >> tasks/index.ts
# Update the chainId in the hardhat config
sed -i "/getting-started/ {n; s/.*chainId.*/ chainId: $CHAIN_ID,/}" hardhat.config.ts
sed -i "/getting-started/ {n; s/.*chainId.*/ chainId: $L1_CHAIN_ID,/}" hardhat.config.ts
# Generate the L2 account addresses
yarn hardhat rekey-json --output /l2-accounts/keys.json
@ -29,11 +34,27 @@ BATCHER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Batcher.address')
SEQUENCER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Sequencer.address')
# Read the private key of L1 accounts
# TODO: Take from env if /geth-accounts volume doesn't exist to allow using separately running L1
if [ -f /geth-accounts/accounts.csv ]; then
echo "Using L1 account credentials from the mounted volume"
L1_ADDRESS=$(head -n 1 /geth-accounts/accounts.csv | cut -d ',' -f 2)
L1_PRIV_KEY=$(head -n 1 /geth-accounts/accounts.csv | cut -d ',' -f 3)
L1_ADDRESS_2=$(awk -F, 'NR==2{print $(NF-1)}' /geth-accounts/accounts.csv)
L1_PRIV_KEY_2=$(awk -F, 'NR==2{print $NF}' /geth-accounts/accounts.csv)
else
echo "Using L1 account credentials from env"
fi
# Select a finalized L1 block as the starting point for roll ups
until FINALIZED_BLOCK=$(cast block finalized --rpc-url "$L1_RPC"); do
echo "Waiting for a finalized L1 block to exist, retrying after 10s"
sleep 10
done
L1_BLOCKNUMBER=$(echo "$FINALIZED_BLOCK" | awk '/number/{print $2}')
L1_BLOCKHASH=$(echo "$FINALIZED_BLOCK" | awk '/hash/{print $2}')
L1_BLOCKTIMESTAMP=$(echo "$FINALIZED_BLOCK" | awk '/timestamp/{print $2}')
echo "Selected L1 block ${L1_BLOCKNUMBER} as the starting block for roll ups"
# Send balances to the above L2 addresses
yarn hardhat send-balance --to "${ADMIN_ADDRESS}" --amount 2 --private-key "${L1_PRIV_KEY}" --network getting-started
@ -42,19 +63,9 @@ yarn hardhat send-balance --to "${BATCHER_ADDRESS}" --amount 1000 --private-key
echo "Balances sent to L2 accounts"
# Select a finalized L1 block as the starting point for roll ups
# TODO Use web3.js to get the latest finalized block
until CAST_OUTPUT=$(cast block finalized --rpc-url "$L1_RPC"); do
echo "Waiting for a finalized L1 block to exist, retrying after 10s"
sleep 10
done
L1_BLOCKHASH=$(echo "$CAST_OUTPUT" | awk '/hash/{print $2}')
L1_BLOCKTIMESTAMP=$(echo "$CAST_OUTPUT" | awk '/timestamp/{print $2}')
# Update the deployment config
sed -i 's/"l2OutputOracleStartingTimestamp": TIMESTAMP/"l2OutputOracleStartingTimestamp": '"$L1_BLOCKTIMESTAMP"'/g' deploy-config/getting-started.json
jq --arg chainid "$CHAIN_ID" '.l1ChainID = ($chainid | tonumber)' deploy-config/getting-started.json > tmp.json && mv tmp.json deploy-config/getting-started.json
jq --arg chainid "$L1_CHAIN_ID" '.l1ChainID = ($chainid | tonumber)' deploy-config/getting-started.json > tmp.json && mv tmp.json deploy-config/getting-started.json
node update-config.js deploy-config/getting-started.json "$ADMIN_ADDRESS" "$PROPOSER_ADDRESS" "$BATCHER_ADDRESS" "$SEQUENCER_ADDRESS" "$L1_BLOCKHASH"

View File

@ -1,5 +1,8 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
# Get BATCHER_KEY from keys.json
BATCHER_KEY=$(jq -r '.Batcher.privateKey' /l2-accounts/keys.json | tr -d '"')

View File

@ -1,5 +1,8 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
mkdir datadir

View File

@ -1,5 +1,8 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
# Get SEQUENCER_KEY from keys.json
SEQUENCER_KEY=$(jq -r '.Sequencer.privateKey' /l2-accounts/keys.json | tr -d '"')

app/data/config/wait-for-it.sh — new executable file (182 lines)
View File

@ -0,0 +1,182 @@
#!/usr/bin/env bash
# Use this script to test if a given TCP host/port are available
WAITFORIT_cmdname=${0##*/}
echoerr() { if [[ $WAITFORIT_QUIET -ne 1 ]]; then echo "$@" 1>&2; fi }
usage()
{
cat << USAGE >&2
Usage:
$WAITFORIT_cmdname host:port [-s] [-t timeout] [-- command args]
-h HOST | --host=HOST Host or IP under test
-p PORT | --port=PORT TCP port under test
Alternatively, you specify the host and port as host:port
-s | --strict Only execute subcommand if the test succeeds
-q | --quiet Don't output any status messages
-t TIMEOUT | --timeout=TIMEOUT
Timeout in seconds, zero for no timeout
-- COMMAND ARGS Execute command with args after the test finishes
USAGE
exit 1
}
wait_for()
{
if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then
echoerr "$WAITFORIT_cmdname: waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT"
else
echoerr "$WAITFORIT_cmdname: waiting for $WAITFORIT_HOST:$WAITFORIT_PORT without a timeout"
fi
WAITFORIT_start_ts=$(date +%s)
while :
do
if [[ $WAITFORIT_ISBUSY -eq 1 ]]; then
nc -z $WAITFORIT_HOST $WAITFORIT_PORT
WAITFORIT_result=$?
else
(echo -n > /dev/tcp/$WAITFORIT_HOST/$WAITFORIT_PORT) >/dev/null 2>&1
WAITFORIT_result=$?
fi
if [[ $WAITFORIT_result -eq 0 ]]; then
WAITFORIT_end_ts=$(date +%s)
echoerr "$WAITFORIT_cmdname: $WAITFORIT_HOST:$WAITFORIT_PORT is available after $((WAITFORIT_end_ts - WAITFORIT_start_ts)) seconds"
break
fi
sleep 1
done
return $WAITFORIT_result
}
wait_for_wrapper()
{
# In order to support SIGINT during timeout: http://unix.stackexchange.com/a/57692
if [[ $WAITFORIT_QUIET -eq 1 ]]; then
timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --quiet --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT &
else
timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT &
fi
WAITFORIT_PID=$!
trap "kill -INT -$WAITFORIT_PID" INT
wait $WAITFORIT_PID
WAITFORIT_RESULT=$?
if [[ $WAITFORIT_RESULT -ne 0 ]]; then
echoerr "$WAITFORIT_cmdname: timeout occurred after waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT"
fi
return $WAITFORIT_RESULT
}
# process arguments
while [[ $# -gt 0 ]]
do
case "$1" in
*:* )
WAITFORIT_hostport=(${1//:/ })
WAITFORIT_HOST=${WAITFORIT_hostport[0]}
WAITFORIT_PORT=${WAITFORIT_hostport[1]}
shift 1
;;
--child)
WAITFORIT_CHILD=1
shift 1
;;
-q | --quiet)
WAITFORIT_QUIET=1
shift 1
;;
-s | --strict)
WAITFORIT_STRICT=1
shift 1
;;
-h)
WAITFORIT_HOST="$2"
if [[ $WAITFORIT_HOST == "" ]]; then break; fi
shift 2
;;
--host=*)
WAITFORIT_HOST="${1#*=}"
shift 1
;;
-p)
WAITFORIT_PORT="$2"
if [[ $WAITFORIT_PORT == "" ]]; then break; fi
shift 2
;;
--port=*)
WAITFORIT_PORT="${1#*=}"
shift 1
;;
-t)
WAITFORIT_TIMEOUT="$2"
if [[ $WAITFORIT_TIMEOUT == "" ]]; then break; fi
shift 2
;;
--timeout=*)
WAITFORIT_TIMEOUT="${1#*=}"
shift 1
;;
--)
shift
WAITFORIT_CLI=("$@")
break
;;
--help)
usage
;;
*)
echoerr "Unknown argument: $1"
usage
;;
esac
done
if [[ "$WAITFORIT_HOST" == "" || "$WAITFORIT_PORT" == "" ]]; then
echoerr "Error: you need to provide a host and port to test."
usage
fi
WAITFORIT_TIMEOUT=${WAITFORIT_TIMEOUT:-15}
WAITFORIT_STRICT=${WAITFORIT_STRICT:-0}
WAITFORIT_CHILD=${WAITFORIT_CHILD:-0}
WAITFORIT_QUIET=${WAITFORIT_QUIET:-0}
# Check to see if timeout is from busybox?
WAITFORIT_TIMEOUT_PATH=$(type -p timeout)
WAITFORIT_TIMEOUT_PATH=$(realpath $WAITFORIT_TIMEOUT_PATH 2>/dev/null || readlink -f $WAITFORIT_TIMEOUT_PATH)
WAITFORIT_BUSYTIMEFLAG=""
if [[ $WAITFORIT_TIMEOUT_PATH =~ "busybox" ]]; then
WAITFORIT_ISBUSY=1
# Check if busybox timeout uses -t flag
# (recent Alpine versions don't support -t anymore)
if timeout &>/dev/stdout | grep -q -e '-t '; then
WAITFORIT_BUSYTIMEFLAG="-t"
fi
else
WAITFORIT_ISBUSY=0
fi
if [[ $WAITFORIT_CHILD -gt 0 ]]; then
wait_for
WAITFORIT_RESULT=$?
exit $WAITFORIT_RESULT
else
if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then
wait_for_wrapper
WAITFORIT_RESULT=$?
else
wait_for
WAITFORIT_RESULT=$?
fi
fi
if [[ $WAITFORIT_CLI != "" ]]; then
if [[ $WAITFORIT_RESULT -ne 0 && $WAITFORIT_STRICT -eq 1 ]]; then
echoerr "$WAITFORIT_cmdname: strict mode, refusing to execute subprocess"
exit $WAITFORIT_RESULT
fi
exec "${WAITFORIT_CLI[@]}"
else
exit $WAITFORIT_RESULT
fi

View File

@ -1,4 +1,3 @@
# TODO: Use a node alpine image
FROM cerc/foundry:local
# Install node (local foundry is a debian based image)

View File

@ -25,7 +25,7 @@ RUN make op-batcher VERSION="$VERSION" GOOS=$TARGETOS GOARCH=$TARGETARCH
FROM alpine:3.15
RUN apk add --no-cache jq
RUN apk add --no-cache jq bash
COPY --from=builder /app/op-batcher/bin/op-batcher /usr/local/bin

View File

@ -8,12 +8,15 @@ Clone required repositories:
```bash
laconic-so --stack fixturenet-optimism setup-repositories
# Exclude cerc-io/go-ethereum repository if running L1 separately
laconic-so --stack fixturenet-optimism setup-repositories --exclude cerc-io/go-ethereum
```
Check out the required versions and branches in the repos:
```bash
# optimism
# Optimism
cd ~/cerc/optimism
git checkout @eth-optimism/sdk@0.0.0-20230329025055
```
@ -22,6 +25,9 @@ Build the container images:
```bash
laconic-so --stack fixturenet-optimism build-containers
# Only build containers required for L2 if running L1 separately
laconic-so --stack fixturenet-optimism build-containers --include cerc/foundry,cerc/optimism-contracts,cerc/optimism-op-node,cerc/optimism-l2geth,cerc/optimism-op-batcher
```
This should create the required docker images in the local image registry:
@ -37,19 +43,36 @@ This should create the required docker images in the local image registry:
## Deploy
(Optional) Update the [l1-params.env](../../config/fixturenet-optimism/l1-params.env) file with the L1 endpoint (`L1_RPC`, `L1_HOST` and `L1_PORT`) and other params if running L1 separately (an illustrative example is shown after the notes below)
* NOTE:
* Stack Orchestrator needs to be run in [`dev`](/docs/CONTRIBUTING.md#install-developer-mode) mode to be able to edit the env file
* If L1 is running on the host machine, use `host.docker.internal` as the hostname to access the host port
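For illustration only, a hypothetical `l1-params.env` pointing at an L1 node running on the host machine might look like the sketch below; the chain id is the stack default, and the addresses and private keys are placeholders, not real values:

```bash
# Example values only – substitute your own endpoint, chain id, addresses and keys
L1_RPC="http://host.docker.internal:8545"
L1_CHAIN_ID=1212
L1_HOST="host.docker.internal"
L1_PORT=8545
L1_ADDRESS="0x..."      # funded L1 account used to fund the generated L2 accounts
L1_PRIV_KEY="0x..."
L1_ADDRESS_2="0x..."    # second funded L1 account
L1_PRIV_KEY_2="0x..."
```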
Deploy the stack:
```bash
laconic-so --stack fixturenet-optimism deploy up
# Only start fixturenet-optimism pod (L2) if running L1 separately
laconic-so --stack fixturenet-optimism deploy up --include fixturenet-optimism
```
To list down the running containers:
The `fixturenet-optimism-contracts` service may take a while (`~15 mins`) to complete as it:
1. waits for the 'Merge' to happen on L1
2. waits for a finalized block to exist on L1 (so that it can be taken as a starting block for roll ups; a quick manual check for this is sketched below)
3. deploys the L1 contracts
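If the deployment appears stuck at step 2, the condition the script polls for can be checked by hand with Foundry's `cast` (the same command the deploy script uses); substitute your own L1 RPC URL if running L1 separately:

```bash
# Prints block details once L1 has a finalized block; errors out until then
cast block finalized --rpc-url <L1_RPC>
```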
To list and monitor the running containers:
```bash
laconic-so --stack fixturenet-optimism deploy ps
# With status
docker ps
# Check logs for a container
docker logs -f <CONTAINER_ID>
```
## Clean up
@ -58,23 +81,24 @@ Stop all services running in the background:
```bash
laconic-so --stack fixturenet-optimism deploy down
# If only ran fixturenet-optimism pod (L2)
laconic-so --stack fixturenet-optimism deploy down --include fixturenet-optimism
```
Remove volumes created by this stack:
Clear volumes created by this stack:
```bash
docker volume ls
# List all relevant volumes
docker volume ls -q --filter name=laconic*
docker volume rm laconic-d527651bba3cb61886b36a7400bd2a38_fixturenet-geth-accounts
docker volume rm laconic-d527651bba3cb61886b36a7400bd2a38_l1-deployment
docker volume rm laconic-d527651bba3cb61886b36a7400bd2a38_l2-accounts
docker volume rm laconic-d527651bba3cb61886b36a7400bd2a38_op_node_data
# Remove all the listed volumes
docker volume rm $(docker volume ls -q --filter name=laconic*)
```
## Known Issues
* Currently not supported:
* Stopping and restarting the stack from where it left off; currently starts fresh on a restart
* Pointing Optimism (L2) to external L1 endpoint to allow running only L2 services
* Resource requirements (memory + time) for building `cerc/foundry` image are on the higher side
* `cerc/optimism-contracts` image is currently based on `cerc/foundry` (Optimism requires foundry installation)

View File

@ -13,9 +13,9 @@ containers:
- cerc/fixturenet-eth-lighthouse
- cerc/foundry
- cerc/optimism-contracts
- cerc/optimism-op-node
- cerc/optimism-l2geth
- cerc/optimism-op-batcher
- cerc/optimism-op-node
pods:
- fixturenet-eth
- fixturenet-optimism

View File

@ -31,6 +31,10 @@ git checkout laconic
# MobyMask
cd ~/cerc/MobyMask
git checkout v0.1.1
# Optimism
cd ~/cerc/optimism
git checkout @eth-optimism/sdk@0.0.0-20230329025055
```
Build the container images:
@ -43,29 +47,33 @@ This should create the required docker images in the local image registry.
Deploy the stack:
* Deploy the containers
* Deploy the containers:
```bash
laconic-so --stack mobymask-v2 deploy-system up
```
* Check that all containers are healthy using `docker ps`
* List and check the health status of all the containers using `docker ps` and wait for them to be `healthy`
NOTE: The `mobymask-ui` container might not start. If mobymask-app is not running at http://localhost:3002, run command again to start the container
NOTE: The `mobymask-app` container might not start; if the app is not running at http://localhost:3002, restart the container using its id:
```bash
laconic-so --stack mobymask-v2 deploy-system up
docker ps -a | grep "mobymask-app"
docker restart <CONTAINER_ID>
```
## Tests
Find the watcher container's id:
Find the watcher container's id and export it for later use:
```bash
laconic-so --stack mobymask-v2 deploy-system ps | grep "mobymask-watcher-server"
export CONTAINER_ID=<CONTAINER_ID>
```
Example output
Example output:
```
id: 5d3aae4b22039fcd1c9b18feeb91318ede1100581e75bb5ac54f9e436066b02c, name: laconic-bfb01caf98b1b8f7c8db4d33f11b905a-mobymask-watcher-server-1, ports: 0.0.0.0:3001->3001/tcp, 0.0.0.0:9001->9001/tcp, 0.0.0.0:9090->9090/tcp
@ -73,12 +81,6 @@ id: 5d3aae4b22039fcd1c9b18feeb91318ede1100581e75bb5ac54f9e436066b02c, name: laco
In the above output, the container ID is `5d3aae4b22039fcd1c9b18feeb91318ede1100581e75bb5ac54f9e436066b02c`
Export it for later use:
```bash
export CONTAINER_ID=<CONTAINER_ID>
```
Run the peer tests:
```bash
@ -87,7 +89,11 @@ docker exec -w /app/packages/peer $CONTAINER_ID yarn test
## Web Apps
Check that the status for web-app containers are healthy by using `docker ps`
Check that the web-app containers are healthy:
```bash
docker ps | grep -E 'mobymask-app|peer-test-app'
```
### mobymask-app
@ -119,15 +125,14 @@ laconic-so --stack mobymask-v2 deploy-system down
Clear volumes:
* List all volumes
* List all relevant volumes:
```bash
docker volume ls
docker volume ls -q --filter name=laconic*
```
* Remove volumes created by this stack
* Remove all the listed volumes:
Example:
```bash
docker volume rm laconic-bfb01caf98b1b8f7c8db4d33f11b905a_moby_data_server
docker volume rm $(docker volume ls -q --filter name=laconic*)
```

View File

@ -1,22 +1,22 @@
# Demo
* Get the root invite link URL for mobymask-app
* Get the root invite link URL for mobymask-app:
```
```bash
laconic-so --stack mobymask-v2 deploy-system logs mobymask
```
The invite link is seen at the end of the logs
Example:
```
The invite link is seen at the end of the logs. Example log:
```bash
laconic-bfb01caf98b1b8f7c8db4d33f11b905a-mobymask-1 | http://127.0.0.1:3002/#/members?invitation=%7B%22v%22%3A1%2C%22signedDelegations%22%3A%5B%7B%22signature%22%3A%220x7559bd412f02677d60820e38243acf61547f79339395a34f7d4e1630e645aeb30535fc219f79b6fbd3af0ce3bd05132ad46d2b274a9fbc4c36bc71edd09850891b%22%2C%22delegation%22%3A%7B%22delegate%22%3A%220xc0838c92B2b71756E0eAD5B3C1e1F186baeEAAac%22%2C%22authority%22%3A%220x0000000000000000000000000000000000000000000000000000000000000000%22%2C%22caveats%22%3A%5B%7B%22enforcer%22%3A%220x558024C7d593B840E1BfD83E9B287a5CDad4db15%22%2C%22terms%22%3A%220x0000000000000000000000000000000000000000000000000000000000000000%22%7D%5D%7D%7D%5D%2C%22key%22%3A%220x98da9805821f1802196443e578fd32af567bababa0a249c07c82df01ecaa7d8d%22%7D
```
* Open the invite link in browser to use the mobymask-app.
* Open the invite link in a browser to use the mobymask-app.
NOTE: Before opening the invite link, clear the browser cache (local storage) for http://127.0.0.1:3002 to remove old invitations
* In the debug panel, check if it is connected to the p2p network (It should be connected to atleast one other peer for pubsub to work).
* In the debug panel, check if it is connected to the p2p network (it should be connected to at least one other peer for pubsub to work).
* Create an invite link in the app by clicking on `Create new invite link` button.
@ -31,24 +31,24 @@
* In a terminal check logs from the watcher peer container.
* Get the container id
* Get the container id:
```bash
laconic-so --stack mobymask-v2 deploy-system ps | grep mobymask-watcher-server
```
* Check logs
* Check logs:
```bash
docker logs -f CONTAINER_ID
docker logs -f <CONTAINER_ID>
```
* It should have received the message, sent transaction to L2 chain and received a transaction receipt with block details.
* It should have received the message, sent a transaction to the L2 chain, and received a transaction receipt for an `invoke` message with block details.
Example log:
```
2023-03-23T10:25:19.771Z vulcanize:peer-listener [10:25:19] Received a message on mobymask P2P network from peer: PeerId(12D3KooWAVNswtcrX12iDYukEoxdQwD34kJyRWcQTfZ4unGg2xjd)
```bash
2023-03-23T10:25:19.771Z vulcanize:peer-listener [10:25:19] Received a message on mobymask P2P network from peer: 12D3KooWAVNswtcrX12iDYukEoxdQwD34kJyRWcQTfZ4unGg2xjd
2023-03-23T10:25:24.143Z laconic:libp2p-utils Transaction receipt for invoke message {
to: '0x558024C7d593B840E1BfD83E9B287a5CDad4db15',
blockNumber: 1996,
@ -60,7 +60,7 @@
```
* Check the phisher in watcher GQL: http://localhost:3001/graphql
* Use the blockHash from transaction receipt details or query for latest block
* Use the blockHash from the transaction receipt details or query for the latest block:
```gql
query {
@ -71,7 +71,7 @@
}
```
* Get the deployed contract address
* Get the deployed contract address:
```bash
laconic-so --stack mobymask-v2 deploy-system exec mobymask-app "cat src/config.json"
@ -94,7 +94,7 @@
}
```
It should return true for reported phisher names.
It should return `true` for reported phisher names.
* The watcher internally uses the L2 chain's `eth_getStorageAt` method.
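For reference, `eth_getStorageAt` is a standard Ethereum JSON-RPC method taking a contract address, a storage slot and a block tag or hash; a minimal sketch of such a call is shown below, where the L2 RPC URL, contract address and slot are placeholders:

```bash
# Placeholders: substitute the L2 RPC endpoint, contract address and storage slot
curl -s -X POST <L2_RPC_URL> \
  -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":1,"method":"eth_getStorageAt","params":["<CONTRACT_ADDRESS>","0x0","latest"]}'
```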
@ -107,7 +107,7 @@
* Revocation messages can be seen in the debug panel `MESSAGES` tab of other browsers.
* Check the watcher peer logs. It should receive a message and log the transaction receipt for revoke message.
* Check the watcher peer logs. It should receive a message and log the transaction receipt for a `revoke` message.
* Try reporting a phisher from the revoked invitee's browser.
@ -129,4 +129,4 @@
}
```
It should return false as the invitation/delegation used for reporting phishers has been revoked.
It should return `false` as the invitation/delegation used for reporting phishers has been revoked.