forked from cerc-io/stack-orchestrator

commit 069dd50fd3: Merge branch 'main' into ci-test

Former-commit-id: 4eb970350bbfca04ea98b21a6aaf0acfa3daa8cd
@@ -49,6 +49,8 @@ services:
 timeout: 10s
 retries: 10
 start_period: 3s
+environment:
+CERC_KEEP_RUNNING_AFTER_GETH_EXIT: "true"
 env_file:
 - ../config/fixturenet-eth/fixturenet-eth.env
 image: cerc/fixturenet-eth-geth:local
@@ -31,7 +31,7 @@ services:
 - l2_accounts:/l2-accounts
 - l1_deployment:/app/packages/contracts-bedrock
 extra_hosts:
 - "host.docker.internal:host-gateway"

 # Generates the config files required for L2 (outputs to volume l2_config)
 op-node-l2-config-gen:
@@ -50,7 +50,7 @@ services:
 - l2_config:/app
 command: ["sh", "/app/generate-l2-config.sh"]
 extra_hosts:
 - "host.docker.internal:host-gateway"

 # Initializes and runs the L2 execution client (outputs to volume l2_geth_data)
 op-geth:
@@ -122,8 +122,35 @@ services:
 # Waits for L1 endpoint to be up before running the batcher
 command: |
 "/wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- /run-op-batcher.sh"
+ports:
+- "127.0.0.1:8548:8548"
 extra_hosts:
 - "host.docker.internal:host-gateway"

+# Runs the proposer (periodically submits new state roots to L1)
+op-proposer:
+image: cerc/optimism-op-proposer:local
+depends_on:
+op-node:
+condition: service_healthy
+env_file:
+- ../config/fixturenet-optimism/l1-params.env
+environment:
+CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
+CERC_L1_RPC: ${CERC_L1_RPC}
+volumes:
+- ../config/wait-for-it.sh:/wait-for-it.sh
+- ../config/fixturenet-optimism/run-op-proposer.sh:/run-op-proposer.sh
+- l1_deployment:/contracts-bedrock:ro
+- l2_accounts:/l2-accounts:ro
+entrypoint: ["sh", "-c"]
+# Waits for L1 endpoint to be up before running the proposer
+command: |
+"/wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- /run-op-proposer.sh"
+ports:
+- "127.0.0.1:8560:8560"
+extra_hosts:
+- "host.docker.internal:host-gateway"
+
 volumes:
 l1_deployment:
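A note on the `$${VAR}` syntax in the `command` strings above: the doubled `$` is docker compose's escape for a literal dollar sign, so the `DEFAULT_*` fallbacks are expanded by the shell inside the container rather than interpolated by compose on the host. A minimal sketch of what the container's shell effectively runs (names taken from the compose file above, not an exact transcript):

```bash
# After compose turns "$$" into "$", the service shell evaluates the usual
# ${VAR:-default} expansion and then execs the wrapped script.
/wait-for-it.sh -h "${CERC_L1_HOST:-${DEFAULT_CERC_L1_HOST}}" \
                -p "${CERC_L1_PORT:-${DEFAULT_CERC_L1_PORT}}" \
                -s -t 60 -- /run-op-proposer.sh
```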
@@ -15,12 +15,7 @@ services:
 CERC_RELAY_NODES: ${CERC_RELAY_NODES}
 CERC_BUILD_DIR: "@cerc-io/mobymask-ui/build"
 working_dir: /scripts
-# Waits for watcher server to be up before app build
-# Required when running with watcher stack to get deployed contract address
-command:
-- sh
-- -c
-- ./wait-for-it.sh -h ${CERC_WATCHER_HOST:-$${DEFAULT_CERC_WATCHER_HOST}} -p ${CERC_WATCHER_PORT:-$${DEFAULT_CERC_WATCHER_PORT}} -s -t 0 -- ./mobymask-app-start.sh
+command: ["sh", "mobymask-app-start.sh"]
 volumes:
 - ../config/wait-for-it.sh:/scripts/wait-for-it.sh
 - ../config/watcher-mobymask-v2/mobymask-app-start.sh:/scripts/mobymask-app-start.sh
@@ -51,12 +46,7 @@ services:
 CERC_RELAY_NODES: ${CERC_RELAY_NODES}
 CERC_BUILD_DIR: "@cerc-io/mobymask-ui-lxdao/build"
 working_dir: /scripts
-# Waits for watcher server to be up before app build
-# Required when running with watcher stack to get deployed contract address
-command:
-- sh
-- -c
-- ./wait-for-it.sh -h ${CERC_WATCHER_HOST:-$${DEFAULT_CERC_WATCHER_HOST}} -p ${CERC_WATCHER_PORT:-$${DEFAULT_CERC_WATCHER_PORT}} -s -t 0 -- ./mobymask-app-start.sh
+command: ["sh", "mobymask-app-start.sh"]
 volumes:
 - ../config/wait-for-it.sh:/scripts/wait-for-it.sh
 - ../config/watcher-mobymask-v2/mobymask-app-start.sh:/scripts/mobymask-app-start.sh
@@ -10,10 +10,7 @@ services:
 environment:
 CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
 CERC_RELAY_NODES: ${CERC_RELAY_NODES}
-command:
-- sh
-- -c
-- ./wait-for-it.sh -h ${CERC_WATCHER_HOST:-$${DEFAULT_CERC_WATCHER_HOST}} -p ${CERC_WATCHER_PORT:-$${DEFAULT_CERC_WATCHER_PORT}} -s -t 0 -- ./test-app-start.sh
+command: ["sh", "test-app-start.sh"]
 volumes:
 - ../config/wait-for-it.sh:/scripts/wait-for-it.sh
 - ../config/watcher-mobymask-v2/test-app-start.sh:/scripts/test-app-start.sh
@@ -76,6 +76,13 @@ else
 echo "Couldn't fetch L1 account credentials, using them from env"
 fi

+# Send balances to the above L2 addresses
+yarn hardhat send-balance --to "${ADMIN_ADDRESS}" --amount 2 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started
+yarn hardhat send-balance --to "${PROPOSER_ADDRESS}" --amount 5 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started
+yarn hardhat send-balance --to "${BATCHER_ADDRESS}" --amount 1000 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started
+
+echo "Balances sent to L2 accounts"
+
 # Select a finalized L1 block as the starting point for roll ups
 until FINALIZED_BLOCK=$(cast block finalized --rpc-url "$CERC_L1_RPC"); do
 echo "Waiting for a finalized L1 block to exist, retrying after 10s"
@@ -88,13 +95,6 @@ L1_BLOCKTIMESTAMP=$(echo "$FINALIZED_BLOCK" | awk '/timestamp/{print $2}')

 echo "Selected L1 block ${L1_BLOCKNUMBER} as the starting block for roll ups"

-# Send balances to the above L2 addresses
-yarn hardhat send-balance --to "${ADMIN_ADDRESS}" --amount 2 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started
-yarn hardhat send-balance --to "${PROPOSER_ADDRESS}" --amount 5 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started
-yarn hardhat send-balance --to "${BATCHER_ADDRESS}" --amount 1000 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started
-
-echo "Balances sent to L2 accounts"
-
 # Update the deployment config
 sed -i 's/"l2OutputOracleStartingTimestamp": TIMESTAMP/"l2OutputOracleStartingTimestamp": '"$L1_BLOCKTIMESTAMP"'/g' deploy-config/getting-started.json
 jq --arg chainid "$CERC_L1_CHAIN_ID" '.l1ChainID = ($chainid | tonumber)' deploy-config/getting-started.json > tmp.json && mv tmp.json deploy-config/getting-started.json
@@ -110,7 +110,7 @@ echo "PRIVATE_KEY_DEPLOYER=$ADMIN_PRIV_KEY" >> .env
 echo "Deploying the L1 smart contracts, this will take a while..."

 # Deploy the L1 smart contracts
-yarn hardhat deploy --network getting-started
+yarn hardhat deploy --network getting-started --tags l1

 echo "Deployed the L1 smart contracts"

@@ -6,7 +6,7 @@ fi

 CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}"

-# Get BACTHER_KEY from keys.json
+# Get Batcher key from keys.json
 BATCHER_KEY=$(jq -r '.Batcher.privateKey' /l2-accounts/keys.json | tr -d '"')

 cleanup() {
@@ -18,6 +18,7 @@ cleanup() {
 }
 trap 'cleanup' INT TERM

+# Run op-batcher
 op-batcher \
 --l2-eth-rpc=http://op-geth:8545 \
 --rollup-rpc=http://op-node:8547 \
@@ -30,7 +31,6 @@ op-batcher \
 --rpc.port=8548 \
 --rpc.enable-admin \
 --max-channel-duration=1 \
---target-l1-tx-size-bytes=2048 \
 --l1-eth-rpc=$CERC_L1_RPC \
 --private-key=$BATCHER_KEY \
 &
@@ -8,7 +8,7 @@ fi
 echo "Installing jq"
 apk update && apk add jq

-# Get SEQUENCER key from keys.json
+# Get Sequencer key from keys.json
 SEQUENCER_KEY=$(jq -r '.Sequencer.privateKey' /l2-accounts/keys.json | tr -d '"')

 # Initialize op-geth if datadir/geth not found
@@ -70,7 +70,7 @@ geth \
 --ws.origins="*" \
 --ws.api=debug,eth,txpool,net,engine \
 --syncmode=full \
---gcmode=full \
+--gcmode=archive \
 --nodiscover \
 --maxpeers=0 \
 --networkid=42069 \
@@ -6,9 +6,10 @@ fi

 CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}"

-# Get SEQUENCER KEY from keys.json
+# Get Sequencer key from keys.json
 SEQUENCER_KEY=$(jq -r '.Sequencer.privateKey' /l2-accounts/keys.json | tr -d '"')

+# Run op-node
 op-node \
 --l2=http://op-geth:8551 \
 --l2.jwt-secret=/op-node-data/jwt.txt \

app/data/config/fixturenet-optimism/run-op-proposer.sh (new executable file, 36 lines)
@@ -0,0 +1,36 @@
+#!/bin/sh
+set -e
+if [ -n "$CERC_SCRIPT_DEBUG" ]; then
+set -x
+fi
+
+CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}"
+
+# Read the L2OutputOracle contract address from the deployment
+L2OO_DEPLOYMENT=$(cat /contracts-bedrock/deployments/getting-started/L2OutputOracle.json)
+L2OO_ADDR=$(echo "$L2OO_DEPLOYMENT" | jq -r '.address')
+
+# Get Proposer key from keys.json
+PROPOSER_KEY=$(jq -r '.Proposer.privateKey' /l2-accounts/keys.json | tr -d '"')
+
+cleanup() {
+echo "Signal received, cleaning up..."
+kill ${proposer_pid}
+
+wait
+echo "Done"
+}
+trap 'cleanup' INT TERM
+
+# Run op-proposer
+op-proposer \
+--poll-interval 12s \
+--rpc.port 8560 \
+--rollup-rpc http://op-node:8547 \
+--l2oo-address $L2OO_ADDR \
+--private-key $PROPOSER_KEY \
+--l1-eth-rpc $CERC_L1_RPC \
+&
+
+proposer_pid=$!
+wait $proposer_pid
@@ -48,7 +48,7 @@ CERC_L2_GETH_PORT="${CERC_L2_GETH_PORT:-${DEFAULT_CERC_L2_GETH_PORT}}"
 CERC_L2_NODE_HOST="${CERC_L2_NODE_HOST:-${DEFAULT_CERC_L2_NODE_HOST}}"
 CERC_L2_NODE_PORT="${CERC_L2_NODE_PORT:-${DEFAULT_CERC_L2_NODE_PORT}}"
 ./wait-for-it.sh -h "${CERC_L2_GETH_HOST}" -p "${CERC_L2_GETH_PORT}" -s -t 0
-./wait-for-it.sh -h "${CERC_L2_GETH_PORT}" -p "${CERC_L2_NODE_PORT}" -s -t 0
+./wait-for-it.sh -h "${CERC_L2_NODE_HOST}" -p "${CERC_L2_NODE_PORT}" -s -t 0

 export RPC_URL="${CERC_L2_GETH_RPC}"

@@ -17,9 +17,13 @@ fi

 echo "Using CERC_RELAY_NODES $CERC_RELAY_NODES"

-# Use config from mounted volume if available (when running web-app along with watcher stack)
-if [ -f /server/config.json ]; then
+if [ -z "$CERC_DEPLOYED_CONTRACT" ]; then
+# Use config from mounted volume (when running web-app along with watcher stack)
 echo "Taking config for deployed contract from mounted volume"
+while [ ! -f /server/config.json ]; do
+echo "Config not found, retrying after 5 seconds"
+sleep 5
+done

 # Get deployed contract address and chain id
 CERC_DEPLOYED_CONTRACT=$(jq -r '.address' /server/config.json | tr -d '"')
@@ -1,8 +1,6 @@
 # Defaults

 # Watcher endpoint
-DEFAULT_CERC_WATCHER_HOST="mobymask-watcher-server"
-DEFAULT_CERC_WATCHER_PORT=3001
 DEFAULT_CERC_APP_WATCHER_URL="http://localhost:3001"

 # Set of relay peers to connect to from the relay node
@@ -127,3 +127,9 @@ else
 fi

 wait $geth_pid
+
+if [ "true" == "$CERC_KEEP_RUNNING_AFTER_GETH_EXIT" ]; then
+while [ 1 -eq 1 ]; do
+sleep 60
+done
+fi
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+# Exports the complete fixturenet-eth ethdb data to a tarball (default, ./ethdb.tgz), waiting for a minimum
+# block height (default 1000) to be reached before exporting.
+
+# Usage: export-ethdb.sh [min_block_number=1000] [output_file=./ethdb.tgz]
+
+if [[ -n "$CERC_SCRIPT_DEBUG" ]]; then
+set -x
+fi
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+
+GETH_EXPORT_MIN_BLOCK=${1:-${GETH_EXPORT_MIN_BLOCK:-1000}}
+
+# Wait for block.
+${SCRIPT_DIR}/status.sh $GETH_EXPORT_MIN_BLOCK
+if [[ $? -ne 0 ]]; then
+echo "Unable to export ethdb." 1>&2
+exit 1
+fi
+
+# Stop geth.
+echo -n "Exporting ethdb.... "
+GETH_CONTAINER=`docker ps -q -f "name=${CERC_SO_COMPOSE_PROJECT}-fixturenet-eth-geth-2-1"`
+if [[ -z "$GETH_CONTAINER" ]]; then
+echo "not found"
+exit 1
+fi
+
+docker exec $GETH_CONTAINER sh -c 'rm -rf /root/tmp && mkdir -p /root/tmp/export'
+docker exec $GETH_CONTAINER sh -c 'ln -s /opt/testnet/build/el/geth.json /root/tmp/export/genesis.json && ln -s /root/ethdata /root/tmp/export/'
+docker exec $GETH_CONTAINER sh -c 'cat /root/tmp/export/genesis.json | jq ".config" > /root/tmp/export/genesis.config.json'
+
+# Stop geth and zip up ethdb.
+docker exec $GETH_CONTAINER sh -c 'curl -s --location "localhost:8545" --header "Content-Type: application/json" --data "{\"jsonrpc\": \"2.0\", \"id\": 1, \"method\": \"eth_getBlockByNumber\", \"params\": [\"0x0\", false]}" > /root/tmp/export/eth_getBlockByNumber_0x0.json'
+docker exec $GETH_CONTAINER sh -c 'curl -s --location "localhost:8545" --header "Content-Type: application/json" --data "{\"jsonrpc\": \"2.0\", \"id\": 1, \"method\": \"eth_blockNumber\", \"params\": []}" > /root/tmp/export/eth_blockNumber.json'
+docker exec $GETH_CONTAINER sh -c "killall geth && sleep 2 && tar chzf /root/tmp/ethdb.tgz -C /root/tmp/export ."
+
+# Copy ethdb to host.
+GETH_EXPORT_FILE=${2:-${GETH_EXPORT_FILE:-./ethdb.tgz}}
+docker cp $GETH_CONTAINER:/root/tmp/ethdb.tgz $GETH_EXPORT_FILE
+echo "$GETH_EXPORT_FILE"
+docker exec $GETH_CONTAINER sh -c "rm -rf /root/tmp"
+
+# Restart the container to get geth back up and running.
+docker restart $GETH_CONTAINER >/dev/null
@@ -2,9 +2,10 @@
 if [ -n "$CERC_SCRIPT_DEBUG" ]; then
 set -x
 fi
-STATUSES=("geth to generate DAG" "beacon phase0" "beacon altair" "beacon bellatrix pre-merge" "beacon bellatrix merge")
-STATUS=0

+MIN_BLOCK_NUM=${1:-${MIN_BLOCK_NUM:-3}}
+STATUSES=("geth to generate DAG" "beacon phase0" "beacon altair" "beacon bellatrix pre-merge" "beacon bellatrix merge" "block number $MIN_BLOCK_NUM")
+STATUS=0

 LIGHTHOUSE_BASE_URL=${LIGHTHOUSE_BASE_URL}
 GETH_BASE_URL=${GETH_BASE_URL}
@@ -13,18 +14,29 @@ GETH_BASE_URL=${GETH_BASE_URL}
 # or some execution environment-neutral mechanism.
 if [ -z "$LIGHTHOUSE_BASE_URL" ]; then
 LIGHTHOUSE_CONTAINER=`docker ps -q -f "name=fixturenet-eth-lighthouse-1-1"`
+if [ -z "$LIGHTHOUSE_CONTAINER" ]; then
+echo "Lighthouse container not found." 1>&2
+exit 1
+fi
 LIGHTHOUSE_PORT=`docker port $LIGHTHOUSE_CONTAINER 8001 | cut -d':' -f2`
 LIGHTHOUSE_BASE_URL="http://localhost:${LIGHTHOUSE_PORT}"
 fi

 if [ -z "$GETH_BASE_URL" ]; then
 GETH_CONTAINER=`docker ps -q -f "name=fixturenet-eth-geth-1-1"`
+if [ -z "$GETH_CONTAINER" ]; then
+echo "Lighthouse container not found." 1>&2
+exit 1
+fi
 GETH_PORT=`docker port $GETH_CONTAINER 8545 | cut -d':' -f2`
 GETH_BASE_URL="http://localhost:${GETH_PORT}"
 fi

+MARKER="."
+
 function inc_status() {
 echo " done"
+MARKEr="."
 STATUS=$((STATUS + 1))
 if [ $STATUS -lt ${#STATUSES[@]} ]; then
 echo -n "Waiting for ${STATUSES[$STATUS]}..."
@@ -34,7 +46,7 @@ function inc_status() {
 echo -n "Waiting for ${STATUSES[$STATUS]}..."
 while [ $STATUS -lt ${#STATUSES[@]} ]; do
 sleep 1
-echo -n "."
+echo -n "$MARKER"
 case $STATUS in
 0)
 result=`wget --no-check-certificate --quiet -O - --method POST --header 'Content-Type: application/json' \
@@ -67,5 +79,13 @@ while [ $STATUS -lt ${#STATUSES[@]} ]; do
 inc_status
 fi
 ;;
+5)
+result=`wget --no-check-certificate --quiet -O - "$LIGHTHOUSE_BASE_URL/eth/v2/beacon/blocks/head" | jq -r '.data.message.body.execution_payload.block_number'`
+if [ ! -z "$result" ] && [ $result -gt $MIN_BLOCK_NUM ]; then
+inc_status
+else
+MARKER="$result "
+fi
+;;
 esac
 done
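With the `MIN_BLOCK_NUM` handling added above, the status script can also be used on its own to block until the chain has advanced far enough. A small usage sketch (the positional argument and its default of 3 come from the script above; 500 is just an example value):

```bash
# Wait until the beacon head's execution payload is past block 500,
# printing the current block number as the progress marker.
./status.sh 500 && echo "chain has reached block 500"
```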
@@ -52,7 +52,7 @@ RUN yarn global add http-server
 # Install old version of MobyMask web app
 RUN yarn global add @cerc-io/mobymask-ui@0.1.3
 # Install the LXDAO version of MobyMask web app
-RUN yarn global add @cerc-io/mobymask-ui-lxdao@npm:@cerc-io/mobymask-ui@0.1.3-lxdao-0.1.0
+RUN yarn global add @cerc-io/mobymask-ui-lxdao@npm:@cerc-io/mobymask-ui@0.1.3-lxdao-0.1.1

 # Expose port for http
 EXPOSE 80
@@ -1,4 +1,7 @@
 #!/usr/bin/env bash
+
+# Build cerc/optimism-l2geth
+
 source ${CERC_CONTAINER_BASE_DIR}/build-base.sh

 docker build -t cerc/optimism-l2geth:local ${build_command_args} ${CERC_REPO_BASE_DIR}/op-geth
@@ -1,6 +1,9 @@
 #!/usr/bin/env bash
+
 # Build cerc/optimism-op-batcher
 # TODO: use upstream Dockerfile once its buildx-specific content has been removed
+
 source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
+
 SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
 docker build -t cerc/optimism-op-batcher:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/optimism
@@ -1,6 +1,9 @@
 #!/usr/bin/env bash
+
 # Build cerc/optimism-op-node
 # TODO: use upstream Dockerfile once its buildx-specific content has been removed
+
 source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
+
 SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
 docker build -t cerc/optimism-op-node:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/optimism
@@ -0,0 +1,31 @@
+FROM golang:1.19.0-alpine3.15 as builder
+
+ARG VERSION=v0.0.0
+
+RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash
+
+# build op-proposer with the shared go.mod & go.sum files
+COPY ./op-proposer /app/op-proposer
+COPY ./op-bindings /app/op-bindings
+COPY ./op-node /app/op-node
+COPY ./op-service /app/op-service
+COPY ./op-signer /app/op-signer
+COPY ./go.mod /app/go.mod
+COPY ./go.sum /app/go.sum
+COPY ./.git /app/.git
+
+WORKDIR /app/op-proposer
+
+RUN go mod download
+
+ARG TARGETOS TARGETARCH
+
+RUN make op-proposer VERSION="$VERSION" GOOS=$TARGETOS GOARCH=$TARGETARCH
+
+FROM alpine:3.15
+
+RUN apk add --no-cache jq bash
+
+COPY --from=builder /app/op-proposer/bin/op-proposer /usr/local/bin
+
+CMD ["op-proposer"]
app/data/container-build/cerc-optimism-op-proposer/build.sh (new executable file, 8 lines)
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+
+# Build cerc/optimism-op-proposer
+
+source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+docker build -t cerc/optimism-op-proposer:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/optimism
@@ -14,7 +14,7 @@ WORKDIR /app

 COPY . .

-RUN echo "Building watcher-ts" && \
+RUN echo "Building mobymask-v2-watcher-ts" && \
 yarn && yarn build

 WORKDIR /app
@@ -35,3 +35,4 @@ cerc/act-runner-task-executor
 cerc/optimism-l2geth
 cerc/optimism-op-batcher
 cerc/optimism-op-node
+cerc/optimism-op-proposer
@@ -10,7 +10,6 @@ laconicd
 fixturenet-laconicd
 fixturenet-eth
 fixturenet-eth-metrics
-watcher-ts
 watcher-mobymask
 watcher-erc20
 watcher-erc721
@@ -13,8 +13,6 @@ cerc-io/laconic-console
 cerc-io/mobymask-watcher
 cerc-io/watcher-ts
 cerc-io/mobymask-v2-watcher-ts
-cerc-io/react-peer
-cerc-io/mobymask-ui
 cerc-io/MobyMask
 vulcanize/uniswap-watcher-ts
 vulcanize/uniswap-v3-info
@@ -1,6 +1,5 @@
-version: "1.1"
+version: "1.2"
 name: build-support
 decription: "Build Support Components"
 containers:
 - cerc/builder-js
-- cerc/builder-gerbil

app/data/stacks/fixturenet-eth-tx/README.md (new file, 37 lines)
@@ -0,0 +1,37 @@
+# fixturenet-eth-tx
+
+A variation of `fixturenet-eth` that automatically generates transactions using `tx-spammer`.
+
+See `stacks/fixturenet-eth/README.md` for more information.
+
+## Containers
+
+* cerc/go-ethereum
+* cerc/lighthouse
+* cerc/fixturenet-eth-geth
+* cerc/fixturenet-eth-lighthouse
+* cerc/tx-spammer
+
+## Deploy the stack
+```
+$ laconic-so --stack fixturenet-eth-tx setup-repositories
+$ laconic-so --stack fixturenet-eth-tx build-containers
+$ laconic-so --stack fixturenet-eth-tx deploy up
+```
+
+## Export the ethdb (optional)
+
+It is easy to export data from the fixturenet for offline processing of the raw ethdb files (eg, by eth-statediff-service) using the `export-ethdb.sh` script.
+
+For example:
+
+```
+$ app/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/export-ethdb.sh 500
+Waiting for geth to generate DAG.... done
+Waiting for beacon phase0.... done
+Waiting for beacon altair.... done
+Waiting for beacon bellatrix pre-merge.... done
+Waiting for beacon bellatrix merge.... done
+Waiting for block number 500.... done
+Exporting ethdb.... ./ethdb.tgz
+```

app/data/stacks/fixturenet-eth-tx/stack.yml (new file, 18 lines)
@@ -0,0 +1,18 @@
+version: "1.2"
+name: fixturenet-eth-tx
+decription: "Ethereum Fixturenet w/ tx-spammer"
+repos:
+- cerc-io/go-ethereum
+- cerc-io/tx-spammer
+- dboreham/foundry
+containers:
+- cerc/go-ethereum
+- cerc/lighthouse
+- cerc/fixturenet-eth-geth
+- cerc/fixturenet-eth-lighthouse
+- cerc/tx-spammer
+- cerc/foundry
+pods:
+- fixturenet-eth
+- foundry
+- tx-spammer
@@ -2,7 +2,7 @@

 Instructions to setup and deploy an end-to-end L1+L2 stack with [fixturenet-eth](../fixturenet-eth/) (L1) and [Optimism](https://stack.optimism.io) (L2)

-We support running just the L2 part of stack, given an external L1 endpoint. Follow [l2-only](./l2-only.md) for the same.
+We support running just the L2 part of stack, given an external L1 endpoint. Follow the [L2 only doc](./l2-only.md) for the same.

 ## Setup

@@ -19,7 +19,7 @@ Checkout to the required versions and branches in repos:
 ```bash
 # Optimism
 cd ~/cerc/optimism
-git checkout @eth-optimism/sdk@0.0.0-20230329025055
+git checkout v1.0.4
 ```

 Build the container images:
@@ -28,6 +28,8 @@ Build the container images:
 laconic-so --stack fixturenet-optimism build-containers
 ```

+Note: this will take >10 mins depending on the specs of your machine, and **requires** 16GB of memory or greater.
+
 This should create the required docker images in the local image registry:
 * `cerc/go-ethereum`
 * `cerc/lighthouse`
@@ -36,8 +38,9 @@ This should create the required docker images in the local image registry:
 * `cerc/foundry`
 * `cerc/optimism-contracts`
 * `cerc/optimism-l2geth`
-* `cerc/optimism-op-batcher`
 * `cerc/optimism-op-node`
+* `cerc/optimism-op-batcher`
+* `cerc/optimism-op-proposer`

 ## Deploy

@@ -47,12 +50,14 @@ Deploy the stack:
 laconic-so --stack fixturenet-optimism deploy up
 ```

-The `fixturenet-optimism-contracts` service may take a while (`~15 mins`) to complete running as it:
+If you get the error `service "fixturenet-optimism-contracts" didn't complete successfully: exit 1` with ~25 lines of Traceback, wait 15-20 mins then re-run the command.
+
+The `fixturenet-optimism-contracts` service takes a while to complete running as it:
 1. waits for the 'Merge' to happen on L1
 2. waits for a finalized block to exist on L1 (so that it can be taken as a starting block for roll ups)
 3. deploys the L1 contracts

-To list down and monitor the running containers:
+To list and monitor the running containers:

 ```bash
 laconic-so --stack fixturenet-optimism deploy ps
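The finalized-block wait referred to in step 2 above is the same check performed by the contracts run script earlier in this diff; a standalone sketch of that wait (assumes `cast` from foundry is available and `CERC_L1_RPC` points at the L1 endpoint):

```bash
# Poll until L1 reports a block tagged "finalized", as the deploy script does.
until cast block finalized --rpc-url "$CERC_L1_RPC"; do
  echo "Waiting for a finalized L1 block, retrying in 10s"
  sleep 10
done
```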
@@ -69,7 +74,7 @@ docker logs -f <CONTAINER_ID>
 Stop all services running in the background:

 ```bash
-laconic-so --stack fixturenet-optimism deploy down
+laconic-so --stack fixturenet-optimism deploy down 30
 ```

 Clear volumes created by this stack:
@@ -19,21 +19,22 @@ Checkout to the required versions and branches in repos:
 ```bash
 # Optimism
 cd ~/cerc/optimism
-git checkout @eth-optimism/sdk@0.0.0-20230329025055
+git checkout v1.0.4
 ```

 Build the container images:

 ```bash
-laconic-so --stack fixturenet-optimism build-containers --include cerc/foundry,cerc/optimism-contracts,cerc/optimism-op-node,cerc/optimism-l2geth,cerc/optimism-op-batcher
+laconic-so --stack fixturenet-optimism build-containers --include cerc/foundry,cerc/optimism-contracts,cerc/optimism-op-node,cerc/optimism-l2geth,cerc/optimism-op-batcher,cerc/optimism-op-proposer
 ```

 This should create the required docker images in the local image registry:
 * `cerc/foundry`
 * `cerc/optimism-contracts`
 * `cerc/optimism-l2geth`
-* `cerc/optimism-op-batcher`
 * `cerc/optimism-op-node`
+* `cerc/optimism-op-batcher`
+* `cerc/optimism-op-proposer`

 ## Deploy

@@ -89,7 +90,7 @@ docker logs -f <CONTAINER_ID>
 Stop all services running in the background:

 ```bash
-laconic-so --stack fixturenet-optimism deploy --include fixturenet-optimism down
+laconic-so --stack fixturenet-optimism deploy --include fixturenet-optimism down 30
 ```

 Clear volumes created by this stack:
|
|||||||
- cerc/optimism-op-node
|
- cerc/optimism-op-node
|
||||||
- cerc/optimism-l2geth
|
- cerc/optimism-l2geth
|
||||||
- cerc/optimism-op-batcher
|
- cerc/optimism-op-batcher
|
||||||
|
- cerc/optimism-op-proposer
|
||||||
pods:
|
pods:
|
||||||
- fixturenet-eth
|
- fixturenet-eth
|
||||||
- fixturenet-optimism
|
- fixturenet-optimism
|
||||||
|
@ -35,7 +35,7 @@ git checkout v0.1.2
|
|||||||
|
|
||||||
# Optimism
|
# Optimism
|
||||||
cd ~/cerc/optimism
|
cd ~/cerc/optimism
|
||||||
git checkout @eth-optimism/sdk@0.0.0-20230329025055
|
git checkout v1.0.4
|
||||||
```
|
```
|
||||||
|
|
||||||
Build the container images:
|
Build the container images:
|
||||||
@ -111,7 +111,7 @@ Follow the [demo](./demo.md) to try out the MobyMask app with L2 chain
|
|||||||
Stop all the services running in background run:
|
Stop all the services running in background run:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
laconic-so --stack mobymask-v2 deploy-system down
|
laconic-so --stack mobymask-v2 deploy-system down 30
|
||||||
```
|
```
|
||||||
|
|
||||||
Clear volumes created by this stack:
|
Clear volumes created by this stack:
|
||||||
|
@@ -81,10 +81,11 @@ Add the following contents to `mobymask-watcher.env`:

 ```bash
 # Domain to be used in the relay node's announce address
-CERC_RELAY_ANNOUNCE_DOMAIN="example.com"
+CERC_RELAY_ANNOUNCE_DOMAIN="mobymask.example.com"


 # DO NOT CHANGE THESE VALUES
+CERC_L2_GETH_RPC="https://mobymask-l2.dev.vdb.to"
 CERC_DEPLOYED_CONTRACT="0x2B6AFbd4F479cE4101Df722cF4E05F941523EaD9"
 CERC_ENABLE_PEER_L2_TXS=false
 CERC_RELAY_PEERS=["/dns4/relay1.dev.vdb.to/tcp/443/wss/p2p/12D3KooWAx83SM9GWVPc9v9fNzLzftRX6EaAFMjhYiFxRYqctcW1", "/dns4/relay2.dev.vdb.to/tcp/443/wss/p2p/12D3KooWBycy6vHVEfUwwYRbPLBdb5gx9gtFSEMpErYPUjUkDNkm", "/dns4/relay3.dev.vdb.to/tcp/443/wss/p2p/12D3KooWARcUJsiGCgiygiRVVK94U8BNSy8DFBbzAF3B6orrabwn"]
@@ -99,9 +100,8 @@ laconic-so --stack mobymask-v2 deploy --cluster mobymask_v2 --include watcher-mo

 # Expected output (ignore the "The X variable is not set. Defaulting to a blank string." warnings):

-# [+] Running 10/10
+# [+] Running 9/9
 # ✔ Network mobymask_v2_default Created 0.1s
-# ✔ Volume "mobymask_v2_fixturenet_geth_accounts" Created 0.0s
 # ✔ Volume "mobymask_v2_peers_ids" Created 0.0s
 # ✔ Volume "mobymask_v2_mobymask_watcher_db_data" Created 0.0s
 # ✔ Volume "mobymask_v2_mobymask_deployment" Created 0.0s
@@ -116,7 +116,7 @@ This will run the `mobymask-v2-watcher` including:
 * A relay node which is in a federated setup with relay nodes set in the env file
 * A peer node which connects to the watcher relay node as an entrypoint to the MobyMask watcher p2p network. This peer listens for messages from other peers on the network and logs them out to the console

-The watcher endpoint is exposed on host port `3001` and the relay node endpoint is exposed on host port `9090`
+The watcher GraphQL endpoint is exposed on host port `3001` and the relay node endpoint is exposed on host port `9090`

 To list down and monitor the running containers:

@@ -156,18 +156,30 @@ Check watcher container logs to get multiaddr advertised by the watcher's relay

 # mobymask_v2-mobymask-watcher-server-1 | 2023-04-20T04:22:57.069Z laconic:relay Relay node started with id 12D3KooWKef84LAcBNb9wZNs6jC5kQFXjddo47hK6AGHD2dSvGai (characteristic-black-pamella)
 # mobymask_v2-mobymask-watcher-server-1 | 2023-04-20T04:22:57.069Z laconic:relay Listening on:
-# mobymask_v2-mobymask-watcher-server-1 | 2023-04-20T04:22:57.070Z laconic:relay /dns4/example.com/tcp/443/wss/p2p/12D3KooWKef84LAcBNb9wZNs6jC5kQFXjddo47hK6AGHD2dSvGai
+# mobymask_v2-mobymask-watcher-server-1 | 2023-04-20T04:22:57.070Z laconic:relay /dns4/mobymask.example.com/tcp/443/wss/p2p/12D3KooWKef84LAcBNb9wZNs6jC5kQFXjddo47hK6AGHD2dSvGai
 ```

 ## Web App

-To be able to connect to the relay node from remote peers, it needs to be publicly reachable. Configure your website with SSL and the `https` traffic forwarded to port `9090`.
+To be able to connect to the relay node from remote peers, it needs to be publicly reachable.
+Configure your website with SSL and the `https` traffic reverse proxied as:
+* `/graphql` to port `3001` (watcher GQL endpoint)
+* `/` to port `9090` (relay node)

-For example, a Nginx configuration for domain `example.com` would look something like:
+For example, a Nginx configuration for domain `mobymask.example.com` would look something like:

 ```bash
 server {
-server_name example.com;
+server_name mobymask.example.com;

+location /graphql {
+proxy_set_header Host $host;
+proxy_set_header X-Real-IP $remote_addr;
+proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+proxy_set_header X-Forwarded-Proto $scheme;
+proxy_pass http://127.0.0.1:3001;
+proxy_read_timeout 90;
+}
+
 # https://nginx.org/en/docs/http/websocket.html
 location / {
@@ -182,35 +194,37 @@ For example, a Nginx configuration for domain `example.com` would look something

 listen [::]:443 ssl ipv6only=on; # managed by Certbot
 listen 443 ssl; # managed by Certbot
-ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem; # managed by Certbot
-ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem; # managed by Certbot
+ssl_certificate /etc/letsencrypt/live/mobymask.example.com/fullchain.pem; # managed by Certbot
+ssl_certificate_key /etc/letsencrypt/live/mobymask.example.com/privkey.pem; # managed by Certbot
 include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
 ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
 }

 server {
-if ($host = example.com) {
+if ($host = mobymask.example.com) {
 return 301 https://$host$request_uri;
 } # managed by Certbot

 listen 80;
 listen [::]:80;

-server_name example.com;
+server_name mobymask.example.com;
 return 404; # managed by Certbot
 }
 ```

-To connect a browser peer to the watcher's relay node:
-* Visit https://mobymask-lxdao-app.dev.vdb.to/
+To test the web-app, either visit https://mobymask-lxdao-app.dev.vdb.to/ or follow [web-app.md](./web-app.md) to deploy the app locally that hits your watcher's GQL endpoint
+
+Connect a browser peer to the watcher's relay node:
 * Click on debug panel on bottom right of the homepage
 * Select `<custom>` in `Primary Relay` dropdown on the right and enter the watcher relay node's multiaddr
 * Click on `UPDATE` to refresh the page and connect to the watcher's relay node; you should see the relay node's multiaddr in `Self Node Info` on the debug panel
 * Switch to the `GRAPH (PEERS)` tab to see peers connected to this browser node and the `GRAPH (NETWORK)` tab to see the whole MobyMask p2p network

-Perform transactions (invite required):
-* Open the invite link in a browser and open the debug panel
-* Confirm that the browser peer is connected to at least one other peer, then close the debug panel
+Perform transactions:
+* An invitation is required to be able to perform transactions; ask an existing user of the app for an invite
+* In a browser, close the app if it's already open and then open the invite link
+* From the debug panel, confirm that the browser peer is connected to at least one other peer
 * Check the status for a phisher to be reported in the `Check Phisher Status` section on homepage
 * Select `Report Phisher` option in the `Pending reports` section, enter multiple phisher records and click on the `Submit batch to p2p network` button; this broadcasts signed invocations to peers on the network, including the watcher peer
 * Check the watcher container logs to see the message received:
@@ -287,18 +301,17 @@ Clear volumes created by this stack:

 ```bash
 # List all relevant volumes
-docker volume ls -q --filter "name=mobymask_v2*"
+docker volume ls -q --filter "name=mobymask_v2"

 # Expected output:

-# mobymask_v2_fixturenet_geth_accounts
 # mobymask_v2_mobymask_deployment
 # mobymask_v2_mobymask_watcher_db_data
 # mobymask_v2_peers_ids


 # Remove all the listed volumes
-docker volume rm $(docker volume ls -q --filter "name=mobymask_v2*")
+docker volume rm $(docker volume ls -q --filter "name=mobymask_v2")
 ```

 ## Troubleshooting

app/data/stacks/mobymask-v2/watcher-p2p-network/web-app.md (new file, 162 lines)
@@ -0,0 +1,162 @@
+# MobyMask Watcher P2P Network - Web App
+
+Instructions to setup and deploy the MobyMask app locally, pointed to a watcher on the p2p network
+
+## Prerequisites
+
+* Laconic Stack Orchestrator ([installation](/README.md#install))
+* Watcher GQL endpoint
+
+## Setup
+
+Build the container images:
+
+```bash
+laconic-so --stack mobymask-v2 build-containers --include cerc/react-peer,cerc/mobymask-ui
+```
+
+Check that the required images are created in the local image registry:
+
+```bash
+docker image ls
+
+# Expected output:
+
+# REPOSITORY TAG IMAGE ID CREATED SIZE
+# cerc/react-peer local d66b144dbb53 4 days ago 868MB
+# cerc/mobymask-ui local e456bf9937ec 4 days ago 1.67GB
+# .
+# .
+```
+
+## Deploy
+
+### Configuration
+
+Create an env file `mobymask-app.env`:
+
+```bash
+touch mobymask-app.env
+```
+
+Add the following contents to `mobymask-app.env`:
+
+```bash
+# Watcher endpoint used by the app for GQL queries
+CERC_APP_WATCHER_URL="http://127.0.0.1:3001"
+
+
+# DO NOT CHANGE THESE VALUES
+CERC_DEPLOYED_CONTRACT="0x2B6AFbd4F479cE4101Df722cF4E05F941523EaD9"
+CERC_RELAY_NODES=["/dns4/relay1.dev.vdb.to/tcp/443/wss/p2p/12D3KooWAx83SM9GWVPc9v9fNzLzftRX6EaAFMjhYiFxRYqctcW1","/dns4/relay2.dev.vdb.to/tcp/443/wss/p2p/12D3KooWBycy6vHVEfUwwYRbPLBdb5gx9gtFSEMpErYPUjUkDNkm","/dns4/relay3.dev.vdb.to/tcp/443/wss/p2p/12D3KooWARcUJsiGCgiygiRVVK94U8BNSy8DFBbzAF3B6orrabwn"]
+```
+
+Replace `CERC_APP_WATCHER_URL` with the watcher's endpoint (eg. `https://mobymask.example.com`)
+
+### Deploy the stack
+
+```bash
+laconic-so --stack mobymask-v2 deploy --cluster mm_v2 --include mobymask-app --env-file mobymask-app.env up lxdao-mobymask-app
+
+# Expected output (ignore the "The X variable is not set. Defaulting to a blank string." warnings):
+
+# [+] Running 4/4
+# ✔ Network mm_v2_default Created 0.1s
+# ✔ Volume "mm_v2_peers_ids" Created 0.0s
+# ✔ Volume "mm_v2_mobymask_deployment" Created 0.0s
+# ✔ Container mm_v2-lxdao-mobymask-app-1 Started 1.1s
+```
+
+This will run the `lxdao-mobymask-app` (at `http://localhost:3004`) pointed to `CERC_APP_WATCHER_URL` for GQL queries
+
+To monitor the running container:
+
+```bash
+# With status
+docker ps
+
+# Expected output:
+
+# CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
+# f1369dbae1c9 cerc/mobymask-ui:local "docker-entrypoint.s…" 2 minutes ago Up 2 minutes (healthy) 0.0.0.0:3004->80/tcp mm_v2-lxdao-mobymask-app-1
+
+# Check logs for a container
+docker logs -f mm_v2-lxdao-mobymask-app-1
+
+# Expected output:
+
+# .
+# .
+# .
+# Available on:
+# http://127.0.0.1:80
+# http://192.168.0.2:80
+# Hit CTRL-C to stop the server
+```
+
+Note: For opening an invite link on this deployed app, replace the URL part before `/#` with `http://localhost:3004`
+For example: `http://localhost:3004/#/members?invitation=XYZ`
+
+In order to host the app using a public domain, configure your website with SSL and `https` traffic reverse proxied to port `3004`.
+
+For example, a Nginx configuration for domain `my-mobymask-app.example.com` would look something like:
+
+```bash
+server {
+server_name my-mobymask-app.example.com;
+
+location / {
+proxy_pass http://localhost:3004;
+proxy_set_header Host $host;
+proxy_set_header X-Real-IP $remote_addr;
+proxy_set_header X-Forwarded-Proto $scheme;
+proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+}
+
+listen [::]:443 ssl;
+listen 443 ssl;
+ssl_certificate /etc/letsencrypt/live/my-mobymask-app.example.com/fullchain.pem;
+ssl_certificate_key /etc/letsencrypt/live/my-mobymask-app.example.com/privkey.pem;
+include /etc/letsencrypt/options-ssl-nginx.conf;
+ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem;
+}
+
+server {
+if ($host = my-mobymask-app.example.com) {
+return 301 https://$host$request_uri;
+} # managed by Certbot
+
+server_name my-mobymask-app.example.com;
+listen 80;
+return 404; # managed by Certbot
+}
+```
+
+## Clean up
+
+Stop all services running in the background:
+
+```bash
+laconic-so --stack mobymask-v2 deploy --cluster mm_v2 --include mobymask-app down
+
+# Expected output:
+
+# [+] Running 2/2
+# ✔ Container mm_v2-lxdao-mobymask-app-1 Removed 10.6s
+# ✔ Network mm_v2_default Removed 0.5s
+```
+
+Clear volumes created by this stack:
+
+```bash
+# List all relevant volumes
+docker volume ls -q --filter "name=mm_v2"
+
+# Expected output:
+
+# mm_v2_mobymask_deployment
+# mm_v2_peers_ids
+
+# Remove all the listed volumes
+docker volume rm $(docker volume ls -q --filter "name=mm_v2")
+```
@@ -22,16 +22,12 @@ Create and update an env file to be used in the next step ([defaults](../../conf

 ```bash
 # Set of relay nodes to be used by the web-app
-# (use double quotes " for strings)
+# (use double quotes " for strings, avoid space after commas)
 # Eg. CERC_RELAY_NODES=["/dns4/example.com/tcp/443/wss/p2p/12D3KooWGHmDDCc93XUWL16FMcTPCGu2zFaMkf67k8HZ4gdQbRDr"]
 CERC_RELAY_NODES=[]

 # Also add if running MobyMask app:

-# External watcher endpoint (to check if watcher is up)
-CERC_WATCHER_HOST=
-CERC_WATCHER_PORT=
-
 # Watcher endpoint used by the app for GQL queries
 CERC_APP_WATCHER_URL="http://127.0.0.1:3001"

@@ -50,7 +46,7 @@ For running mobymask-app
 ```bash
 laconic-so --stack mobymask-v2 deploy --include mobymask-app --env-file <PATH_TO_ENV_FILE> up

-# Runs on host port 3002
+# Runs mobymask-app on host port 3002 and lxdao-mobymask-app on host port 3004
 ```

 For running peer-test-app
@ -74,7 +74,13 @@ def command(ctx, include, exclude, env_file, cluster, command, extra_args):
    elif command == "down":
        if verbose:
            print("Running compose down")

        timeout_arg = None
        if extra_args_list:
            timeout_arg = extra_args_list[0]

        # Specify shutdown timeout (default 10s) to give services enough time to shutdown gracefully
        docker.compose.down(timeout=timeout_arg)
    elif command == "exec":
        if extra_args_list is None or len(extra_args_list) < 2:
            print("Usage: exec <service> <cmd>")
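
With this change, the first extra argument after `down` is forwarded to compose as the shutdown timeout. A usage sketch, assuming extra CLI arguments after the `down` command are passed through unchanged (the exact invocation may differ):

```bash
# Allow services up to 30 seconds to stop gracefully instead of the 10s default
laconic-so --stack mobymask-v2 deploy --cluster mm_v2 down 30
```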

183  docs/laconicd-fixturenet.md  Normal file
@ -0,0 +1,183 @@
# Running a laconicd fixturenet with console

The following tutorial explains the steps to run a laconicd fixturenet with CLI and web console that displays records in the registry. It is designed as an introduction to Stack Orchestrator and to showcase one component of the Laconic Stack. Prior to Stack Orchestrator, the following four repositories had to be cloned and set up manually:

- https://github.com/cerc-io/laconicd
- https://github.com/cerc-io/laconic-sdk
- https://github.com/cerc-io/laconic-registry-cli
- https://github.com/cerc-io/laconic-console

Now, with Stack Orchestrator, it takes just a few quick commands. Additionally, the `docker` and `docker compose` integration on the back-end allows the stack to persist easily, facilitating repeat workflows.

## Setup laconic-so

To avoid hiccups on Mac M1/M2 and other local-machine quirks that may affect the user experience, this tutorial uses a fresh Digital Ocean (DO) droplet with the following specs:
16 GB Memory / 8 Intel vCPUs / 160 GB Disk.

1. Log in to the droplet as root (either by SSH key or password set in the DO console):

```
ssh root@IP
```

2. Get the install script, give it executable permissions, and run it:

```
curl -o install.sh https://raw.githubusercontent.com/cerc-io/stack-orchestrator/main/scripts/quick-install-ubuntu.sh
```
```
chmod +x install.sh
```
```
bash install.sh
```

3. Confirm docker was installed and activate the changes in `~/.profile`:

```
docker run hello-world
```
```
source ~/.profile
```

4. Verify installation:

```
laconic-so version
```

## Setup the laconic fixturenet stack

1. Get the repositories

```
laconic-so --stack fixturenet-laconic-loaded setup-repositories --include cerc-io/laconicd,cerc-io/laconic-sdk,cerc-io/laconic-registry-cli,cerc-io/laconic-console
```

2. Set this environment variable to the Laconic self-hosted Gitea instance:

```
export CERC_NPM_REGISTRY_URL=https://git.vdb.to/api/packages/cerc-io/npm/
```

3. Build the containers:

```
laconic-so --stack fixturenet-laconic-loaded build-containers
```

It's possible to run into an `ESOCKETTIMEDOUT` error, e.g., `error An unexpected error occurred: "https://registry.yarnpkg.com/@material-ui/icons/-/icons-4.11.3.tgz: ESOCKETTIMEDOUT"`. This may happen even if you have a great internet connection. In that case, re-run the `build-containers` command.

4. Set this environment variable to your droplet's IP address:

```
export LACONIC_HOSTED_ENDPOINT=http://<your-IP>
```

5. Deploy the stack:

```
laconic-so --stack fixturenet-laconic-loaded deploy up
```

6. Check the logs:

```
laconic-so --stack fixturenet-laconic-loaded deploy logs
```

You'll see output from `laconicd` and the block height should be >1 to confirm it is running:

```
laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:29PM INF indexed block exents height=12 module=txindex server=node
laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF Timed out dur=4976.960115 height=13 module=consensus round=0 server=node step=1
laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF received proposal module=consensus proposal={"Type":32,"block_id":{"hash":"D26C088A711F912ADB97888C269F628DA33153795621967BE44DCB43C3D03CA4","parts":{"hash":"22411A20B7F14CDA33244420FBDDAF24450C0628C7A06034FF22DAC3699DDCC8","total":1}},"height":13,"pol_round":-1,"round":0,"signature":"DEuqnaQmvyYbUwckttJmgKdpRu6eVm9i+9rQ1pIrV2PidkMNdWRZBLdmNghkIrUzGbW8Xd7UVJxtLRmwRASgBg==","timestamp":"2023-04-18T21:30:01.49450663Z"} server=node
laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF received complete proposal block hash=D26C088A711F912ADB97888C269F628DA33153795621967BE44DCB43C3D03CA4 height=13 module=consensus server=node
laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF finalizing commit of block hash={} height=13 module=consensus num_txs=0 root=1A8CA1AF139CCC80EC007C6321D8A63A46A793386EE2EDF9A5CA0AB2C90728B7 server=node
laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF minted coins from module account amount=2059730459416582643aphoton from=mint module=x/bank
laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF executed block height=13 module=state num_invalid_txs=0 num_valid_txs=0 server=node
laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF commit synced commit=436F6D6D697449447B5B363520313037203630203232372039352038352032303820313334203231392032303520313433203130372031343920313431203139203139322038362031323720362031383520323533203137362031333820313735203135392031383620323334203135382031323120313431203230342037335D3A447D
laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF committed state app_hash=416B3CE35F55D086DBCD8F6B958D13C0567F06B9FDB08AAF9FBAEA9E798DCC49 height=13 module=state num_txs=0 server=node
laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF indexed block exents height=13 module=txindex server=node
```

7. Confirm operation of the registry CLI:

```
laconic-so --stack fixturenet-laconic-loaded deploy exec cli "laconic cns status"
```

## Configure Digital Ocean firewall

Let's open some ports.

1. In the Digital Ocean web console, navigate to your droplet's main page. Select the "Networking" tab and scroll down to "Firewall".

2. Get the port for the running console:

```
echo http://IP:$(laconic-so --stack fixturenet-laconic-loaded deploy port laconic-console 80 | cut -d ':' -f 2)
```
```
http://IP:32778
```

3. Go back to the Digital Ocean web console and add an Inbound Rule for Custom TCP on the port found above:

- `32778` in this example, but yours will be different.
- do the same for port `9473`

Additional ports will need to be opened depending on your application. Ensure you add your droplet to this new Firewall and wait a minute or so for the update to propagate.
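
Alternatively, the same rules can be created from the command line with DigitalOcean's `doctl` CLI. A rough sketch, assuming `doctl` is installed and authenticated; the firewall name and console port below are examples, and the exact rule syntax should be checked against `doctl compute firewall create --help`:

```bash
# Create a firewall that keeps SSH open and exposes the console and API ports,
# then attach it to the droplet (substitute your own droplet ID and console port)
doctl compute firewall create \
  --name laconic-fixturenet \
  --inbound-rules "protocol:tcp,ports:22,address:0.0.0.0/0 protocol:tcp,ports:32778,address:0.0.0.0/0 protocol:tcp,ports:9473,address:0.0.0.0/0" \
  --outbound-rules "protocol:tcp,ports:all,address:0.0.0.0/0 protocol:udp,ports:all,address:0.0.0.0/0" \
  --droplet-ids <your-droplet-id>
```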

4. Navigate to http://IP:port and ensure laconic-console is functioning as expected:

- ensure you are connected to `laconicd`; no error message should pop up
- the wifi symbol in the bottom right should have a green check mark beside it
- navigate to the status tab; it should display similar/identical information
- navigate to the config tab; you'll see something like the following (with your IP):

```
wns
webui http://68.183.195.210:9473/console
server http://68.183.195.210:9473/api
```

## Publish and query a sample record to the registry

1. The following command will create a bond and publish a record:

```
laconic-so --stack fixturenet-laconic-loaded deploy exec cli ./scripts/create-demo-records.sh
```

You'll get output like:

```
Balance is: 99998999999999998999600000
Created bond with id: dd88e8d6f9567b32b28e70552aea4419c5dd3307ebae85a284d1fe38904e301a
Published demo-record-1.yml with id: bafyreierh3xnfivexlscdwubvczmddsnf46uytyfvrbdhkjzztvsz6ruly
```

The sample record we deployed looks like:

```
TODO
```

2. Return to the laconic-console

- the published record should now be viewable
- explore it for more information
- click on the link that opens the GraphQL console
- the query is pre-loaded; click the button to run it
- inspect the output

3. Try out additional CLI commands

- these are documented [here](https://github.com/cerc-io/laconic-registry-cli#readme) and updates are forthcoming
- e.g.:

```
laconic-so --stack fixturenet-laconic-loaded deploy exec cli "laconic cns record list"
```
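
For instance, to fetch the record published earlier by its id (the `record get` subcommand and its `--id` flag are assumed from the registry CLI's documentation; run `laconic cns --help` inside the cli container if the syntax differs):

```bash
# Look up the demo record by the id printed by create-demo-records.sh
laconic-so --stack fixturenet-laconic-loaded deploy exec cli "laconic cns record get --id bafyreierh3xnfivexlscdwubvczmddsnf46uytyfvrbdhkjzztvsz6ruly"
```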