Handle restarts for services in fixturenet-optimism stack (#282)

* Check existing L1 contracts deployment

* Rename volume used for generated L2 config

* Check for existing L2 geth data directory

* Cross-check existing L2 config against L1 deployment config

* Verify sequencer key in existing L2 geth data directory

* Add instructions to troubleshoot corrupt L2 geth dir

* Separate out instructions to run L2 with external L1

* Update docs

Former-commit-id: 9ffa9bb5a9
prathamesh0 authored on 2023-04-05 10:25:50 +05:30, committed by GitHub
parent 0c5f252465
commit 464ef89a01
7 changed files with 247 additions and 55 deletions

@@ -14,6 +14,7 @@ services:
"./wait-for-it.sh -h $${L1_HOST} -p $${L1_PORT} -s -t 60 -- ./run.sh"
    volumes:
      - ../config/wait-for-it.sh:/app/packages/contracts-bedrock/wait-for-it.sh
      - ../container-build/cerc-optimism-contracts/hardhat-tasks/verify-contract-deployment.ts:/app/packages/contracts-bedrock/tasks/verify-contract-deployment.ts
      - ../container-build/cerc-optimism-contracts/hardhat-tasks/rekey-json.ts:/app/packages/contracts-bedrock/tasks/rekey-json.ts
      - ../container-build/cerc-optimism-contracts/hardhat-tasks/send-balance.ts:/app/packages/contracts-bedrock/tasks/send-balance.ts
      - ../config/fixturenet-optimism/optimism-contracts/update-config.js:/app/packages/contracts-bedrock/update-config.js
@@ -24,7 +25,7 @@ services:
    extra_hosts:
      - "host.docker.internal:host-gateway"
  # Generates the config files required for L2 (outputs to volume op_node_data)
  # Generates the config files required for L2 (outputs to volume l2_config)
  op-node-l2-config-gen:
    image: cerc/optimism-op-node:local
    depends_on:
@@ -35,12 +36,12 @@ services:
    volumes:
      - ../config/fixturenet-optimism/generate-l2-config.sh:/app/generate-l2-config.sh
      - l1_deployment:/contracts-bedrock:ro
      - op_node_data:/app
      - l2_config:/app
    command: ["sh", "/app/generate-l2-config.sh"]
    extra_hosts:
      - "host.docker.internal:host-gateway"
  # Initializes and runs the L2 execution client
  # Initializes and runs the L2 execution client (outputs to volume l2_geth_data)
  op-geth:
    image: cerc/optimism-l2geth:local
    depends_on:
@@ -48,8 +49,9 @@ services:
        condition: service_started
    volumes:
      - ../config/fixturenet-optimism/run-op-geth.sh:/run-op-geth.sh
      - op_node_data:/op-node:ro
      - l2_config:/op-node:ro
      - l2_accounts:/l2-accounts:ro
      - l2_geth_data:/datadir
    entrypoint: "sh"
    command: "/run-op-geth.sh"
    ports:
@@ -71,7 +73,7 @@ services:
    image: cerc/optimism-op-node:local
    volumes:
      - ../config/fixturenet-optimism/run-op-node.sh:/app/run-op-node.sh
      - op_node_data:/op-node-data:ro
      - l2_config:/op-node-data:ro
      - l2_accounts:/l2-accounts:ro
    command: ["sh", "/app/run-op-node.sh"]
    ports:
@@ -110,4 +112,5 @@ volumes:
  fixturenet_geth_accounts:
  l1_deployment:
  l2_accounts:
  op_node_data:
  l2_config:
  l2_geth_data:

@@ -4,6 +4,27 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
  set -x
fi
# Check for existing L2 config (on restarts)
if [ -f /app/jwt.txt ] && [ -f /app/rollup.json ]; then
  echo "Found existing L2 config, cross-checking with L1 deployment config"
  SOURCE_L1_CONF=$(cat /contracts-bedrock/deploy-config/getting-started.json)
  EXP_L1_BLOCKHASH=$(echo "$SOURCE_L1_CONF" | jq -r '.l1StartingBlockTag')
  EXP_BATCHER=$(echo "$SOURCE_L1_CONF" | jq -r '.batchSenderAddress')
  GEN_L2_CONF=$(cat /app/rollup.json)
  GEN_L1_BLOCKHASH=$(echo "$GEN_L2_CONF" | jq -r '.genesis.l1.hash')
  GEN_BATCHER=$(echo "$GEN_L2_CONF" | jq -r '.genesis.system_config.batcherAddr')
  if [ "$EXP_L1_BLOCKHASH" = "$GEN_L1_BLOCKHASH" ] && [ "$EXP_BATCHER" = "$GEN_BATCHER" ]; then
    echo "Config cross-checked, exiting"
    exit 0
  fi
  echo "Existing L2 config doesn't match the L1 deployment config, please clear the L2 config volume before starting"
  exit 1
fi
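# Generate the L2 config files (genesis, rollup config, JWT) from the L1 deployment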
op-node genesis l2 \
  --deploy-config /contracts-bedrock/deploy-config/getting-started.json \
  --deployment-dir /contracts-bedrock/deployments/getting-started/ \

@@ -4,22 +4,43 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
  set -x
fi
# TODO Support restarts; fixturenet-eth-geth currently starts fresh on a restart
# Exit if a deployment already exists (on restarts)
# if [ -d "deployments/getting-started" ]; then
# echo "Deployment directory deployments/getting-started already exists, exiting"
# exit 0
# fi
echo "Using L1 RPC endpoint ${L1_RPC}"
# Append tasks/index.ts file
echo "import './rekey-json'" >> tasks/index.ts
echo "import './send-balance'" >> tasks/index.ts
IMPORT_1="import './verify-contract-deployment'"
IMPORT_2="import './rekey-json'"
IMPORT_3="import './send-balance'"
# Append mounted tasks to tasks/index.ts file if not present
if ! grep -Fxq "$IMPORT_1" tasks/index.ts; then
  echo "$IMPORT_1" >> tasks/index.ts
  echo "$IMPORT_2" >> tasks/index.ts
  echo "$IMPORT_3" >> tasks/index.ts
fi
# Update the chainId in the hardhat config
sed -i "/getting-started/ {n; s/.*chainId.*/ chainId: $L1_CHAIN_ID,/}" hardhat.config.ts
# Exit if a deployment already exists (on restarts)
# Note: fixturenet-eth-geth currently starts fresh on a restart
if [ -d "deployments/getting-started" ]; then
  echo "Deployment directory deployments/getting-started found, checking SystemDictator deployment"
  # Read JSON file into variable
  SYSTEM_DICTATOR_DETAILS=$(cat deployments/getting-started/SystemDictator.json)
  # Parse JSON into variables
  SYSTEM_DICTATOR_ADDRESS=$(echo "$SYSTEM_DICTATOR_DETAILS" | jq -r '.address')
  SYSTEM_DICTATOR_TXHASH=$(echo "$SYSTEM_DICTATOR_DETAILS" | jq -r '.transactionHash')
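  # Verify that the recorded deployment transaction actually deployed this contract on the current L1 (fails if L1 was restarted fresh)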
  if yarn hardhat verify-contract-deployment --contract "${SYSTEM_DICTATOR_ADDRESS}" --transaction-hash "${SYSTEM_DICTATOR_TXHASH}"; then
    echo "Deployment verification successful, exiting"
    exit 0
  else
    echo "Deployment verification failed, please clear the L1 deployment volume before starting"
    exit 1
  fi
fi
# Generate the L2 account addresses
yarn hardhat rekey-json --output /l2-accounts/keys.json

@@ -4,16 +4,30 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
  set -x
fi
mkdir datadir
echo "pwd" > datadir/password
# TODO: Add in container build or use another tool
echo "installing jq"
echo "Installing jq"
apk update && apk add jq
# Get SEQUENCER KEY from keys.json
# Get SEQUENCER key from keys.json
SEQUENCER_KEY=$(jq -r '.Sequencer.privateKey' /l2-accounts/keys.json | tr -d '"')
# Initialize op-geth if datadir/geth not found
if [ -f /op-node/jwt.txt ] && [ -d datadir/geth ]; then
  echo "Found existing datadir, checking block signer key"
  BLOCK_SIGNER_KEY=$(cat datadir/block-signer-key)
  if [ "$SEQUENCER_KEY" = "$BLOCK_SIGNER_KEY" ]; then
    echo "Sequencer and block signer keys match, skipping initialization"
  else
    echo "Sequencer and block signer keys don't match, please clear the L2 geth data volume before starting"
    exit 1
  fi
else
  echo "Initializing op-geth"
  mkdir -p datadir
  echo "pwd" > datadir/password
  echo $SEQUENCER_KEY > datadir/block-signer-key
  geth account import --datadir=datadir --password=datadir/password datadir/block-signer-key
@@ -24,14 +38,16 @@ do
sleep 5
done
echo "Config files created by op-node, proceeding with script..."
echo "Config files created by op-node, proceeding with the initialization..."
cp /op-node/genesis.json ./
geth init --datadir=datadir genesis.json
geth init --datadir=datadir /op-node/genesis.json
echo "Node Initialized"
fi
SEQUENCER_ADDRESS=$(jq -r '.Sequencer.address' /l2-accounts/keys.json | tr -d '"')
echo "SEQUENCER_ADDRESS: ${SEQUENCER_ADDRESS}"
cp /op-node/jwt.txt ./
# Run op-geth
geth \
  --datadir ./datadir \
  --http \
@@ -52,7 +68,7 @@ geth \
  --authrpc.vhosts="*" \
  --authrpc.addr=0.0.0.0 \
  --authrpc.port=8551 \
  --authrpc.jwtsecret=./jwt.txt \
  --authrpc.jwtsecret=/op-node/jwt.txt \
  --rollup.disabletxpoolgossip=true \
  --password=./datadir/password \
  --allow-insecure-unlock \

@@ -0,0 +1,30 @@
import { task } from 'hardhat/config'
import '@nomiclabs/hardhat-ethers'
task(
  'verify-contract-deployment',
  'Verifies the given contract deployment transaction'
)
  .addParam('contract', 'Address of the contract deployed')
  .addParam('transactionHash', 'Hash of the deployment transaction')
  .setAction(async ({ contract, transactionHash }, { ethers }) => {
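    // Connect to the L1 endpoint configured via the L1_RPC env var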
    const provider = new ethers.providers.JsonRpcProvider(
      `${process.env.L1_RPC}`
    )
    // Get the deployment tx receipt
    const receipt = await provider.getTransactionReceipt(transactionHash)
    if (
      receipt &&
      receipt.contractAddress &&
      receipt.contractAddress === contract
    ) {
      console.log(
        `Deployment for contract ${contract} in transaction ${transactionHash} verified`
      )
      process.exit(0)
    } else {
      console.log(`Contract ${contract} deployment verification failed`)
      process.exit(1)
    }
  })
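For reference, the deployment check in the contracts script above invokes this task as:
```bash
# Exits 0 when the receipt of the given tx reports the expected contract address
yarn hardhat verify-contract-deployment --contract "${SYSTEM_DICTATOR_ADDRESS}" --transaction-hash "${SYSTEM_DICTATOR_TXHASH}"
```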

@@ -0,0 +1,87 @@
# fixturenet-optimism
Instructions to set up and deploy an L2 fixturenet using [Optimism](https://stack.optimism.io)
## Setup
Prerequisite: An L1 Ethereum RPC endpoint
Clone required repositories:
```bash
laconic-so --stack fixturenet-optimism setup-repositories --exclude cerc-io/go-ethereum
```
Check out the required versions and branches in the repos:
```bash
# Optimism
cd ~/cerc/optimism
git checkout @eth-optimism/sdk@0.0.0-20230329025055
```
Build the container images:
```bash
laconic-so --stack fixturenet-optimism build-containers --include cerc/foundry,cerc/optimism-contracts,cerc/optimism-op-node,cerc/optimism-l2geth,cerc/optimism-op-batcher
```
This should create the required docker images in the local image registry:
* `cerc/foundry`
* `cerc/optimism-contracts`
* `cerc/optimism-l2geth`
* `cerc/optimism-op-batcher`
* `cerc/optimism-op-node`
## Deploy
Update the [l1-params.env](../../config/fixturenet-optimism/l1-params.env) file with the L1 endpoint details (`L1_RPC`, `L1_HOST` and `L1_PORT`) and other parameters; an example is shown below
* NOTE:
  * Stack Orchestrator needs to be run in [`dev`](/docs/CONTRIBUTING.md#install-developer-mode) mode to be able to edit the env file
  * If L1 is running on the host machine, use `host.docker.internal` as the hostname to access the host port
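For instance, for an L1 running on the host machine (a sketch with illustrative values; `L1_CHAIN_ID` and the port must match your L1 node):
```bash
# l1-params.env (illustrative values, not the shipped defaults)
L1_CHAIN_ID=1212
L1_HOST="host.docker.internal"
L1_PORT=8545
L1_RPC="http://host.docker.internal:8545"
```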
Deploy the stack:
```bash
laconic-so --stack fixturenet-optimism deploy up --include fixturenet-optimism
```
The `fixturenet-optimism-contracts` service may take a while (~15 minutes) to complete, as it:
1. waits for the 'Merge' to happen on L1
2. waits for a finalized block to exist on L1 (so that it can be taken as a starting block for rollups); a manual check for this is sketched after this list
3. deploys the L1 contracts
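To check whether L1 has started finalizing blocks yet (a minimal sketch using the standard JSON-RPC API; assumes `L1_RPC` is set in your shell and `jq` is installed):
```bash
# Prints the latest finalized block number; null/error until L1 has one
curl -s -X POST -H "Content-Type: application/json" \
  --data '{"jsonrpc":"2.0","method":"eth_getBlockByNumber","params":["finalized", false],"id":1}' \
  "$L1_RPC" | jq -r '.result.number'
```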
To list and monitor the running containers:
```bash
laconic-so --stack fixturenet-optimism deploy ps
# With status
docker ps
# Check logs for a container
docker logs -f <CONTAINER_ID>
```
## Clean up
Stop all services running in the background:
```bash
laconic-so --stack fixturenet-optimism deploy down --include fixturenet-optimism
```
Clear volumes created by this stack:
```bash
# List all relevant volumes
docker volume ls -q --filter name=laconic*
# Remove all the listed volumes
docker volume rm $(docker volume ls -q --filter name=laconic*)
```
## Troubleshooting
See [Troubleshooting](./README.md#troubleshooting)

@@ -2,15 +2,14 @@
Instructions to set up and deploy an end-to-end L1+L2 stack with [fixturenet-eth](../fixturenet-eth/) (L1) and [Optimism](https://stack.optimism.io) (L2)
We also support running just the L2 part of the stack against an external L1 endpoint; follow [L2-ONLY](./L2-ONLY.md) for that setup.
## Setup
Clone required repositories:
```bash
laconic-so --stack fixturenet-optimism setup-repositories
# Exclude cerc-io/go-ethereum repository if running L1 separately
laconic-so --stack fixturenet-optimism setup-repositories --exclude cerc-io/go-ethereum
```
Check out the required versions and branches in the repos:
@@ -25,9 +24,6 @@ Build the container images:
```bash
laconic-so --stack fixturenet-optimism build-containers
# Only build containers required for L2 if running L1 separately
laconic-so --stack fixturenet-optimism build-containers --include cerc/foundry,cerc/optimism-contracts,cerc/optimism-op-node,cerc/optimism-l2geth,cerc/optimism-op-batcher
```
This should create the required docker images in the local image registry:
@@ -43,19 +39,10 @@ This should create the required docker images in the local image registry:
## Deploy
(Optional) Update the [l1-params.env](../../config/fixturenet-optimism/l1-params.env) file with the L1 endpoint details (`L1_RPC`, `L1_HOST` and `L1_PORT`) and other parameters if running L1 separately
* NOTE:
  * Stack Orchestrator needs to be run in [`dev`](/docs/CONTRIBUTING.md#install-developer-mode) mode to be able to edit the env file
  * If L1 is running on the host machine, use `host.docker.internal` as the hostname to access the host port
Deploy the stack:
```bash
laconic-so --stack fixturenet-optimism deploy up
# Only start fixturenet-optimism pod (L2) if running L1 separately
laconic-so --stack fixturenet-optimism deploy up --include fixturenet-optimism
```
The `fixturenet-optimism-contracts` service may take a while (~15 minutes) to complete, as it:
@@ -81,9 +68,6 @@ Stop all services running in the background:
```bash
laconic-so --stack fixturenet-optimism deploy down
# If only the fixturenet-optimism pod (L2) was run
laconic-so --stack fixturenet-optimism deploy down --include fixturenet-optimism
```
Clear volumes created by this stack:
@@ -96,9 +80,39 @@ docker volume ls -q --filter name=laconic*
docker volume rm $(docker volume ls -q --filter name=laconic*)
```
## Troubleshooting
* If the `op-geth` service aborts or is restarted, the following error might occur in the `op-node` service:
  ```bash
  WARN [02-16|21:22:02.868] Derivation process temporary error attempts=14 err="stage 0 failed resetting: temp: failed to find the L2 Heads to start from: failed to fetch L2 block by hash 0x0000000000000000000000000000000000000000000000000000000000000000: failed to determine block-hash of hash 0x0000000000000000000000000000000000000000000000000000000000000000, could not get payload: not found"
  ```
* This means that the data directory that `op-geth` is using is corrupted and needs to be reinitialized; the containers `op-geth`, `op-node` and `op-batcher` need to be started afresh:
  * Stop and remove the concerned containers:
    ```bash
    # List the containers
    docker ps -f "name=op-geth|op-node|op-batcher"

    # Force stop and remove the listed containers
    docker rm -f $(docker ps -qf "name=op-geth|op-node|op-batcher")
    ```
  * Remove the concerned volume:
    ```bash
    # List the volume
    docker volume ls -q --filter name=l2_geth_data

    # Remove the listed volume
    docker volume rm $(docker volume ls -q --filter name=l2_geth_data)
    ```
  * Reuse the deployment command used in [Deploy](#deploy) to restart the stopped containers; an example is shown below
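For example, if only the fixturenet-optimism pod (L2) was originally deployed (adjust to match the command you used):
```bash
laconic-so --stack fixturenet-optimism deploy up --include fixturenet-optimism
```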
## Known Issues
* Currently not supported:
  * Stopping and restarting the stack from where it left off; currently starts fresh on a restart
* Resource requirements (memory + time) for building `cerc/foundry` image are on the higher side
* `fixturenet-eth` currently starts fresh on a restart
* Resource requirements (memory + time) for building the `cerc/foundry` image are on the higher side
* `cerc/optimism-contracts` image is currently based on `cerc/foundry` (Optimism requires foundry installation)