diff --git a/.gitea/workflows/fixturenet-laconicd-test.yml b/.gitea/workflows/fixturenet-laconicd-test.yml new file mode 100644 index 00000000..ac397dad --- /dev/null +++ b/.gitea/workflows/fixturenet-laconicd-test.yml @@ -0,0 +1,55 @@ +name: Fixturenet-Laconicd-Test + +on: + push: + branches: '*' + paths: + - '!**' + - '.gitea/workflows/triggers/fixturenet-laconicd-test' + +# Needed until we can incorporate docker startup into the executor container +env: + DOCKER_HOST: unix:///var/run/dind.sock + + +jobs: + test: + name: "Run an Laconicd fixturenet test" + runs-on: ubuntu-latest + steps: + - name: 'Update' + run: apt-get update + - name: 'Setup jq' + run: apt-get install jq -y + - name: 'Check jq' + run: | + which jq + jq --version + - name: "Clone project repository" + uses: actions/checkout@v3 + # At present the stock setup-python action fails on Linux/aarch64 + # Conditional steps below workaroud this by using deadsnakes for that case only + - name: "Install Python for ARM on Linux" + if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }} + uses: deadsnakes/action@v3.0.1 + with: + python-version: '3.8' + - name: "Install Python cases other than ARM on Linux" + if: ${{ ! 
(runner.arch == 'arm64' && runner.os == 'Linux') }} + uses: actions/setup-python@v4 + with: + python-version: '3.8' + - name: "Print Python version" + run: python3 --version + - name: "Install shiv" + run: pip install shiv + - name: "Generate build version file" + run: ./scripts/create_build_tag_file.sh + - name: "Build local shiv package" + run: ./scripts/build_shiv_package.sh + - name: Start dockerd # Also needed until we can incorporate into the executor + run: | + dockerd -H $DOCKER_HOST --userland-proxy=false & + sleep 5 + - name: "Run fixturenet-laconicd tests" + run: ./tests/fixturenet-laconicd/run-test.sh diff --git a/.gitea/workflows/test-k8s-deploy.yml b/.gitea/workflows/test-k8s-deploy.yml new file mode 100644 index 00000000..84cce91a --- /dev/null +++ b/.gitea/workflows/test-k8s-deploy.yml @@ -0,0 +1,55 @@ +name: K8s Deploy Test + +on: + pull_request: + branches: '*' + push: + branches: + - main + - ci-test + paths-ignore: + - '.gitea/workflows/triggers/*' + +# Needed until we can incorporate docker startup into the executor container +env: + DOCKER_HOST: unix:///var/run/dind.sock + +jobs: + test: + name: "Run deploy test suite" + runs-on: ubuntu-latest + steps: + - name: "Clone project repository" + uses: actions/checkout@v3 + # At present the stock setup-python action fails on Linux/aarch64 + # Conditional steps below workaroud this by using deadsnakes for that case only + - name: "Install Python for ARM on Linux" + if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }} + uses: deadsnakes/action@v3.0.1 + with: + python-version: '3.8' + - name: "Install Python cases other than ARM on Linux" + if: ${{ ! 
(runner.arch == 'arm64' && runner.os == 'Linux') }} + uses: actions/setup-python@v4 + with: + python-version: '3.8' + - name: "Print Python version" + run: python3 --version + - name: "Install shiv" + run: pip install shiv + - name: "Generate build version file" + run: ./scripts/create_build_tag_file.sh + - name: "Build local shiv package" + run: ./scripts/build_shiv_package.sh + - name: Start dockerd # Also needed until we can incorporate into the executor + run: | + dockerd -H $DOCKER_HOST --userland-proxy=false & + sleep 5 + - name: "Install Go" + uses: actions/setup-go@v4 + with: + go-version: '1.21' + - name: "Install Kind" + run: go install sigs.k8s.io/kind@v0.20.0 + - name: "Debug Kind" + run: kind create cluster --retain && docker logs kind-control-plane diff --git a/.gitea/workflows/test-webapp.yml b/.gitea/workflows/test-webapp.yml new file mode 100644 index 00000000..9fbf84b2 --- /dev/null +++ b/.gitea/workflows/test-webapp.yml @@ -0,0 +1,49 @@ +name: Webapp Test + +on: + pull_request: + branches: '*' + push: + branches: + - main + - ci-test + paths-ignore: + - '.gitea/workflows/triggers/*' + +# Needed until we can incorporate docker startup into the executor container +env: + DOCKER_HOST: unix:///var/run/dind.sock + +jobs: + test: + name: "Run webapp test suite" + runs-on: ubuntu-latest + steps: + - name: "Clone project repository" + uses: actions/checkout@v3 + # At present the stock setup-python action fails on Linux/aarch64 + # Conditional steps below workaroud this by using deadsnakes for that case only + - name: "Install Python for ARM on Linux" + if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }} + uses: deadsnakes/action@v3.0.1 + with: + python-version: '3.8' + - name: "Install Python cases other than ARM on Linux" + if: ${{ ! 
(runner.arch == 'arm64' && runner.os == 'Linux') }} + uses: actions/setup-python@v4 + with: + python-version: '3.8' + - name: "Print Python version" + run: python3 --version + - name: "Install shiv" + run: pip install shiv + - name: "Generate build version file" + run: ./scripts/create_build_tag_file.sh + - name: "Build local shiv package" + run: ./scripts/build_shiv_package.sh + - name: Start dockerd # Also needed until we can incorporate into the executor + run: | + dockerd -H $DOCKER_HOST --userland-proxy=false & + sleep 5 + - name: "Run webapp tests" + run: ./tests/webapp-test/run-webapp-test.sh diff --git a/.gitea/workflows/triggers/fixturenet-laconicd-test b/.gitea/workflows/triggers/fixturenet-laconicd-test new file mode 100644 index 00000000..e6b73875 --- /dev/null +++ b/.gitea/workflows/triggers/fixturenet-laconicd-test @@ -0,0 +1,2 @@ +Change this file to trigger running the fixturenet-laconicd-test CI job + diff --git a/.github/workflows/fixturenet-laconicd.yml b/.github/workflows/fixturenet-laconicd.yml new file mode 100644 index 00000000..a16c1fe6 --- /dev/null +++ b/.github/workflows/fixturenet-laconicd.yml @@ -0,0 +1,30 @@ +name: Fixturenet-Laconicd Test + +on: + push: + branches: '*' + paths: + - '!**' + - '.github/workflows/triggers/fixturenet-laconicd-test' + +jobs: + test: + name: "Run fixturenet-laconicd test suite" + runs-on: ubuntu-latest + steps: + - name: "Clone project repository" + uses: actions/checkout@v3 + - name: "Install Python" + uses: actions/setup-python@v4 + with: + python-version: '3.8' + - name: "Print Python version" + run: python3 --version + - name: "Install shiv" + run: pip install shiv + - name: "Generate build version file" + run: ./scripts/create_build_tag_file.sh + - name: "Build local shiv package" + run: ./scripts/build_shiv_package.sh + - name: "Run fixturenet-laconicd tests" + run: ./tests/fixturenet-laconicd/run-test.sh diff --git a/.github/workflows/test-webapp.yml b/.github/workflows/test-webapp.yml new file mode 
100644 index 00000000..3b920828 --- /dev/null +++ b/.github/workflows/test-webapp.yml @@ -0,0 +1,29 @@ +name: Webapp Test + +on: + pull_request: + branches: '*' + push: + branches: '*' + +jobs: + test: + name: "Run webapp test suite" + runs-on: ubuntu-latest + steps: + - name: "Clone project repository" + uses: actions/checkout@v3 + - name: "Install Python" + uses: actions/setup-python@v4 + with: + python-version: '3.8' + - name: "Print Python version" + run: python3 --version + - name: "Install shiv" + run: pip install shiv + - name: "Generate build version file" + run: ./scripts/create_build_tag_file.sh + - name: "Build local shiv package" + run: ./scripts/build_shiv_package.sh + - name: "Run webapp tests" + run: ./tests/webapp-test/run-webapp-test.sh diff --git a/.github/workflows/triggers/fixturenet-laconicd-test b/.github/workflows/triggers/fixturenet-laconicd-test new file mode 100644 index 00000000..ad4c76a7 --- /dev/null +++ b/.github/workflows/triggers/fixturenet-laconicd-test @@ -0,0 +1,3 @@ +Change this file to trigger running the fixturenet-laconicd-test CI job + +trigger diff --git a/.gitignore b/.gitignore index 35a9c9ec..3aaa220b 100644 --- a/.gitignore +++ b/.gitignore @@ -6,5 +6,5 @@ laconic_stack_orchestrator.egg-info __pycache__ *~ package -app/data/build_tag.txt +stack_orchestrator/data/build_tag.txt /build diff --git a/README.md b/README.md index 52c06830..aa979e3a 100644 --- a/README.md +++ b/README.md @@ -64,12 +64,12 @@ laconic-so update ## Usage -The various [stacks](/app/data/stacks) each contain instructions for running different stacks based on your use case. For example: +The various [stacks](/stack_orchestrator/data/stacks) each contain instructions for running different stacks based on your use case. 
For example: -- [self-hosted Gitea](/app/data/stacks/build-support) -- [an Optimism Fixturenet](/app/data/stacks/fixturenet-optimism) -- [laconicd with console and CLI](app/data/stacks/fixturenet-laconic-loaded) -- [kubo (IPFS)](app/data/stacks/kubo) +- [self-hosted Gitea](/stack_orchestrator/data/stacks/build-support) +- [an Optimism Fixturenet](/stack_orchestrator/data/stacks/fixturenet-optimism) +- [laconicd with console and CLI](stack_orchestrator/data/stacks/fixturenet-laconic-loaded) +- [kubo (IPFS)](stack_orchestrator/data/stacks/kubo) ## Contributing diff --git a/app/data/config/fixturenet-laconicd/create-fixturenet.sh b/app/data/config/fixturenet-laconicd/create-fixturenet.sh deleted file mode 100644 index 9c30bff8..00000000 --- a/app/data/config/fixturenet-laconicd/create-fixturenet.sh +++ /dev/null @@ -1,118 +0,0 @@ -#!/bin/bash - -# TODO: this file is now an unmodified copy of cerc-io/laconicd/init.sh -# so we should have a mechanism to bundle it inside the container rather than link from here -# at deploy time. - -KEY="mykey" -CHAINID="laconic_9000-1" -MONIKER="localtestnet" -KEYRING="test" -KEYALGO="eth_secp256k1" -LOGLEVEL="info" -# trace evm -TRACE="--trace" -# TRACE="" - -# validate dependencies are installed -command -v jq > /dev/null 2>&1 || { echo >&2 "jq not installed. 
More info: https://stedolan.github.io/jq/download/"; exit 1; } - -# remove existing daemon and client -rm -rf ~/.laconic* - -make install - -laconicd config keyring-backend $KEYRING -laconicd config chain-id $CHAINID - -# if $KEY exists it should be deleted -laconicd keys add $KEY --keyring-backend $KEYRING --algo $KEYALGO - -# Set moniker and chain-id for Ethermint (Moniker can be anything, chain-id must be an integer) -laconicd init $MONIKER --chain-id $CHAINID - -# Change parameter token denominations to aphoton -cat $HOME/.laconicd/config/genesis.json | jq '.app_state["staking"]["params"]["bond_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json -cat $HOME/.laconicd/config/genesis.json | jq '.app_state["crisis"]["constant_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json -cat $HOME/.laconicd/config/genesis.json | jq '.app_state["gov"]["deposit_params"]["min_deposit"][0]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json -cat $HOME/.laconicd/config/genesis.json | jq '.app_state["mint"]["params"]["mint_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json -# Custom modules -cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["record_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json -cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json -cat $HOME/.laconicd/config/genesis.json | jq 
'.app_state["registry"]["params"]["authority_auction_commit_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json -cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_reveal_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json -cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_minimum_bid"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json - -if [[ "$TEST_REGISTRY_EXPIRY" == "true" ]]; then - echo "Setting timers for expiry tests." - - cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["record_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json - cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_grace_period"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json - cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json -fi - -if [[ "$TEST_AUCTION_ENABLED" == "true" ]]; then - echo "Enabling auction and setting timers." 
- - cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_enabled"]=true' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json - cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json - cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_grace_period"]="300s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json - cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_commits_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json - cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_reveals_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json -fi - -# increase block time (?) 
-cat $HOME/.laconicd/config/genesis.json | jq '.consensus_params["block"]["time_iota_ms"]="1000"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json - -# Set gas limit in genesis -cat $HOME/.laconicd/config/genesis.json | jq '.consensus_params["block"]["max_gas"]="10000000"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json - -# disable produce empty block -if [[ "$OSTYPE" == "darwin"* ]]; then - sed -i '' 's/create_empty_blocks = true/create_empty_blocks = false/g' $HOME/.laconicd/config/config.toml - else - sed -i 's/create_empty_blocks = true/create_empty_blocks = false/g' $HOME/.laconicd/config/config.toml -fi - -if [[ $1 == "pending" ]]; then - if [[ "$OSTYPE" == "darwin"* ]]; then - sed -i '' 's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' $HOME/.laconicd/config/config.toml - sed -i '' 's/timeout_propose = "3s"/timeout_propose = "30s"/g' $HOME/.laconicd/config/config.toml - sed -i '' 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' $HOME/.laconicd/config/config.toml - sed -i '' 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' $HOME/.laconicd/config/config.toml - sed -i '' 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' $HOME/.laconicd/config/config.toml - sed -i '' 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' $HOME/.laconicd/config/config.toml - sed -i '' 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' $HOME/.laconicd/config/config.toml - sed -i '' 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml - sed -i '' 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' $HOME/.laconicd/config/config.toml - else - sed -i 's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' $HOME/.laconicd/config/config.toml - sed -i 
's/timeout_propose = "3s"/timeout_propose = "30s"/g' $HOME/.laconicd/config/config.toml - sed -i 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' $HOME/.laconicd/config/config.toml - sed -i 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' $HOME/.laconicd/config/config.toml - sed -i 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' $HOME/.laconicd/config/config.toml - sed -i 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' $HOME/.laconicd/config/config.toml - sed -i 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' $HOME/.laconicd/config/config.toml - sed -i 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml - sed -i 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' $HOME/.laconicd/config/config.toml - fi -fi - -# Allocate genesis accounts (cosmos formatted addresses) -laconicd add-genesis-account $KEY 100000000000000000000000000aphoton --keyring-backend $KEYRING - -# Sign genesis transaction -laconicd gentx $KEY 1000000000000000000000aphoton --keyring-backend $KEYRING --chain-id $CHAINID - -# Collect genesis tx -laconicd collect-gentxs - -# Run this to ensure everything worked and that the genesis file is setup correctly -laconicd validate-genesis - -if [[ $1 == "pending" ]]; then - echo "pending mode is on, please wait for the first block committed." 
-fi - -# Start the node (remove the --pruning=nothing flag if historical queries are not needed) -laconicd start --pruning=nothing --evm.tracer=json $TRACE --log_level $LOGLEVEL --minimum-gas-prices=0.0001aphoton --json-rpc.api eth,txpool,personal,net,debug,web3,miner --api.enable --gql-server --gql-playground diff --git a/app/data/config/fixturenet-optimism/generate-l2-config.sh b/app/data/config/fixturenet-optimism/generate-l2-config.sh deleted file mode 100755 index b10048d2..00000000 --- a/app/data/config/fixturenet-optimism/generate-l2-config.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/sh -set -e -if [ -n "$CERC_SCRIPT_DEBUG" ]; then - set -x -fi - -CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}" - -# Check existing config if it exists -if [ -f /app/jwt.txt ] && [ -f /app/rollup.json ]; then - echo "Found existing L2 config, cross-checking with L1 deployment config" - - SOURCE_L1_CONF=$(cat /contracts-bedrock/deploy-config/getting-started.json) - EXP_L1_BLOCKHASH=$(echo "$SOURCE_L1_CONF" | jq -r '.l1StartingBlockTag') - EXP_BATCHER=$(echo "$SOURCE_L1_CONF" | jq -r '.batchSenderAddress') - - GEN_L2_CONF=$(cat /app/rollup.json) - GEN_L1_BLOCKHASH=$(echo "$GEN_L2_CONF" | jq -r '.genesis.l1.hash') - GEN_BATCHER=$(echo "$GEN_L2_CONF" | jq -r '.genesis.system_config.batcherAddr') - - if [ "$EXP_L1_BLOCKHASH" = "$GEN_L1_BLOCKHASH" ] && [ "$EXP_BATCHER" = "$GEN_BATCHER" ]; then - echo "Config cross-checked, exiting" - exit 0 - fi - - echo "Existing L2 config doesn't match the L1 deployment config, please clear L2 config volume before starting" - exit 1 -fi - -op-node genesis l2 \ - --deploy-config /contracts-bedrock/deploy-config/getting-started.json \ - --deployment-dir /contracts-bedrock/deployments/getting-started/ \ - --outfile.l2 /app/genesis.json \ - --outfile.rollup /app/rollup.json \ - --l1-rpc $CERC_L1_RPC - -openssl rand -hex 32 > /app/jwt.txt diff --git a/app/data/config/fixturenet-optimism/optimism-contracts/run.sh 
b/app/data/config/fixturenet-optimism/optimism-contracts/run.sh deleted file mode 100755 index d878c03f..00000000 --- a/app/data/config/fixturenet-optimism/optimism-contracts/run.sh +++ /dev/null @@ -1,131 +0,0 @@ -#!/bin/bash -set -e -if [ -n "$CERC_SCRIPT_DEBUG" ]; then - set -x -fi - -CERC_L1_CHAIN_ID="${CERC_L1_CHAIN_ID:-${DEFAULT_CERC_L1_CHAIN_ID}}" -CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}" - -CERC_L1_ACCOUNTS_CSV_URL="${CERC_L1_ACCOUNTS_CSV_URL:-${DEFAULT_CERC_L1_ACCOUNTS_CSV_URL}}" - -echo "Using L1 RPC endpoint ${CERC_L1_RPC}" - -IMPORT_1="import './verify-contract-deployment'" -IMPORT_2="import './rekey-json'" -IMPORT_3="import './send-balance'" - -# Append mounted tasks to tasks/index.ts file if not present -if ! grep -Fxq "$IMPORT_1" tasks/index.ts; then - echo "$IMPORT_1" >> tasks/index.ts - echo "$IMPORT_2" >> tasks/index.ts - echo "$IMPORT_3" >> tasks/index.ts -fi - -# Update the chainId in the hardhat config -sed -i "/getting-started/ {n; s/.*chainId.*/ chainId: $CERC_L1_CHAIN_ID,/}" hardhat.config.ts - -# Exit if a deployment already exists (on restarts) -# Note: fixturenet-eth-geth currently starts fresh on a restart -if [ -d "deployments/getting-started" ]; then - echo "Deployment directory deployments/getting-started found, checking SystemDictator deployment" - - # Read JSON file into variable - SYSTEM_DICTATOR_DETAILS=$(cat deployments/getting-started/SystemDictator.json) - - # Parse JSON into variables - SYSTEM_DICTATOR_ADDRESS=$(echo "$SYSTEM_DICTATOR_DETAILS" | jq -r '.address') - SYSTEM_DICTATOR_TXHASH=$(echo "$SYSTEM_DICTATOR_DETAILS" | jq -r '.transactionHash') - - if yarn hardhat verify-contract-deployment --contract "${SYSTEM_DICTATOR_ADDRESS}" --transaction-hash "${SYSTEM_DICTATOR_TXHASH}"; then - echo "Deployment verfication successful, exiting" - exit 0 - else - echo "Deployment verfication failed, please clear L1 deployment volume before starting" - exit 1 - fi -fi - -# Generate the L2 account addresses -yarn hardhat 
rekey-json --output /l2-accounts/keys.json - -# Read JSON file into variable -KEYS_JSON=$(cat /l2-accounts/keys.json) - -# Parse JSON into variables -ADMIN_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Admin.address') -ADMIN_PRIV_KEY=$(echo "$KEYS_JSON" | jq -r '.Admin.privateKey') -PROPOSER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Proposer.address') -BATCHER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Batcher.address') -SEQUENCER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Sequencer.address') - -# Get the private keys of L1 accounts -if [ -n "$CERC_L1_ACCOUNTS_CSV_URL" ] && \ - l1_accounts_response=$(curl -L --write-out '%{http_code}' --silent --output /dev/null "$CERC_L1_ACCOUNTS_CSV_URL") && \ - [ "$l1_accounts_response" -eq 200 ]; -then - echo "Fetching L1 account credentials using provided URL" - mkdir -p /geth-accounts - wget -O /geth-accounts/accounts.csv "$CERC_L1_ACCOUNTS_CSV_URL" - - CERC_L1_ADDRESS=$(head -n 1 /geth-accounts/accounts.csv | cut -d ',' -f 2) - CERC_L1_PRIV_KEY=$(head -n 1 /geth-accounts/accounts.csv | cut -d ',' -f 3) - CERC_L1_ADDRESS_2=$(awk -F, 'NR==2{print $(NF-1)}' /geth-accounts/accounts.csv) - CERC_L1_PRIV_KEY_2=$(awk -F, 'NR==2{print $NF}' /geth-accounts/accounts.csv) -else - echo "Couldn't fetch L1 account credentials, using them from env" -fi - -# Send balances to the above L2 addresses -yarn hardhat send-balance --to "${ADMIN_ADDRESS}" --amount 2 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started -yarn hardhat send-balance --to "${PROPOSER_ADDRESS}" --amount 5 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started -yarn hardhat send-balance --to "${BATCHER_ADDRESS}" --amount 1000 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started - -echo "Balances sent to L2 accounts" - -# Select a finalized L1 block as the starting point for roll ups -until FINALIZED_BLOCK=$(cast block finalized --rpc-url "$CERC_L1_RPC"); do - echo "Waiting for a finalized L1 block to exist, retrying after 10s" - sleep 10 -done - -L1_BLOCKNUMBER=$(echo 
"$FINALIZED_BLOCK" | awk '/number/{print $2}') -L1_BLOCKHASH=$(echo "$FINALIZED_BLOCK" | awk '/hash/{print $2}') -L1_BLOCKTIMESTAMP=$(echo "$FINALIZED_BLOCK" | awk '/timestamp/{print $2}') - -echo "Selected L1 block ${L1_BLOCKNUMBER} as the starting block for roll ups" - -# Update the deployment config -sed -i 's/"l2OutputOracleStartingTimestamp": TIMESTAMP/"l2OutputOracleStartingTimestamp": '"$L1_BLOCKTIMESTAMP"'/g' deploy-config/getting-started.json -jq --arg chainid "$CERC_L1_CHAIN_ID" '.l1ChainID = ($chainid | tonumber)' deploy-config/getting-started.json > tmp.json && mv tmp.json deploy-config/getting-started.json - -node update-config.js deploy-config/getting-started.json "$ADMIN_ADDRESS" "$PROPOSER_ADDRESS" "$BATCHER_ADDRESS" "$SEQUENCER_ADDRESS" "$L1_BLOCKHASH" - -echo "Updated the deployment config" - -# Create a .env file -echo "L1_RPC=$CERC_L1_RPC" > .env -echo "PRIVATE_KEY_DEPLOYER=$ADMIN_PRIV_KEY" >> .env - -echo "Deploying the L1 smart contracts, this will take a while..." - -# Deploy the L1 smart contracts -yarn hardhat deploy --network getting-started --tags l1 - -echo "Deployed the L1 smart contracts" - -# Read Proxy contract's JSON and get the address -PROXY_JSON=$(cat deployments/getting-started/Proxy__OVM_L1StandardBridge.json) -PROXY_ADDRESS=$(echo "$PROXY_JSON" | jq -r '.address') - -# Send balance to the above Proxy contract in L1 for reflecting balance in L2 -# First account -yarn hardhat send-balance --to "${PROXY_ADDRESS}" --amount 1 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started -# Second account -yarn hardhat send-balance --to "${PROXY_ADDRESS}" --amount 1 --private-key "${CERC_L1_PRIV_KEY_2}" --network getting-started - -echo "Balance sent to Proxy L2 contract" -echo "Use following accounts for transactions in L2:" -echo "${CERC_L1_ADDRESS}" -echo "${CERC_L1_ADDRESS_2}" -echo "Done" diff --git a/app/data/config/fixturenet-optimism/optimism-contracts/update-config.js 
b/app/data/config/fixturenet-optimism/optimism-contracts/update-config.js deleted file mode 100644 index 8a6c09d4..00000000 --- a/app/data/config/fixturenet-optimism/optimism-contracts/update-config.js +++ /dev/null @@ -1,36 +0,0 @@ -const fs = require('fs') - -// Get the command-line argument -const configFile = process.argv[2] -const adminAddress = process.argv[3] -const proposerAddress = process.argv[4] -const batcherAddress = process.argv[5] -const sequencerAddress = process.argv[6] -const blockHash = process.argv[7] - -// Read the JSON file -const configData = fs.readFileSync(configFile) -const configObj = JSON.parse(configData) - -// Update the finalSystemOwner property with the ADMIN_ADDRESS value -configObj.finalSystemOwner = - configObj.portalGuardian = - configObj.controller = - configObj.l2OutputOracleChallenger = - configObj.proxyAdminOwner = - configObj.baseFeeVaultRecipient = - configObj.l1FeeVaultRecipient = - configObj.sequencerFeeVaultRecipient = - configObj.governanceTokenOwner = - adminAddress - -configObj.l2OutputOracleProposer = proposerAddress - -configObj.batchSenderAddress = batcherAddress - -configObj.p2pSequencerAddress = sequencerAddress - -configObj.l1StartingBlockTag = blockHash - -// Write the updated JSON object back to the file -fs.writeFileSync(configFile, JSON.stringify(configObj, null, 2)) diff --git a/app/data/config/fixturenet-optimism/run-op-geth.sh b/app/data/config/fixturenet-optimism/run-op-geth.sh deleted file mode 100755 index 8b521f85..00000000 --- a/app/data/config/fixturenet-optimism/run-op-geth.sh +++ /dev/null @@ -1,90 +0,0 @@ -#!/bin/sh -set -e -if [ -n "$CERC_SCRIPT_DEBUG" ]; then - set -x -fi - -# TODO: Add in container build or use other tool -echo "Installing jq" -apk update && apk add jq - -# Get Sequencer key from keys.json -SEQUENCER_KEY=$(jq -r '.Sequencer.privateKey' /l2-accounts/keys.json | tr -d '"') - -# Initialize op-geth if datadir/geth not found -if [ -f /op-node/jwt.txt ] && [ -d datadir/geth ]; then 
- echo "Found existing datadir, checking block signer key" - - BLOCK_SIGNER_KEY=$(cat datadir/block-signer-key) - - if [ "$SEQUENCER_KEY" = "$BLOCK_SIGNER_KEY" ]; then - echo "Sequencer and block signer keys match, skipping initialization" - else - echo "Sequencer and block signer keys don't match, please clear L2 geth data volume before starting" - exit 1 - fi -else - echo "Initializing op-geth" - - mkdir -p datadir - echo "pwd" > datadir/password - echo $SEQUENCER_KEY > datadir/block-signer-key - - geth account import --datadir=datadir --password=datadir/password datadir/block-signer-key - - while [ ! -f "/op-node/jwt.txt" ] - do - echo "Config files not created. Checking after 5 seconds." - sleep 5 - done - - echo "Config files created by op-node, proceeding with the initialization..." - - geth init --datadir=datadir /op-node/genesis.json - echo "Node Initialized" -fi - -SEQUENCER_ADDRESS=$(jq -r '.Sequencer.address' /l2-accounts/keys.json | tr -d '"') -echo "SEQUENCER_ADDRESS: ${SEQUENCER_ADDRESS}" - -cleanup() { - echo "Signal received, cleaning up..." - kill ${geth_pid} - - wait - echo "Done" -} -trap 'cleanup' INT TERM - -# Run op-geth -geth \ - --datadir ./datadir \ - --http \ - --http.corsdomain="*" \ - --http.vhosts="*" \ - --http.addr=0.0.0.0 \ - --http.api=web3,debug,eth,txpool,net,engine \ - --ws \ - --ws.addr=0.0.0.0 \ - --ws.port=8546 \ - --ws.origins="*" \ - --ws.api=debug,eth,txpool,net,engine \ - --syncmode=full \ - --gcmode=archive \ - --nodiscover \ - --maxpeers=0 \ - --networkid=42069 \ - --authrpc.vhosts="*" \ - --authrpc.addr=0.0.0.0 \ - --authrpc.port=8551 \ - --authrpc.jwtsecret=/op-node/jwt.txt \ - --rollup.disabletxpoolgossip=true \ - --password=./datadir/password \ - --allow-insecure-unlock \ - --mine \ - --miner.etherbase=$SEQUENCER_ADDRESS \ - --unlock=$SEQUENCER_ADDRESS \ - & - -geth_pid=$! 
-wait $geth_pid diff --git a/app/data/config/fixturenet-optimism/run-op-node.sh b/app/data/config/fixturenet-optimism/run-op-node.sh deleted file mode 100755 index 516cf0a5..00000000 --- a/app/data/config/fixturenet-optimism/run-op-node.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/sh -set -e -if [ -n "$CERC_SCRIPT_DEBUG" ]; then - set -x -fi - -CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}" - -# Get Sequencer key from keys.json -SEQUENCER_KEY=$(jq -r '.Sequencer.privateKey' /l2-accounts/keys.json | tr -d '"') - -# Run op-node -op-node \ - --l2=http://op-geth:8551 \ - --l2.jwt-secret=/op-node-data/jwt.txt \ - --sequencer.enabled \ - --sequencer.l1-confs=3 \ - --verifier.l1-confs=3 \ - --rollup.config=/op-node-data/rollup.json \ - --rpc.addr=0.0.0.0 \ - --rpc.port=8547 \ - --p2p.disable \ - --rpc.enable-admin \ - --p2p.sequencer.key=$SEQUENCER_KEY \ - --l1=$CERC_L1_RPC \ - --l1.rpckind=any diff --git a/app/data/config/fixturenet-optimism/run-op-proposer.sh b/app/data/config/fixturenet-optimism/run-op-proposer.sh deleted file mode 100755 index 09746760..00000000 --- a/app/data/config/fixturenet-optimism/run-op-proposer.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/sh -set -e -if [ -n "$CERC_SCRIPT_DEBUG" ]; then - set -x -fi - -CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}" - -# Read the L2OutputOracle contract address from the deployment -L2OO_DEPLOYMENT=$(cat /contracts-bedrock/deployments/getting-started/L2OutputOracle.json) -L2OO_ADDR=$(echo "$L2OO_DEPLOYMENT" | jq -r '.address') - -# Get Proposer key from keys.json -PROPOSER_KEY=$(jq -r '.Proposer.privateKey' /l2-accounts/keys.json | tr -d '"') - -cleanup() { - echo "Signal received, cleaning up..." - kill ${proposer_pid} - - wait - echo "Done" -} -trap 'cleanup' INT TERM - -# Run op-proposer -op-proposer \ - --poll-interval 12s \ - --rpc.port 8560 \ - --rollup-rpc http://op-node:8547 \ - --l2oo-address $L2OO_ADDR \ - --private-key $PROPOSER_KEY \ - --l1-eth-rpc $CERC_L1_RPC \ - & - -proposer_pid=$! 
-wait $proposer_pid diff --git a/app/data/config/watcher-azimuth/watcher-params.env b/app/data/config/watcher-azimuth/watcher-params.env deleted file mode 100644 index 8fcdc2d6..00000000 --- a/app/data/config/watcher-azimuth/watcher-params.env +++ /dev/null @@ -1,5 +0,0 @@ -# Defaults - -# ipld-eth-server endpoints -DEFAULT_CERC_IPLD_ETH_RPC= -DEFAULT_CERC_IPLD_ETH_GQL= diff --git a/app/data/container-build/cerc-laconic-console-host/config.yml b/app/data/container-build/cerc-laconic-console-host/config.yml deleted file mode 100644 index d557ace5..00000000 --- a/app/data/container-build/cerc-laconic-console-host/config.yml +++ /dev/null @@ -1,6 +0,0 @@ -# Config for laconic-console running in a fixturenet with laconicd - -services: - wns: - server: 'LACONIC_HOSTED_ENDPOINT:9473/api' - webui: 'LACONIC_HOSTED_ENDPOINT:9473/console' diff --git a/app/data/stacks/azimuth/README.md b/app/data/stacks/azimuth/README.md deleted file mode 100644 index 67f42b75..00000000 --- a/app/data/stacks/azimuth/README.md +++ /dev/null @@ -1,72 +0,0 @@ -# Azimuth Watcher - -Instructions to setup and deploy Azimuth Watcher stack - -## Setup - -Prerequisite: `ipld-eth-server` RPC and GQL endpoints - -Clone required repositories: - -```bash -laconic-so --stack azimuth setup-repositories -``` - -NOTE: If the repository already exists and checked out to a different version, `setup-repositories` command will throw an error. -For getting around this, the `azimuth-watcher-ts` repository can be removed and then run the command. - -Checkout to the required versions and branches in repos - -```bash -# azimuth-watcher-ts -cd ~/cerc/azimuth-watcher-ts -git checkout v0.1.0 -``` - -Build the container images: - -```bash -laconic-so --stack azimuth build-containers -``` - -This should create the required docker images in the local image registry. 
- -### Configuration - -* Create and update an env file to be used in the next step: - - ```bash - # External ipld-eth-server endpoints - CERC_IPLD_ETH_RPC= - CERC_IPLD_ETH_GQL= - ``` - -* NOTE: If `ipld-eth-server` is running on the host machine, use `host.docker.internal` as the hostname to access host ports - -### Deploy the stack - -* Deploy the containers: - - ```bash - laconic-so --stack azimuth deploy-system --env-file up - ``` - -* List and check the health status of all the containers using `docker ps` and wait for them to be `healthy` - -## Clean up - -Stop all the services running in background: - -```bash -laconic-so --stack azimuth deploy-system down -``` - -Clear volumes created by this stack: - -```bash -# List all relevant volumes -docker volume ls -q --filter "name=.*watcher_db_data" - -# Remove all the listed volumes -docker volume rm $(docker volume ls -q --filter "name=.*watcher_db_data") -``` diff --git a/app/data/stacks/fixturenet-optimism/README.md b/app/data/stacks/fixturenet-optimism/README.md deleted file mode 100644 index 4d933f83..00000000 --- a/app/data/stacks/fixturenet-optimism/README.md +++ /dev/null @@ -1,123 +0,0 @@ -# fixturenet-optimism - -Instructions to setup and deploy an end-to-end L1+L2 stack with [fixturenet-eth](../fixturenet-eth/) (L1) and [Optimism](https://stack.optimism.io) (L2) - -We support running just the L2 part of stack, given an external L1 endpoint. Follow the [L2 only doc](./l2-only.md) for the same. 
- -## Setup - -Clone required repositories: - -```bash -laconic-so --stack fixturenet-optimism setup-repositories - -# If this throws an error as a result of being already checked out to a branch/tag in a repo, remove the repositories mentioned below and re-run the command -``` - -Build the container images: - -```bash -laconic-so --stack fixturenet-optimism build-containers - -# If redeploying with changes in the stack containers -laconic-so --stack fixturenet-optimism build-containers --force-rebuild - -# If errors are thrown during build, old images used by this stack would have to be deleted -``` - -Note: this will take >10 mins depending on the specs of your machine, and **requires** 16GB of memory or greater. - -This should create the required docker images in the local image registry: -* `cerc/go-ethereum` -* `cerc/lighthouse` -* `cerc/fixturenet-eth-geth` -* `cerc/fixturenet-eth-lighthouse` -* `cerc/foundry` -* `cerc/optimism-contracts` -* `cerc/optimism-l2geth` -* `cerc/optimism-op-node` -* `cerc/optimism-op-batcher` -* `cerc/optimism-op-proposer` - -## Deploy - -Deploy the stack: - -```bash -laconic-so --stack fixturenet-optimism deploy up -``` - -The `fixturenet-optimism-contracts` service takes a while to complete running as it: -1. waits for the 'Merge' to happen on L1 -2. waits for a finalized block to exist on L1 (so that it can be taken as a starting block for roll ups) -3. deploys the L1 contracts -It may restart a few times after running into errors. 
- -To list and monitor the running containers: - -```bash -laconic-so --stack fixturenet-optimism deploy ps - -# With status -docker ps - -# Check logs for a container -docker logs -f -``` - -## Clean up - -Stop all services running in the background: - -```bash -laconic-so --stack fixturenet-optimism deploy down 30 -``` - -Clear volumes created by this stack: - -```bash -# List all relevant volumes -docker volume ls -q --filter "name=.*l1_deployment|.*l2_accounts|.*l2_config|.*l2_geth_data" - -# Remove all the listed volumes -docker volume rm $(docker volume ls -q --filter "name=.*l1_deployment|.*l2_accounts|.*l2_config|.*l2_geth_data") -``` - -## Troubleshooting - -* If `op-geth` service aborts or is restarted, the following error might occur in the `op-node` service: - - ```bash - WARN [02-16|21:22:02.868] Derivation process temporary error attempts=14 err="stage 0 failed resetting: temp: failed to find the L2 Heads to start from: failed to fetch L2 block by hash 0x0000000000000000000000000000000000000000000000000000000000000000: failed to determine block-hash of hash 0x0000000000000000000000000000000000000000000000000000000000000000, could not get payload: not found" - ``` - -* This means that the data directory that `op-geth` is using is corrupted and needs to be reinitialized; the containers `op-geth`, `op-node` and `op-batcher` need to be started afresh: - - WARNING: This will reset the L2 chain; consequently, all the data on it will be lost - - * Stop and remove the concerned containers: - - ```bash - # List the containers - docker ps -f "name=op-geth|op-node|op-batcher" - - # Force stop and remove the listed containers - docker rm -f $(docker ps -qf "name=op-geth|op-node|op-batcher") - ``` - - * Remove the concerned volume: - - ```bash - # List the volume - docker volume ls -q --filter name=l2_geth_data - - # Remove the listed volume - docker volume rm $(docker volume ls -q --filter name=l2_geth_data) - ``` - - * Re-run the deployment command used in 
[Deploy](#deploy) to restart the stopped containers - -## Known Issues - -* Resource requirements (memory + time) for building the `cerc/foundry` image are on the higher side - * `cerc/optimism-contracts` image is currently based on `cerc/foundry` (Optimism requires foundry installation) diff --git a/app/data/stacks/fixturenet-optimism/l2-only.md b/app/data/stacks/fixturenet-optimism/l2-only.md deleted file mode 100644 index 4e9daf43..00000000 --- a/app/data/stacks/fixturenet-optimism/l2-only.md +++ /dev/null @@ -1,100 +0,0 @@ -# fixturenet-optimism - -Instructions to setup and deploy L2 fixturenet using [Optimism](https://stack.optimism.io) - -## Setup - -Prerequisite: An L1 Ethereum RPC endpoint - -Clone required repositories: - -```bash -laconic-so --stack fixturenet-optimism setup-repositories --exclude git.vdb.to/cerc-io/go-ethereum - -# If this throws an error as a result of being already checked out to a branch/tag in a repo, remove the repositories mentioned below and re-run the command -``` - -Build the container images: - -```bash -laconic-so --stack fixturenet-optimism build-containers --include cerc/foundry,cerc/optimism-contracts,cerc/optimism-op-node,cerc/optimism-l2geth,cerc/optimism-op-batcher,cerc/optimism-op-proposer -``` - -This should create the required docker images in the local image registry: -* `cerc/foundry` -* `cerc/optimism-contracts` -* `cerc/optimism-l2geth` -* `cerc/optimism-op-node` -* `cerc/optimism-op-batcher` -* `cerc/optimism-op-proposer` - -## Deploy - -Create and update an env file to be used in the next step ([defaults](../../config/fixturenet-optimism/l1-params.env)): - - ```bash - # External L1 endpoint - CERC_L1_CHAIN_ID= - CERC_L1_RPC= - CERC_L1_HOST= - CERC_L1_PORT= - - # URL to get CSV with credentials for accounts on L1 - # that are used to send balance to Optimism Proxy contract - # (enables them to do transactions on L2) - CERC_L1_ACCOUNTS_CSV_URL= - - # OR - # Specify the required account credentials - 
CERC_L1_ADDRESS= - CERC_L1_PRIV_KEY= - CERC_L1_ADDRESS_2= - CERC_L1_PRIV_KEY_2= - ``` - -* NOTE: If L1 is running on the host machine, use `host.docker.internal` as the hostname to access the host port - -Deploy the stack: - -```bash -laconic-so --stack fixturenet-optimism deploy --include fixturenet-optimism --env-file up -``` - -The `fixturenet-optimism-contracts` service may take a while (`~15 mins`) to complete running as it: -1. waits for the 'Merge' to happen on L1 -2. waits for a finalized block to exist on L1 (so that it can be taken as a starting block for roll ups) -3. deploys the L1 contracts - -To list down and monitor the running containers: - -```bash -laconic-so --stack fixturenet-optimism deploy --include fixturenet-optimism ps - -# With status -docker ps - -# Check logs for a container -docker logs -f -``` - -## Clean up - -Stop all services running in the background: - -```bash -laconic-so --stack fixturenet-optimism deploy --include fixturenet-optimism down 30 -``` - -Clear volumes created by this stack: - -```bash -# List all relevant volumes -docker volume ls -q --filter "name=.*l1_deployment|.*l2_accounts|.*l2_config|.*l2_geth_data" - -# Remove all the listed volumes -docker volume rm $(docker volume ls -q --filter "name=.*l1_deployment|.*l2_accounts|.*l2_config|.*l2_geth_data") -``` - -## Troubleshooting - -See [Troubleshooting](./README.md#troubleshooting) diff --git a/app/deploy/k8s/deploy_k8s.py b/app/deploy/k8s/deploy_k8s.py deleted file mode 100644 index 7cf0261d..00000000 --- a/app/deploy/k8s/deploy_k8s.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright © 2023 Vulcanize - -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
- -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Affero General Public License for more details. - -# You should have received a copy of the GNU Affero General Public License -# along with this program. If not, see . - -from kubernetes import client, config -from app.deploy.deployer import Deployer - - -class K8sDeployer(Deployer): - name: str = "k8s" - - def __init__(self, compose_files, compose_project_name, compose_env_file) -> None: - config.load_kube_config() - self.client = client.CoreV1Api() - - def up(self, detach, services): - pass - - def down(self, timeout, volumes): - pass - - def ps(self): - pass - - def port(self, service, private_port): - pass - - def execute(self, service_name, command, envs): - pass - - def logs(self, services, tail, follow, stream): - pass - - def run(self, image, command, user, volumes, entrypoint=None): - pass diff --git a/docs/adding-a-new-stack.md b/docs/adding-a-new-stack.md index 4fbf27b2..2b2d1a65 100644 --- a/docs/adding-a-new-stack.md +++ b/docs/adding-a-new-stack.md @@ -8,7 +8,7 @@ Core to the feature completeness of stack orchestrator is to [decouple the tool ## Example -- in `app/data/stacks/my-new-stack/stack.yml` add: +- in `stack_orchestrator/data/stacks/my-new-stack/stack.yml` add: ```yaml version: "0.1" @@ -21,7 +21,7 @@ pods: - my-new-stack ``` -- in `app/data/container-build/cerc-my-new-stack/build.sh` add: +- in `stack_orchestrator/data/container-build/cerc-my-new-stack/build.sh` add: ```yaml #!/usr/bin/env bash @@ -30,7 +30,7 @@ source ${CERC_CONTAINER_BASE_DIR}/build-base.sh docker build -t cerc/my-new-stack:local -f ${CERC_REPO_BASE_DIR}/my-new-stack/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/my-new-stack ``` -- in `app/data/compose/docker-compose-my-new-stack.yml` add: +- in `stack_orchestrator/data/compose/docker-compose-my-new-stack.yml` 
add: ```yaml version: "3.2" @@ -43,20 +43,20 @@ services: - "0.0.0.0:3000:3000" ``` -- in `app/data/repository-list.txt` add: +- in `stack_orchestrator/data/repository-list.txt` add: ```bash github.com/my-org/my-new-stack ``` whereby that repository contains your source code and a `Dockerfile`, and matches the `repos:` field in the `stack.yml`. -- in `app/data/container-image-list.txt` add: +- in `stack_orchestrator/data/container-image-list.txt` add: ```bash cerc/my-new-stack ``` -- in `app/data/pod-list.txt` add: +- in `stack_orchestrator/data/pod-list.txt` add: ```bash my-new-stack diff --git a/docs/webapp.md b/docs/webapp.md new file mode 100644 index 00000000..fcf4ffcb --- /dev/null +++ b/docs/webapp.md @@ -0,0 +1,64 @@ +### Building and Running Webapps + +It is possible to build and run Next.js webapps using the `build-webapp` and `run-webapp` subcommands. + +To make it easier to build once and deploy into different environments and with different configuration, +compilation and static page generation are separated in the `build-webapp` and `run-webapp` steps. + +This offers much more flexibilty than standard Next.js build methods, since any environment variables accessed +via `process.env`, whether for pages or for API, will have values drawn from their runtime deployment environment, +not their build environment. + +## Building + +Building usually requires no additional configuration. By default, the Next.js version specified in `package.json` +is used, and either `yarn` or `npm` will be used automatically depending on which lock files are present. These +can be overidden with the build arguments `CERC_NEXT_VERSION` and `CERC_BUILD_TOOL` respectively. For example: `--extra-build-args "--build-arg CERC_NEXT_VERSION=13.4.12"` + +**Example**: +``` +$ cd ~/cerc +$ git clone git@git.vdb.to:cerc-io/test-progressive-web-app.git +$ laconic-so build-webapp --source-repo ~/cerc/test-progressive-web-app +... 
+ +Built host container for ~/cerc/test-progressive-web-app with tag: + + cerc/test-progressive-web-app:local + +To test locally run: + + laconic-so run-webapp --image cerc/test-progressive-web-app:local --env-file /path/to/environment.env + +``` + +## Running + +With `run-webapp` a new container will be launched on the local machine, with runtime configuration provided by `--env-file` (if specified) and published on an available port. Multiple instances can be launched with different configuration. + +**Example**: +``` +# Production env +$ laconic-so run-webapp --image cerc/test-progressive-web-app:local --env-file /path/to/environment/production.env + +Image: cerc/test-progressive-web-app:local +ID: 4c6e893bf436b3e91a2b92ce37e30e499685131705700bd92a90d2eb14eefd05 +URL: http://localhost:32768 + +# Dev env +$ laconic-so run-webapp --image cerc/test-progressive-web-app:local --env-file /path/to/environment/dev.env + +Image: cerc/test-progressive-web-app:local +ID: 9ab96494f563aafb6c057d88df58f9eca81b90f8721a4e068493a289a976051c +URL: http://localhost:32769 +``` + +## Deploying + +Use the subcommand `deploy-webapp create` to make a deployment directory that can be subsequently deployed to a Kubernetes cluster. 
+Example commands are shown below, assuming that the webapp container image `cerc/test-progressive-web-app:local` has already been built: +``` +$ laconic-so deploy-webapp create --kube-config ~/kubectl/k8s-kubeconfig.yaml --image-registry registry.digitalocean.com/laconic-registry --deployment-dir webapp-k8s-deployment --image cerc/test-progressive-web-app:local --url https://test-pwa-app.hosting.laconic.com/ --env-file test-webapp.env +$ laconic-so deployment --dir webapp-k8s-deployment push-images +$ laconic-so deployment --dir webapp-k8s-deployment start +``` diff --git a/requirements.txt b/requirements.txt index bf4845a1..bbf97b4a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,5 @@ python-decouple>=3.8 +python-dotenv==1.0.0 GitPython>=3.1.32 tqdm>=4.65.0 python-on-whales>=0.64.0 diff --git a/scripts/create_build_tag_file.sh b/scripts/create_build_tag_file.sh index c814a420..077abf31 100755 --- a/scripts/create_build_tag_file.sh +++ b/scripts/create_build_tag_file.sh @@ -1,6 +1,6 @@ -build_tag_file_name=./app/data/build_tag.txt +build_tag_file_name=./stack_orchestrator/data/build_tag.txt echo "# This file should be re-generated running: scripts/create_build_tag_file.sh script" > $build_tag_file_name -product_version_string=$( tail -1 ./app/data/version.txt ) +product_version_string=$( tail -1 ./stack_orchestrator/data/version.txt ) commit_string=$( git rev-parse --short HEAD ) timestamp_string=$(date +'%Y%m%d%H%M') build_tag_string=${product_version_string}-${commit_string}-${timestamp_string} diff --git a/setup.py b/setup.py index 86050fbc..d89dfc4d 100644 --- a/setup.py +++ b/setup.py @@ -14,7 +14,7 @@ setup( long_description=long_description, long_description_content_type="text/markdown", url='https://github.com/cerc-io/stack-orchestrator', - py_modules=['cli', 'app'], + py_modules=['stack_orchestrator'], packages=find_packages(), install_requires=[requirements], python_requires='>=3.7', @@ -25,6 +25,6 @@ setup( "Operating System :: OS 
Independent", ], entry_points={ - 'console_scripts': ['laconic-so=cli:cli'], + 'console_scripts': ['laconic-so=stack_orchestrator.main:cli'], } ) diff --git a/app/__init__.py b/stack_orchestrator/__init__.py similarity index 100% rename from app/__init__.py rename to stack_orchestrator/__init__.py diff --git a/app/__main__.py b/stack_orchestrator/__main__.py similarity index 100% rename from app/__main__.py rename to stack_orchestrator/__main__.py diff --git a/app/base.py b/stack_orchestrator/base.py similarity index 98% rename from app/base.py rename to stack_orchestrator/base.py index ba3504ba..811d085d 100644 --- a/app/base.py +++ b/stack_orchestrator/base.py @@ -15,7 +15,7 @@ import os from abc import ABC, abstractmethod -from app.deploy.deploy import get_stack_status +from stack_orchestrator.deploy.deploy import get_stack_status from decouple import config diff --git a/app/build/__init__.py b/stack_orchestrator/build/__init__.py similarity index 100% rename from app/build/__init__.py rename to stack_orchestrator/build/__init__.py diff --git a/app/build/build_containers.py b/stack_orchestrator/build/build_containers.py similarity index 52% rename from app/build/build_containers.py rename to stack_orchestrator/build/build_containers.py index ee74b807..e987c504 100644 --- a/app/build/build_containers.py +++ b/stack_orchestrator/build/build_containers.py @@ -27,12 +27,97 @@ import subprocess import click import importlib.resources from pathlib import Path -from app.util import include_exclude_check, get_parsed_stack_config -from app.base import get_npm_registry_url +from stack_orchestrator.util import include_exclude_check, get_parsed_stack_config, stack_is_external +from stack_orchestrator.base import get_npm_registry_url # TODO: find a place for this # epilog="Config provided either in .env or settings.ini or env vars: CERC_REPO_BASE_DIR (defaults to ~/cerc)" +def make_container_build_env(dev_root_path: str, + container_build_dir: str, + debug: bool, + 
force_rebuild: bool, + extra_build_args: str): + container_build_env = { + "CERC_NPM_REGISTRY_URL": get_npm_registry_url(), + "CERC_GO_AUTH_TOKEN": config("CERC_GO_AUTH_TOKEN", default=""), + "CERC_NPM_AUTH_TOKEN": config("CERC_NPM_AUTH_TOKEN", default=""), + "CERC_REPO_BASE_DIR": dev_root_path, + "CERC_CONTAINER_BASE_DIR": container_build_dir, + "CERC_HOST_UID": f"{os.getuid()}", + "CERC_HOST_GID": f"{os.getgid()}", + "DOCKER_BUILDKIT": config("DOCKER_BUILDKIT", default="0") + } + container_build_env.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {}) + container_build_env.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {}) + container_build_env.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {}) + docker_host_env = os.getenv("DOCKER_HOST") + if docker_host_env: + container_build_env.update({"DOCKER_HOST": docker_host_env}) + + return container_build_env + + +def process_container(stack: str, + container, + container_build_dir: str, + container_build_env: dict, + dev_root_path: str, + quiet: bool, + verbose: bool, + dry_run: bool, + continue_on_error: bool, + ): + if not quiet: + print(f"Building: {container}") + + default_container_tag = f"{container}:local" + container_build_env.update({"CERC_DEFAULT_CONTAINER_IMAGE_TAG": default_container_tag}) + + # Check if this is in an external stack + if stack_is_external(stack): + container_parent_dir = Path(stack).joinpath("container-build") + temp_build_dir = container_parent_dir.joinpath(container.replace("/", "-")) + temp_build_script_filename = temp_build_dir.joinpath("build.sh") + # Now check if the container exists in the external stack. 
+ if not temp_build_script_filename.exists(): + # If not, revert to building an internal container + container_parent_dir = container_build_dir + else: + container_parent_dir = container_build_dir + + build_dir = container_parent_dir.joinpath(container.replace("/", "-")) + build_script_filename = build_dir.joinpath("build.sh") + + if verbose: + print(f"Build script filename: {build_script_filename}") + if os.path.exists(build_script_filename): + build_command = build_script_filename.as_posix() + else: + if verbose: + print(f"No script file found: {build_script_filename}, using default build script") + repo_dir = container.split('/')[1] + # TODO: make this less of a hack -- should be specified in some metadata somewhere + # Check if we have a repo for this container. If not, set the context dir to the container-build subdir + repo_full_path = os.path.join(dev_root_path, repo_dir) + repo_dir_or_build_dir = repo_full_path if os.path.exists(repo_full_path) else build_dir + build_command = os.path.join(container_build_dir, + "default-build.sh") + f" {default_container_tag} {repo_dir_or_build_dir}" + if not dry_run: + if verbose: + print(f"Executing: {build_command} with environment: {container_build_env}") + build_result = subprocess.run(build_command, shell=True, env=container_build_env) + if verbose: + print(f"Return code is: {build_result.returncode}") + if build_result.returncode != 0: + print(f"Error running build for {container}") + if not continue_on_error: + print("FATAL Error: container build failed and --continue-on-error not set, exiting") + sys.exit(1) + else: + print("****** Container Build Error, continuing because --continue-on-error is set") + else: + print("Skipped") @click.command() @click.option('--include', help="only build these containers") @@ -67,7 +152,7 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args): print('Dev root directory doesn\'t exist, creating') # See: https://stackoverflow.com/a/20885799/1701505 - from app import 
data + from stack_orchestrator import data with importlib.resources.open_text(data, "container-image-list.txt") as container_list_file: all_containers = container_list_file.read().splitlines() @@ -83,61 +168,16 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args): if stack: print(f"Stack: {stack}") - # TODO: make this configurable - container_build_env = { - "CERC_NPM_REGISTRY_URL": get_npm_registry_url(), - "CERC_GO_AUTH_TOKEN": config("CERC_GO_AUTH_TOKEN", default=""), - "CERC_NPM_AUTH_TOKEN": config("CERC_NPM_AUTH_TOKEN", default=""), - "CERC_REPO_BASE_DIR": dev_root_path, - "CERC_CONTAINER_BASE_DIR": container_build_dir, - "CERC_HOST_UID": f"{os.getuid()}", - "CERC_HOST_GID": f"{os.getgid()}", - "DOCKER_BUILDKIT": config("DOCKER_BUILDKIT", default="0") - } - container_build_env.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {}) - container_build_env.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {}) - container_build_env.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {}) - docker_host_env = os.getenv("DOCKER_HOST") - if docker_host_env: - container_build_env.update({"DOCKER_HOST": docker_host_env}) - - def process_container(container): - if not quiet: - print(f"Building: {container}") - build_dir = os.path.join(container_build_dir, container.replace("/", "-")) - build_script_filename = os.path.join(build_dir, "build.sh") - if verbose: - print(f"Build script filename: {build_script_filename}") - if os.path.exists(build_script_filename): - build_command = build_script_filename - else: - if verbose: - print(f"No script file found: {build_script_filename}, using default build script") - repo_dir = container.split('/')[1] - # TODO: make this less of a hack -- should be specified in some metadata somewhere - # Check if we have a repo for this container. 
If not, set the context dir to the container-build subdir - repo_full_path = os.path.join(dev_root_path, repo_dir) - repo_dir_or_build_dir = repo_full_path if os.path.exists(repo_full_path) else build_dir - build_command = os.path.join(container_build_dir, "default-build.sh") + f" {container}:local {repo_dir_or_build_dir}" - if not dry_run: - if verbose: - print(f"Executing: {build_command} with environment: {container_build_env}") - build_result = subprocess.run(build_command, shell=True, env=container_build_env) - if verbose: - print(f"Return code is: {build_result.returncode}") - if build_result.returncode != 0: - print(f"Error running build for {container}") - if not continue_on_error: - print("FATAL Error: container build failed and --continue-on-error not set, exiting") - sys.exit(1) - else: - print("****** Container Build Error, continuing because --continue-on-error is set") - else: - print("Skipped") + container_build_env = make_container_build_env(dev_root_path, + container_build_dir, + debug, + force_rebuild, + extra_build_args) for container in containers_in_scope: if include_exclude_check(container, include, exclude): - process_container(container) + process_container(stack, container, container_build_dir, container_build_env, + dev_root_path, quiet, verbose, dry_run, continue_on_error) else: if verbose: print(f"Excluding: {container}") diff --git a/app/build/build_npms.py b/stack_orchestrator/build/build_npms.py similarity index 97% rename from app/build/build_npms.py rename to stack_orchestrator/build/build_npms.py index 2ffbea1b..c8e3af43 100644 --- a/app/build/build_npms.py +++ b/stack_orchestrator/build/build_npms.py @@ -25,8 +25,8 @@ from decouple import config import click import importlib.resources from python_on_whales import docker, DockerException -from app.base import get_stack -from app.util import include_exclude_check, get_parsed_stack_config +from stack_orchestrator.base import get_stack +from stack_orchestrator.util import 
include_exclude_check, get_parsed_stack_config builder_js_image_name = "cerc/builder-js:local" @@ -83,7 +83,7 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args): os.makedirs(build_root_path) # See: https://stackoverflow.com/a/20885799/1701505 - from app import data + from stack_orchestrator import data with importlib.resources.open_text(data, "npm-package-list.txt") as package_list_file: all_packages = package_list_file.read().splitlines() diff --git a/stack_orchestrator/build/build_webapp.py b/stack_orchestrator/build/build_webapp.py new file mode 100644 index 00000000..287347eb --- /dev/null +++ b/stack_orchestrator/build/build_webapp.py @@ -0,0 +1,81 @@ +# Copyright © 2022, 2023 Vulcanize + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +# Builds webapp containers + +# env vars: +# CERC_REPO_BASE_DIR defaults to ~/cerc + +# TODO: display the available list of containers; allow re-build of either all or specific containers + +import os +from decouple import config +import click +from pathlib import Path +from stack_orchestrator.build import build_containers + + +@click.command() +@click.option('--base-container', default="cerc/nextjs-base") +@click.option('--source-repo', help="directory containing the webapp to build", required=True) +@click.option("--force-rebuild", is_flag=True, default=False, help="Override dependency checking -- always rebuild") +@click.option("--extra-build-args", help="Supply extra arguments to build") +@click.option("--tag", help="Container tag (default: cerc/:local)") +@click.pass_context +def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, tag): + '''build the specified webapp container''' + + quiet = ctx.obj.quiet + verbose = ctx.obj.verbose + dry_run = ctx.obj.dry_run + debug = ctx.obj.debug + local_stack = ctx.obj.local_stack + stack = ctx.obj.stack + continue_on_error = ctx.obj.continue_on_error + + # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure + container_build_dir = Path(__file__).absolute().parent.parent.joinpath("data", "container-build") + + if local_stack: + dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")] + print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}') + else: + dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc")) + + if not quiet: + print(f'Dev Root is: {dev_root_path}') + + # First build the base container. 
+ container_build_env = build_containers.make_container_build_env(dev_root_path, container_build_dir, debug, + force_rebuild, extra_build_args) + + build_containers.process_container(None, base_container, container_build_dir, container_build_env, dev_root_path, quiet, + verbose, dry_run, continue_on_error) + + + # Now build the target webapp. We use the same build script, but with a different Dockerfile and work dir. + container_build_env["CERC_WEBAPP_BUILD_RUNNING"] = "true" + container_build_env["CERC_CONTAINER_BUILD_WORK_DIR"] = os.path.abspath(source_repo) + container_build_env["CERC_CONTAINER_BUILD_DOCKERFILE"] = os.path.join(container_build_dir, + base_container.replace("/", "-"), + "Dockerfile.webapp") + if not tag: + webapp_name = os.path.abspath(source_repo).split(os.path.sep)[-1] + container_build_env["CERC_CONTAINER_BUILD_TAG"] = f"cerc/{webapp_name}:local" + else: + container_build_env["CERC_CONTAINER_BUILD_TAG"] = tag + + build_containers.process_container(None, base_container, container_build_dir, container_build_env, dev_root_path, quiet, + verbose, dry_run, continue_on_error) diff --git a/app/command_types.py b/stack_orchestrator/command_types.py similarity index 100% rename from app/command_types.py rename to stack_orchestrator/command_types.py diff --git a/stack_orchestrator/constants.py b/stack_orchestrator/constants.py new file mode 100644 index 00000000..596b0c1b --- /dev/null +++ b/stack_orchestrator/constants.py @@ -0,0 +1,32 @@ +# Copyright © 2023 Vulcanize + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +cluster_name_prefix = "laconic-" +stack_file_name = "stack.yml" +spec_file_name = "spec.yml" +config_file_name = "config.env" +deployment_file_name = "deployment.yml" +compose_dir_name = "compose" +compose_deploy_type = "compose" +k8s_kind_deploy_type = "k8s-kind" +k8s_deploy_type = "k8s" +cluster_id_key = "cluster-id" +kube_config_key = "kube-config" +deploy_to_key = "deploy-to" +network_key = "network" +http_proxy_key = "http-proxy" +image_resigtry_key = "image-registry" +kind_config_filename = "kind-config.yml" +kube_config_filename = "kubeconfig.yml" diff --git a/app/data/__init__.py b/stack_orchestrator/data/__init__.py similarity index 100% rename from app/data/__init__.py rename to stack_orchestrator/data/__init__.py diff --git a/app/data/compose/docker-compose-contract-sushiswap.yml b/stack_orchestrator/data/compose/docker-compose-contract-sushiswap.yml similarity index 100% rename from app/data/compose/docker-compose-contract-sushiswap.yml rename to stack_orchestrator/data/compose/docker-compose-contract-sushiswap.yml diff --git a/app/data/compose/docker-compose-contract.yml b/stack_orchestrator/data/compose/docker-compose-contract.yml similarity index 100% rename from app/data/compose/docker-compose-contract.yml rename to stack_orchestrator/data/compose/docker-compose-contract.yml diff --git a/app/data/compose/docker-compose-eth-probe.yml b/stack_orchestrator/data/compose/docker-compose-eth-probe.yml similarity index 100% rename from app/data/compose/docker-compose-eth-probe.yml rename to stack_orchestrator/data/compose/docker-compose-eth-probe.yml diff --git a/app/data/compose/docker-compose-eth-statediff-fill-service.yml b/stack_orchestrator/data/compose/docker-compose-eth-statediff-fill-service.yml similarity index 100% rename from 
app/data/compose/docker-compose-eth-statediff-fill-service.yml rename to stack_orchestrator/data/compose/docker-compose-eth-statediff-fill-service.yml diff --git a/app/data/compose/docker-compose-fixturenet-eth-metrics.yml b/stack_orchestrator/data/compose/docker-compose-fixturenet-eth-metrics.yml similarity index 100% rename from app/data/compose/docker-compose-fixturenet-eth-metrics.yml rename to stack_orchestrator/data/compose/docker-compose-fixturenet-eth-metrics.yml diff --git a/app/data/compose/docker-compose-fixturenet-eth.yml b/stack_orchestrator/data/compose/docker-compose-fixturenet-eth.yml similarity index 100% rename from app/data/compose/docker-compose-fixturenet-eth.yml rename to stack_orchestrator/data/compose/docker-compose-fixturenet-eth.yml diff --git a/app/data/compose/docker-compose-fixturenet-laconic-console.yml b/stack_orchestrator/data/compose/docker-compose-fixturenet-laconic-console.yml similarity index 95% rename from app/data/compose/docker-compose-fixturenet-laconic-console.yml rename to stack_orchestrator/data/compose/docker-compose-fixturenet-laconic-console.yml index da2fd95f..a186e761 100644 --- a/app/data/compose/docker-compose-fixturenet-laconic-console.yml +++ b/stack_orchestrator/data/compose/docker-compose-fixturenet-laconic-console.yml @@ -4,6 +4,6 @@ services: image: cerc/laconic-console-host:local environment: - CERC_WEBAPP_FILES_DIR=${CERC_WEBAPP_FILES_DIR:-/usr/local/share/.config/yarn/global/node_modules/@cerc-io/console-app/dist/production} - - LACONIC_HOSTED_ENDPOINT=${LACONIC_HOSTED_ENDPOINT:-http://localhost} + - LACONIC_HOSTED_ENDPOINT=${LACONIC_HOSTED_ENDPOINT:-http://localhost:9473} ports: - "80" diff --git a/app/data/compose/docker-compose-fixturenet-laconicd.yml b/stack_orchestrator/data/compose/docker-compose-fixturenet-laconicd.yml similarity index 95% rename from app/data/compose/docker-compose-fixturenet-laconicd.yml rename to stack_orchestrator/data/compose/docker-compose-fixturenet-laconicd.yml index 
641229d4..7b48f60d 100644 --- a/app/data/compose/docker-compose-fixturenet-laconicd.yml +++ b/stack_orchestrator/data/compose/docker-compose-fixturenet-laconicd.yml @@ -5,7 +5,7 @@ services: command: ["sh", "/docker-entrypoint-scripts.d/create-fixturenet.sh"] volumes: # The cosmos-sdk node's database directory: - - laconicd-data:/root/.laconicd/data + - laconicd-data:/root/.laconicd # TODO: look at folding these scripts into the container - ../config/fixturenet-laconicd/create-fixturenet.sh:/docker-entrypoint-scripts.d/create-fixturenet.sh - ../config/fixturenet-laconicd/export-mykey.sh:/docker-entrypoint-scripts.d/export-mykey.sh diff --git a/app/data/compose/docker-compose-fixturenet-lotus.yml b/stack_orchestrator/data/compose/docker-compose-fixturenet-lotus.yml similarity index 100% rename from app/data/compose/docker-compose-fixturenet-lotus.yml rename to stack_orchestrator/data/compose/docker-compose-fixturenet-lotus.yml diff --git a/app/data/compose/docker-compose-fixturenet-optimism.yml b/stack_orchestrator/data/compose/docker-compose-fixturenet-optimism.yml similarity index 69% rename from app/data/compose/docker-compose-fixturenet-optimism.yml rename to stack_orchestrator/data/compose/docker-compose-fixturenet-optimism.yml index ddf7e290..fe1eac50 100644 --- a/app/data/compose/docker-compose-fixturenet-optimism.yml +++ b/stack_orchestrator/data/compose/docker-compose-fixturenet-optimism.yml @@ -6,8 +6,8 @@ services: # Deploys the L1 smart contracts (outputs to volume l1_deployment) fixturenet-optimism-contracts: restart: on-failure - hostname: fixturenet-optimism-contracts image: cerc/optimism-contracts:local + hostname: fixturenet-optimism-contracts env_file: - ../config/fixturenet-optimism/l1-params.env environment: @@ -17,27 +17,49 @@ services: CERC_L1_ACCOUNTS_CSV_URL: ${CERC_L1_ACCOUNTS_CSV_URL} CERC_L1_ADDRESS: ${CERC_L1_ADDRESS} CERC_L1_PRIV_KEY: ${CERC_L1_PRIV_KEY} - CERC_L1_ADDRESS_2: ${CERC_L1_ADDRESS_2} - CERC_L1_PRIV_KEY_2: 
${CERC_L1_PRIV_KEY_2} - # Waits for L1 endpoint to be up before running the script - command: | - "./wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- ./run.sh" volumes: - ../config/network/wait-for-it.sh:/app/packages/contracts-bedrock/wait-for-it.sh - - ../config/optimism-contracts/hardhat-tasks/verify-contract-deployment.ts:/app/packages/contracts-bedrock/tasks/verify-contract-deployment.ts - - ../config/optimism-contracts/hardhat-tasks/rekey-json.ts:/app/packages/contracts-bedrock/tasks/rekey-json.ts - - ../config/optimism-contracts/hardhat-tasks/send-balance.ts:/app/packages/contracts-bedrock/tasks/send-balance.ts - - ../config/fixturenet-optimism/optimism-contracts/update-config.js:/app/packages/contracts-bedrock/update-config.js - - ../config/fixturenet-optimism/optimism-contracts/run.sh:/app/packages/contracts-bedrock/run.sh + - ../config/fixturenet-optimism/optimism-contracts/deploy-contracts.sh:/app/packages/contracts-bedrock/deploy-contracts.sh - l2_accounts:/l2-accounts - - l1_deployment:/app/packages/contracts-bedrock + - l1_deployment:/l1-deployment + - l2_config:/l2-config + # Waits for L1 endpoint to be up before running the contract deploy script + command: | + "./wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- ./deploy-contracts.sh" + + # Initializes and runs the L2 execution client (outputs to volume l2_geth_data) + op-geth: + restart: always + image: cerc/optimism-l2geth:local + hostname: op-geth + depends_on: + op-node: + condition: service_started + volumes: + - ../config/fixturenet-optimism/run-op-geth.sh:/run-op-geth.sh + - l2_config:/l2-config:ro + - l2_accounts:/l2-accounts:ro + - l2_geth_data:/datadir + entrypoint: "sh" + command: "/run-op-geth.sh" + ports: + - "8545" + - "8546" + healthcheck: + test: ["CMD", "nc", "-vz", "localhost:8545"] + interval: 30s + timeout: 10s + retries: 100 + start_period: 10s 
extra_hosts: - "host.docker.internal:host-gateway" - # Generates the config files required for L2 (outputs to volume l2_config) - op-node-l2-config-gen: - restart: on-failure + # Runs the L2 consensus client (Sequencer node) + # Generates the L2 config files if not already present (outputs to volume l2_config) + op-node: + restart: always image: cerc/optimism-op-node:local + hostname: op-node depends_on: fixturenet-optimism-contracts: condition: service_completed_successfully @@ -47,61 +69,19 @@ services: CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} CERC_L1_RPC: ${CERC_L1_RPC} volumes: - - ../config/fixturenet-optimism/generate-l2-config.sh:/app/generate-l2-config.sh - - l1_deployment:/contracts-bedrock:ro - - l2_config:/app - command: ["sh", "/app/generate-l2-config.sh"] - extra_hosts: - - "host.docker.internal:host-gateway" - - # Initializes and runs the L2 execution client (outputs to volume l2_geth_data) - op-geth: - restart: always - image: cerc/optimism-l2geth:local - depends_on: - op-node-l2-config-gen: - condition: service_started - volumes: - - ../config/fixturenet-optimism/run-op-geth.sh:/run-op-geth.sh - - l2_config:/op-node:ro + - ../config/fixturenet-optimism/run-op-node.sh:/run-op-node.sh + - l1_deployment:/l1-deployment:ro + - l2_config:/l2-config - l2_accounts:/l2-accounts:ro - - l2_geth_data:/datadir entrypoint: "sh" - command: "/run-op-geth.sh" + command: "/run-op-node.sh" ports: - - "0.0.0.0:8545:8545" - - "0.0.0.0:8546:8546" - healthcheck: - test: ["CMD", "nc", "-vz", "localhost:8545"] - interval: 30s - timeout: 10s - retries: 10 - start_period: 10s - - # Runs the L2 consensus client (Sequencer node) - op-node: - restart: always - image: cerc/optimism-op-node:local - depends_on: - op-geth: - condition: service_healthy - env_file: - - ../config/fixturenet-optimism/l1-params.env - environment: - CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} - CERC_L1_RPC: ${CERC_L1_RPC} - volumes: - - ../config/fixturenet-optimism/run-op-node.sh:/app/run-op-node.sh - - 
l2_config:/op-node-data:ro - - l2_accounts:/l2-accounts:ro - command: ["sh", "/app/run-op-node.sh"] - ports: - - "0.0.0.0:8547:8547" + - "8547" healthcheck: test: ["CMD", "nc", "-vz", "localhost:8547"] interval: 30s timeout: 10s - retries: 10 + retries: 100 start_period: 10s extra_hosts: - "host.docker.internal:host-gateway" @@ -110,6 +90,7 @@ services: op-batcher: restart: always image: cerc/optimism-op-batcher:local + hostname: op-batcher depends_on: op-node: condition: service_healthy @@ -129,7 +110,7 @@ services: command: | "/wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- /run-op-batcher.sh" ports: - - "127.0.0.1:8548:8548" + - "8548" extra_hosts: - "host.docker.internal:host-gateway" @@ -137,25 +118,29 @@ services: op-proposer: restart: always image: cerc/optimism-op-proposer:local + hostname: op-proposer depends_on: op-node: condition: service_healthy + op-geth: + condition: service_healthy env_file: - ../config/fixturenet-optimism/l1-params.env environment: CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} CERC_L1_RPC: ${CERC_L1_RPC} + CERC_L1_CHAIN_ID: ${CERC_L1_CHAIN_ID} volumes: - ../config/network/wait-for-it.sh:/wait-for-it.sh - ../config/fixturenet-optimism/run-op-proposer.sh:/run-op-proposer.sh - - l1_deployment:/contracts-bedrock:ro + - l1_deployment:/l1-deployment:ro - l2_accounts:/l2-accounts:ro entrypoint: ["sh", "-c"] # Waits for L1 endpoint to be up before running the proposer command: | "/wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- /run-op-proposer.sh" ports: - - "127.0.0.1:8560:8560" + - "8560" extra_hosts: - "host.docker.internal:host-gateway" diff --git a/app/data/compose/docker-compose-fixturenet-plugeth.yml b/stack_orchestrator/data/compose/docker-compose-fixturenet-plugeth.yml similarity index 100% rename from app/data/compose/docker-compose-fixturenet-plugeth.yml rename to 
stack_orchestrator/data/compose/docker-compose-fixturenet-plugeth.yml diff --git a/app/data/compose/docker-compose-fixturenet-pocket.yml b/stack_orchestrator/data/compose/docker-compose-fixturenet-pocket.yml similarity index 100% rename from app/data/compose/docker-compose-fixturenet-pocket.yml rename to stack_orchestrator/data/compose/docker-compose-fixturenet-pocket.yml diff --git a/app/data/compose/docker-compose-fixturenet-sushiswap-subgraph-v3.yml b/stack_orchestrator/data/compose/docker-compose-fixturenet-sushiswap-subgraph-v3.yml similarity index 100% rename from app/data/compose/docker-compose-fixturenet-sushiswap-subgraph-v3.yml rename to stack_orchestrator/data/compose/docker-compose-fixturenet-sushiswap-subgraph-v3.yml diff --git a/app/data/compose/docker-compose-foundry.yml b/stack_orchestrator/data/compose/docker-compose-foundry.yml similarity index 100% rename from app/data/compose/docker-compose-foundry.yml rename to stack_orchestrator/data/compose/docker-compose-foundry.yml diff --git a/app/data/compose/docker-compose-go-ethereum-foundry.yml b/stack_orchestrator/data/compose/docker-compose-go-ethereum-foundry.yml similarity index 100% rename from app/data/compose/docker-compose-go-ethereum-foundry.yml rename to stack_orchestrator/data/compose/docker-compose-go-ethereum-foundry.yml diff --git a/app/data/compose/docker-compose-go-nitro.yml b/stack_orchestrator/data/compose/docker-compose-go-nitro.yml similarity index 100% rename from app/data/compose/docker-compose-go-nitro.yml rename to stack_orchestrator/data/compose/docker-compose-go-nitro.yml diff --git a/app/data/compose/docker-compose-graph-node.yml b/stack_orchestrator/data/compose/docker-compose-graph-node.yml similarity index 100% rename from app/data/compose/docker-compose-graph-node.yml rename to stack_orchestrator/data/compose/docker-compose-graph-node.yml diff --git a/app/data/compose/docker-compose-ipld-eth-beacon-db.yml 
b/stack_orchestrator/data/compose/docker-compose-ipld-eth-beacon-db.yml similarity index 100% rename from app/data/compose/docker-compose-ipld-eth-beacon-db.yml rename to stack_orchestrator/data/compose/docker-compose-ipld-eth-beacon-db.yml diff --git a/app/data/compose/docker-compose-ipld-eth-beacon-indexer.yml b/stack_orchestrator/data/compose/docker-compose-ipld-eth-beacon-indexer.yml similarity index 100% rename from app/data/compose/docker-compose-ipld-eth-beacon-indexer.yml rename to stack_orchestrator/data/compose/docker-compose-ipld-eth-beacon-indexer.yml diff --git a/app/data/compose/docker-compose-ipld-eth-db.yml b/stack_orchestrator/data/compose/docker-compose-ipld-eth-db.yml similarity index 100% rename from app/data/compose/docker-compose-ipld-eth-db.yml rename to stack_orchestrator/data/compose/docker-compose-ipld-eth-db.yml diff --git a/app/data/compose/docker-compose-ipld-eth-server-payments.yml b/stack_orchestrator/data/compose/docker-compose-ipld-eth-server-payments.yml similarity index 100% rename from app/data/compose/docker-compose-ipld-eth-server-payments.yml rename to stack_orchestrator/data/compose/docker-compose-ipld-eth-server-payments.yml diff --git a/app/data/compose/docker-compose-ipld-eth-server.yml b/stack_orchestrator/data/compose/docker-compose-ipld-eth-server.yml similarity index 100% rename from app/data/compose/docker-compose-ipld-eth-server.yml rename to stack_orchestrator/data/compose/docker-compose-ipld-eth-server.yml diff --git a/app/data/compose/docker-compose-keycloak.yml b/stack_orchestrator/data/compose/docker-compose-keycloak.yml similarity index 100% rename from app/data/compose/docker-compose-keycloak.yml rename to stack_orchestrator/data/compose/docker-compose-keycloak.yml diff --git a/app/data/compose/docker-compose-kubo.yml b/stack_orchestrator/data/compose/docker-compose-kubo.yml similarity index 100% rename from app/data/compose/docker-compose-kubo.yml rename to 
stack_orchestrator/data/compose/docker-compose-kubo.yml diff --git a/app/data/compose/docker-compose-laconic-dot-com.yml b/stack_orchestrator/data/compose/docker-compose-laconic-dot-com.yml similarity index 100% rename from app/data/compose/docker-compose-laconic-dot-com.yml rename to stack_orchestrator/data/compose/docker-compose-laconic-dot-com.yml diff --git a/app/data/compose/docker-compose-laconicd.yml b/stack_orchestrator/data/compose/docker-compose-laconicd.yml similarity index 100% rename from app/data/compose/docker-compose-laconicd.yml rename to stack_orchestrator/data/compose/docker-compose-laconicd.yml diff --git a/app/data/compose/docker-compose-lasso.yml b/stack_orchestrator/data/compose/docker-compose-lasso.yml similarity index 100% rename from app/data/compose/docker-compose-lasso.yml rename to stack_orchestrator/data/compose/docker-compose-lasso.yml diff --git a/app/data/compose/docker-compose-mainnet-eth-api-proxy.yml b/stack_orchestrator/data/compose/docker-compose-mainnet-eth-api-proxy.yml similarity index 100% rename from app/data/compose/docker-compose-mainnet-eth-api-proxy.yml rename to stack_orchestrator/data/compose/docker-compose-mainnet-eth-api-proxy.yml diff --git a/stack_orchestrator/data/compose/docker-compose-mainnet-eth-ipld-eth-db.yml b/stack_orchestrator/data/compose/docker-compose-mainnet-eth-ipld-eth-db.yml new file mode 100644 index 00000000..49cc2de3 --- /dev/null +++ b/stack_orchestrator/data/compose/docker-compose-mainnet-eth-ipld-eth-db.yml @@ -0,0 +1,29 @@ +version: "3.2" + +services: + migrations: + restart: on-failure + depends_on: + ipld-eth-db: + condition: service_healthy + image: cerc/ipld-eth-db:local + env_file: + - ../config/mainnet-eth-ipld-eth-db/db.env + + ipld-eth-db: + image: timescale/timescaledb:2.8.1-pg14 + restart: always + env_file: + - ../config/mainnet-eth-ipld-eth-db/db.env + volumes: + - mainnet_eth_ipld_eth_db:/var/lib/postgresql/data + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "5432"] + 
interval: 30s + timeout: 10s + retries: 10 + start_period: 3s + ports: + - "5432" +volumes: + mainnet_eth_ipld_eth_db: diff --git a/stack_orchestrator/data/compose/docker-compose-mainnet-eth-ipld-eth-server.yml b/stack_orchestrator/data/compose/docker-compose-mainnet-eth-ipld-eth-server.yml new file mode 100644 index 00000000..4341c6a1 --- /dev/null +++ b/stack_orchestrator/data/compose/docker-compose-mainnet-eth-ipld-eth-server.yml @@ -0,0 +1,24 @@ +version: "3.7" +services: + ipld-eth-server: + restart: always + depends_on: + ipld-eth-db: + condition: service_healthy + image: cerc/ipld-eth-server:local + env_file: + - ../config/mainnet-eth-ipld-eth-db/db.env + - ../config/mainnet-eth-ipld-eth-server/srv.env + volumes: + - ../config/mainnet-eth-ipld-eth-server/config.toml:/app/config.toml:ro + ports: + - "8081" + - "8082" + - "8090" + - "40001" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "8081"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s diff --git a/app/data/compose/docker-compose-mainnet-eth-keycloak.yml b/stack_orchestrator/data/compose/docker-compose-mainnet-eth-keycloak.yml similarity index 94% rename from app/data/compose/docker-compose-mainnet-eth-keycloak.yml rename to stack_orchestrator/data/compose/docker-compose-mainnet-eth-keycloak.yml index dfa9a804..1674c62e 100644 --- a/app/data/compose/docker-compose-mainnet-eth-keycloak.yml +++ b/stack_orchestrator/data/compose/docker-compose-mainnet-eth-keycloak.yml @@ -6,7 +6,7 @@ services: env_file: - ../config/mainnet-eth-keycloak/keycloak.env healthcheck: - test: ["CMD", "nc", "-v", "localhost", "5432"] + test: ["CMD", "nc", "-v", "localhost", "35432"] interval: 30s timeout: 10s retries: 10 @@ -14,7 +14,7 @@ services: volumes: - mainnet_eth_keycloak_db:/var/lib/postgresql/data ports: - - 5432 + - 35432 keycloak: image: cerc/keycloak:local diff --git a/app/data/compose/docker-compose-mainnet-eth-metrics.yml b/stack_orchestrator/data/compose/docker-compose-mainnet-eth-metrics.yml 
similarity index 100% rename from app/data/compose/docker-compose-mainnet-eth-metrics.yml rename to stack_orchestrator/data/compose/docker-compose-mainnet-eth-metrics.yml diff --git a/stack_orchestrator/data/compose/docker-compose-mainnet-eth-plugeth.yml b/stack_orchestrator/data/compose/docker-compose-mainnet-eth-plugeth.yml new file mode 100644 index 00000000..a8b301d2 --- /dev/null +++ b/stack_orchestrator/data/compose/docker-compose-mainnet-eth-plugeth.yml @@ -0,0 +1,72 @@ + +services: + + mainnet-eth-geth-1: + restart: always + hostname: mainnet-eth-geth-1 + cap_add: + - SYS_PTRACE + image: cerc/plugeth-with-plugins:local + entrypoint: /bin/sh + command: -c "/opt/run-geth.sh" + env_file: + - ../config/mainnet-eth-ipld-eth-db/db.env + - ../config/mainnet-eth-plugeth/geth.env + volumes: + - mainnet_eth_plugeth_geth_1_data:/data + - mainnet_eth_plugeth_config_data:/etc/mainnet-eth + - ../config/mainnet-eth-plugeth/scripts/run-geth.sh:/opt/run-geth.sh + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "8545"] + interval: 30s + timeout: 10s + retries: 10 + start_period: 3s + ports: + # http api + - "8545" + # ws api + - "8546" + # ws el + - "8551" + # p2p + - "30303" + - "30303/udp" + # debugging + - "40000" + # metrics + - "6060" + + mainnet-eth-lighthouse-1: + restart: always + hostname: mainnet-eth-lighthouse-1 + healthcheck: + test: ["CMD", "wget", "--tries=1", "--connect-timeout=1", "--quiet", "-O", "-", "http://localhost:5052/eth/v2/beacon/blocks/head"] + interval: 30s + timeout: 10s + retries: 10 + start_period: 30s + environment: + LIGHTHOUSE_EXECUTION_ENDPOINT: "http://mainnet-eth-geth-1:8551" + env_file: + - ../config/mainnet-eth-plugeth/lighthouse.env + image: cerc/lighthouse:local + entrypoint: /bin/sh + command: -c "/opt/run-lighthouse.sh" + volumes: + - mainnet_eth_plugeth_lighthouse_1_data:/data + - mainnet_eth_plugeth_config_data:/etc/mainnet-eth + - ../config/mainnet-eth-plugeth/scripts/run-lighthouse.sh:/opt/run-lighthouse.sh + ports: + # api 
+ - "5052" + # metrics + - "5054" + # p2p + - "9000" + - "9000/udp" + +volumes: + mainnet_eth_plugeth_config_data: + mainnet_eth_plugeth_geth_1_data: + mainnet_eth_plugeth_lighthouse_1_data: diff --git a/app/data/compose/docker-compose-mainnet-eth.yml b/stack_orchestrator/data/compose/docker-compose-mainnet-eth.yml similarity index 100% rename from app/data/compose/docker-compose-mainnet-eth.yml rename to stack_orchestrator/data/compose/docker-compose-mainnet-eth.yml diff --git a/app/data/compose/docker-compose-mainnet-go-opera.yml b/stack_orchestrator/data/compose/docker-compose-mainnet-go-opera.yml similarity index 100% rename from app/data/compose/docker-compose-mainnet-go-opera.yml rename to stack_orchestrator/data/compose/docker-compose-mainnet-go-opera.yml diff --git a/app/data/compose/docker-compose-mainnet-laconicd.yml b/stack_orchestrator/data/compose/docker-compose-mainnet-laconicd.yml similarity index 100% rename from app/data/compose/docker-compose-mainnet-laconicd.yml rename to stack_orchestrator/data/compose/docker-compose-mainnet-laconicd.yml diff --git a/app/data/compose/docker-compose-mobymask-app-v3.yml b/stack_orchestrator/data/compose/docker-compose-mobymask-app-v3.yml similarity index 100% rename from app/data/compose/docker-compose-mobymask-app-v3.yml rename to stack_orchestrator/data/compose/docker-compose-mobymask-app-v3.yml diff --git a/app/data/compose/docker-compose-mobymask-app.yml b/stack_orchestrator/data/compose/docker-compose-mobymask-app.yml similarity index 100% rename from app/data/compose/docker-compose-mobymask-app.yml rename to stack_orchestrator/data/compose/docker-compose-mobymask-app.yml diff --git a/app/data/compose/docker-compose-mobymask-snap.yml b/stack_orchestrator/data/compose/docker-compose-mobymask-snap.yml similarity index 100% rename from app/data/compose/docker-compose-mobymask-snap.yml rename to stack_orchestrator/data/compose/docker-compose-mobymask-snap.yml diff --git 
a/app/data/compose/docker-compose-nitro-contracts.yml b/stack_orchestrator/data/compose/docker-compose-nitro-contracts.yml similarity index 100% rename from app/data/compose/docker-compose-nitro-contracts.yml rename to stack_orchestrator/data/compose/docker-compose-nitro-contracts.yml diff --git a/app/data/compose/docker-compose-nitro-rpc-client.yml b/stack_orchestrator/data/compose/docker-compose-nitro-rpc-client.yml similarity index 100% rename from app/data/compose/docker-compose-nitro-rpc-client.yml rename to stack_orchestrator/data/compose/docker-compose-nitro-rpc-client.yml diff --git a/app/data/compose/docker-compose-peer-test-app.yml b/stack_orchestrator/data/compose/docker-compose-peer-test-app.yml similarity index 100% rename from app/data/compose/docker-compose-peer-test-app.yml rename to stack_orchestrator/data/compose/docker-compose-peer-test-app.yml diff --git a/app/data/compose/docker-compose-ponder-indexer.yml b/stack_orchestrator/data/compose/docker-compose-ponder-indexer.yml similarity index 100% rename from app/data/compose/docker-compose-ponder-indexer.yml rename to stack_orchestrator/data/compose/docker-compose-ponder-indexer.yml diff --git a/app/data/compose/docker-compose-ponder-watcher.yml b/stack_orchestrator/data/compose/docker-compose-ponder-watcher.yml similarity index 100% rename from app/data/compose/docker-compose-ponder-watcher.yml rename to stack_orchestrator/data/compose/docker-compose-ponder-watcher.yml diff --git a/stack_orchestrator/data/compose/docker-compose-proxy-server.yml b/stack_orchestrator/data/compose/docker-compose-proxy-server.yml new file mode 100644 index 00000000..607e8d23 --- /dev/null +++ b/stack_orchestrator/data/compose/docker-compose-proxy-server.yml @@ -0,0 +1,22 @@ +version: "3.2" + +services: + proxy-server: + image: cerc/watcher-ts:local + restart: on-failure + working_dir: /app/packages/cli + environment: + ENABLE_PROXY: ${ENABLE_PROXY:-true} + PROXY_UPSTREAM: ${CERC_PROXY_UPSTREAM} + PROXY_ORIGIN_HEADER: 
${CERC_PROXY_ORIGIN_HEADER} + command: ["sh", "-c", "./run.sh"] + volumes: + - ../config/proxy-server/run.sh:/app/packages/cli/run.sh + ports: + - "4000" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "4000"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 10s diff --git a/app/data/compose/docker-compose-reth.yml b/stack_orchestrator/data/compose/docker-compose-reth.yml similarity index 100% rename from app/data/compose/docker-compose-reth.yml rename to stack_orchestrator/data/compose/docker-compose-reth.yml diff --git a/app/data/compose/docker-compose-sushiswap-subgraph-v3.yml b/stack_orchestrator/data/compose/docker-compose-sushiswap-subgraph-v3.yml similarity index 100% rename from app/data/compose/docker-compose-sushiswap-subgraph-v3.yml rename to stack_orchestrator/data/compose/docker-compose-sushiswap-subgraph-v3.yml diff --git a/app/data/compose/docker-compose-test.yml b/stack_orchestrator/data/compose/docker-compose-test.yml similarity index 100% rename from app/data/compose/docker-compose-test.yml rename to stack_orchestrator/data/compose/docker-compose-test.yml diff --git a/app/data/compose/docker-compose-tx-spammer.yml b/stack_orchestrator/data/compose/docker-compose-tx-spammer.yml similarity index 100% rename from app/data/compose/docker-compose-tx-spammer.yml rename to stack_orchestrator/data/compose/docker-compose-tx-spammer.yml diff --git a/stack_orchestrator/data/compose/docker-compose-uniswap-interface.yml b/stack_orchestrator/data/compose/docker-compose-uniswap-interface.yml new file mode 100644 index 00000000..85b71af2 --- /dev/null +++ b/stack_orchestrator/data/compose/docker-compose-uniswap-interface.yml @@ -0,0 +1,17 @@ +version: "3.2" + +services: + uniswap-interface: + image: cerc/uniswap-interface:local + restart: on-failure + environment: + - REACT_APP_INFURA_KEY=${CERC_INFURA_KEY} + - REACT_APP_AWS_API_ENDPOINT=${CERC_UNISWAP_GQL} + command: ["./build-app.sh"] + volumes: + - app_builds:/app-builds + - 
../config/uniswap-interface/build-app.sh:/app/build-app.sh + +volumes: + app_builds: + app_globs: diff --git a/stack_orchestrator/data/compose/docker-compose-uniswap-urbit.yml b/stack_orchestrator/data/compose/docker-compose-uniswap-urbit.yml new file mode 100644 index 00000000..31fa99bf --- /dev/null +++ b/stack_orchestrator/data/compose/docker-compose-uniswap-urbit.yml @@ -0,0 +1,46 @@ +version: '3.7' + +services: + urbit-fake-ship: + restart: unless-stopped + image: tloncorp/vere + environment: + CERC_IPFS_GLOB_HOST_ENDPOINT: ${CERC_IPFS_GLOB_HOST_ENDPOINT:-http://ipfs-glob-host:5001} + CERC_IPFS_SERVER_ENDPOINT: ${CERC_IPFS_SERVER_ENDPOINT:-http://ipfs-glob-host:8080} + entrypoint: ["bash", "-c", "./run-urbit-ship.sh && ./deploy-uniswap-app.sh && tail -f /dev/null"] + volumes: + - urbit_data:/urbit + - app_builds:/app-builds + - app_globs:/app-globs + - ../config/urbit/run-urbit-ship.sh:/urbit/run-urbit-ship.sh + - ../config/uniswap-interface/deploy-uniswap-app.sh:/urbit/deploy-uniswap-app.sh + ports: + - "80" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "80"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 10s + + ipfs-glob-host: + image: ipfs/kubo:master-2023-02-20-714a968 + volumes: + - ipfs-import:/import + - ipfs-data:/data/ipfs + ports: + - "8080" + - "5001" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "5001"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 10s + +volumes: + urbit_data: + app_builds: + app_globs: + ipfs-import: + ipfs-data: diff --git a/app/data/compose/docker-compose-watcher-azimuth.yml b/stack_orchestrator/data/compose/docker-compose-watcher-azimuth.yml similarity index 51% rename from app/data/compose/docker-compose-watcher-azimuth.yml rename to stack_orchestrator/data/compose/docker-compose-watcher-azimuth.yml index 327c77fc..48e77082 100644 --- a/app/data/compose/docker-compose-watcher-azimuth.yml +++ b/stack_orchestrator/data/compose/docker-compose-watcher-azimuth.yml @@ -10,6 +10,7 @@ 
services: - POSTGRES_MULTIPLE_DATABASES=azimuth-watcher,azimuth-watcher-job-queue,censures-watcher,censures-watcher-job-queue,claims-watcher,claims-watcher-job-queue,conditional-star-release-watcher,conditional-star-release-watcher-job-queue,delegated-sending-watcher,delegated-sending-watcher-job-queue,ecliptic-watcher,ecliptic-watcher-job-queue,linear-star-release-watcher,linear-star-release-watcher-job-queue,polls-watcher,polls-watcher-job-queue - POSTGRES_EXTENSION=azimuth-watcher-job-queue:pgcrypto,censures-watcher-job-queue:pgcrypto,claims-watcher-job-queue:pgcrypto,conditional-star-release-watcher-job-queue:pgcrypto,delegated-sending-watcher-job-queue:pgcrypto,ecliptic-watcher-job-queue:pgcrypto,linear-star-release-watcher-job-queue:pgcrypto,polls-watcher-job-queue:pgcrypto, - POSTGRES_PASSWORD=password + command: ["postgres", "-c", "max_connections=200"] volumes: - ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh - watcher_db_data:/var/lib/postgresql/data @@ -22,6 +23,38 @@ services: retries: 15 start_period: 10s + # Starts the azimuth-watcher job runner + azimuth-watcher-job-runner: + image: cerc/watcher-azimuth:local + restart: unless-stopped + depends_on: + watcher-db: + condition: service_healthy + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC} + CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL} + CERC_HISTORICAL_BLOCK_RANGE: 500 + CONTRACT_ADDRESS: 0x223c067F8CF28ae173EE5CafEa60cA44C335fecB + CONTRACT_NAME: Azimuth + STARTING_BLOCK: 6784880 + working_dir: /app/packages/azimuth-watcher + command: "./start-job-runner.sh" + volumes: + - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/azimuth-watcher/environments/watcher-config-template.toml + - ../config/watcher-azimuth/merge-toml.js:/app/packages/azimuth-watcher/merge-toml.js + - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/azimuth-watcher/start-job-runner.sh + 
ports: + - "9000" + healthcheck: + test: ["CMD", "nc", "-vz", "localhost", "9000"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" + # Starts the azimuth-watcher server azimuth-watcher-server: image: cerc/watcher-azimuth:local @@ -29,8 +62,8 @@ services: depends_on: watcher-db: condition: service_healthy - env_file: - - ../config/watcher-azimuth/watcher-params.env + azimuth-watcher-job-runner: + condition: service_healthy environment: CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC} @@ -52,6 +85,37 @@ services: extra_hosts: - "host.docker.internal:host-gateway" + # Starts the censures-watcher job runner + censures-watcher-job-runner: + image: cerc/watcher-azimuth:local + restart: unless-stopped + depends_on: + watcher-db: + condition: service_healthy + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC} + CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL} + CONTRACT_ADDRESS: 0x325f68d32BdEe6Ed86E7235ff2480e2A433D6189 + CONTRACT_NAME: Censures + STARTING_BLOCK: 6784954 + working_dir: /app/packages/censures-watcher + command: "./start-job-runner.sh" + volumes: + - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/censures-watcher/environments/watcher-config-template.toml + - ../config/watcher-azimuth/merge-toml.js:/app/packages/censures-watcher/merge-toml.js + - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/censures-watcher/start-job-runner.sh + ports: + - "9002" + healthcheck: + test: ["CMD", "nc", "-vz", "localhost", "9002"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" + # Starts the censures-watcher server censures-watcher-server: image: cerc/watcher-azimuth:local @@ -59,8 +123,8 @@ services: depends_on: watcher-db: condition: service_healthy - env_file: - - ../config/watcher-azimuth/watcher-params.env + 
censures-watcher-job-runner: + condition: service_healthy environment: CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC} @@ -82,6 +146,37 @@ services: extra_hosts: - "host.docker.internal:host-gateway" + # Starts the claims-watcher job runner + claims-watcher-job-runner: + image: cerc/watcher-azimuth:local + restart: unless-stopped + depends_on: + watcher-db: + condition: service_healthy + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC} + CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL} + CONTRACT_ADDRESS: 0xe7e7f69b34D7d9Bd8d61Fb22C33b22708947971A + CONTRACT_NAME: Claims + STARTING_BLOCK: 6784941 + working_dir: /app/packages/claims-watcher + command: "./start-job-runner.sh" + volumes: + - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/claims-watcher/environments/watcher-config-template.toml + - ../config/watcher-azimuth/merge-toml.js:/app/packages/claims-watcher/merge-toml.js + - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/claims-watcher/start-job-runner.sh + ports: + - "9004" + healthcheck: + test: ["CMD", "nc", "-vz", "localhost", "9004"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" + # Starts the claims-watcher server claims-watcher-server: image: cerc/watcher-azimuth:local @@ -89,8 +184,8 @@ services: depends_on: watcher-db: condition: service_healthy - env_file: - - ../config/watcher-azimuth/watcher-params.env + claims-watcher-job-runner: + condition: service_healthy environment: CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC} @@ -112,6 +207,37 @@ services: extra_hosts: - "host.docker.internal:host-gateway" + # Starts the conditional-star-release-watcher job runner + conditional-star-release-watcher-job-runner: + image: cerc/watcher-azimuth:local + restart: unless-stopped + depends_on: + watcher-db: + condition: service_healthy + environment: + 
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC} + CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL} + CONTRACT_ADDRESS: 0x8C241098C3D3498Fe1261421633FD57986D74AeA + CONTRACT_NAME: ConditionalStarRelease + STARTING_BLOCK: 6828004 + working_dir: /app/packages/conditional-star-release-watcher + command: "./start-job-runner.sh" + volumes: + - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/conditional-star-release-watcher/environments/watcher-config-template.toml + - ../config/watcher-azimuth/merge-toml.js:/app/packages/conditional-star-release-watcher/merge-toml.js + - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/conditional-star-release-watcher/start-job-runner.sh + ports: + - "9006" + healthcheck: + test: ["CMD", "nc", "-vz", "localhost", "9006"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" + # Starts the conditional-star-release-watcher server conditional-star-release-watcher-server: image: cerc/watcher-azimuth:local @@ -119,8 +245,8 @@ services: depends_on: watcher-db: condition: service_healthy - env_file: - - ../config/watcher-azimuth/watcher-params.env + conditional-star-release-watcher-job-runner: + condition: service_healthy environment: CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC} @@ -142,6 +268,37 @@ services: extra_hosts: - "host.docker.internal:host-gateway" + # Starts the delegated-sending-watcher job runner + delegated-sending-watcher-job-runner: + image: cerc/watcher-azimuth:local + restart: unless-stopped + depends_on: + watcher-db: + condition: service_healthy + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC} + CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL} + CONTRACT_ADDRESS: 0xf6b461fE1aD4bd2ce25B23Fe0aff2ac19B3dFA76 + CONTRACT_NAME: DelegatedSending + STARTING_BLOCK: 6784956 + working_dir: /app/packages/delegated-sending-watcher + command: 
"./start-job-runner.sh" + volumes: + - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/delegated-sending-watcher/environments/watcher-config-template.toml + - ../config/watcher-azimuth/merge-toml.js:/app/packages/delegated-sending-watcher/merge-toml.js + - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/delegated-sending-watcher/start-job-runner.sh + ports: + - "9008" + healthcheck: + test: ["CMD", "nc", "-vz", "localhost", "9008"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" + # Starts the delegated-sending-watcher server delegated-sending-watcher-server: image: cerc/watcher-azimuth:local @@ -149,8 +306,8 @@ services: depends_on: watcher-db: condition: service_healthy - env_file: - - ../config/watcher-azimuth/watcher-params.env + delegated-sending-watcher-job-runner: + condition: service_healthy environment: CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC} @@ -172,6 +329,37 @@ services: extra_hosts: - "host.docker.internal:host-gateway" + # Starts the ecliptic-watcher job runner + ecliptic-watcher-job-runner: + image: cerc/watcher-azimuth:local + restart: unless-stopped + depends_on: + watcher-db: + condition: service_healthy + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC} + CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL} + CONTRACT_ADDRESS: 0x33EeCbf908478C10614626A9D304bfe18B78DD73 + CONTRACT_NAME: Ecliptic + STARTING_BLOCK: 13692129 + working_dir: /app/packages/ecliptic-watcher + command: "./start-job-runner.sh" + volumes: + - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/ecliptic-watcher/environments/watcher-config-template.toml + - ../config/watcher-azimuth/merge-toml.js:/app/packages/ecliptic-watcher/merge-toml.js + - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/ecliptic-watcher/start-job-runner.sh + ports: + - "9010" + healthcheck: + test: 
["CMD", "nc", "-vz", "localhost", "9010"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" + # Starts the ecliptic-watcher server ecliptic-watcher-server: image: cerc/watcher-azimuth:local @@ -179,8 +367,8 @@ services: depends_on: watcher-db: condition: service_healthy - env_file: - - ../config/watcher-azimuth/watcher-params.env + ecliptic-watcher-job-runner: + condition: service_healthy environment: CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC} @@ -202,6 +390,37 @@ services: extra_hosts: - "host.docker.internal:host-gateway" + # Starts the linear-star-release-watcher job runner + linear-star-release-watcher-job-runner: + image: cerc/watcher-azimuth:local + restart: unless-stopped + depends_on: + watcher-db: + condition: service_healthy + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC} + CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL} + CONTRACT_ADDRESS: 0x86cd9cd0992F04231751E3761De45cEceA5d1801 + CONTRACT_NAME: LinearStarRelease + STARTING_BLOCK: 6784943 + working_dir: /app/packages/linear-star-release-watcher + command: "./start-job-runner.sh" + volumes: + - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/linear-star-release-watcher/environments/watcher-config-template.toml + - ../config/watcher-azimuth/merge-toml.js:/app/packages/linear-star-release-watcher/merge-toml.js + - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/linear-star-release-watcher/start-job-runner.sh + ports: + - "9012" + healthcheck: + test: ["CMD", "nc", "-vz", "localhost", "9012"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" + # Starts the linear-star-release-watcher server linear-star-release-watcher-server: image: cerc/watcher-azimuth:local @@ -209,8 +428,8 @@ services: depends_on: watcher-db: condition: service_healthy - env_file: - - 
../config/watcher-azimuth/watcher-params.env + linear-star-release-watcher-job-runner: + condition: service_healthy environment: CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC} @@ -232,6 +451,37 @@ services: extra_hosts: - "host.docker.internal:host-gateway" + # Starts the polls-watcher job runner + polls-watcher-job-runner: + image: cerc/watcher-azimuth:local + restart: unless-stopped + depends_on: + watcher-db: + condition: service_healthy + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC} + CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL} + CONTRACT_ADDRESS: 0x7fEcaB617c868Bb5996d99D95200D2Fa708218e4 + CONTRACT_NAME: Polls + STARTING_BLOCK: 6784912 + working_dir: /app/packages/polls-watcher + command: "./start-job-runner.sh" + volumes: + - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/polls-watcher/environments/watcher-config-template.toml + - ../config/watcher-azimuth/merge-toml.js:/app/packages/polls-watcher/merge-toml.js + - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/polls-watcher/start-job-runner.sh + ports: + - "9014" + healthcheck: + test: ["CMD", "nc", "-vz", "localhost", "9014"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" + # Starts the polls-watcher server polls-watcher-server: image: cerc/watcher-azimuth:local @@ -239,8 +489,8 @@ services: depends_on: watcher-db: condition: service_healthy - env_file: - - ../config/watcher-azimuth/watcher-params.env + polls-watcher-job-runner: + condition: service_healthy environment: CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC} diff --git a/app/data/compose/docker-compose-watcher-erc20.yml b/stack_orchestrator/data/compose/docker-compose-watcher-erc20.yml similarity index 100% rename from app/data/compose/docker-compose-watcher-erc20.yml rename to 
stack_orchestrator/data/compose/docker-compose-watcher-erc20.yml diff --git a/app/data/compose/docker-compose-watcher-erc721.yml b/stack_orchestrator/data/compose/docker-compose-watcher-erc721.yml similarity index 100% rename from app/data/compose/docker-compose-watcher-erc721.yml rename to stack_orchestrator/data/compose/docker-compose-watcher-erc721.yml diff --git a/app/data/compose/docker-compose-watcher-gelato.yml b/stack_orchestrator/data/compose/docker-compose-watcher-gelato.yml similarity index 100% rename from app/data/compose/docker-compose-watcher-gelato.yml rename to stack_orchestrator/data/compose/docker-compose-watcher-gelato.yml diff --git a/stack_orchestrator/data/compose/docker-compose-watcher-merkl-sushiswap-v3.yml b/stack_orchestrator/data/compose/docker-compose-watcher-merkl-sushiswap-v3.yml new file mode 100644 index 00000000..0a83af89 --- /dev/null +++ b/stack_orchestrator/data/compose/docker-compose-watcher-merkl-sushiswap-v3.yml @@ -0,0 +1,76 @@ +version: '3.2' + +services: + merkl-sushiswap-v3-watcher-db: + restart: unless-stopped + image: postgres:14-alpine + environment: + - POSTGRES_USER=vdbm + - POSTGRES_MULTIPLE_DATABASES=merkl-sushiswap-v3-watcher,merkl-sushiswap-v3-watcher-job-queue + - POSTGRES_EXTENSION=merkl-sushiswap-v3-watcher-job-queue:pgcrypto + - POSTGRES_PASSWORD=password + volumes: + - ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh + - merkl_sushiswap_v3_watcher_db_data:/var/lib/postgresql/data + ports: + - "5432" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "5432"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 10s + + merkl-sushiswap-v3-watcher-job-runner: + restart: unless-stopped + depends_on: + merkl-sushiswap-v3-watcher-db: + condition: service_healthy + image: cerc/watcher-merkl-sushiswap-v3:local + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT} + command: ["bash", 
"./start-job-runner.sh"] + volumes: + - ../config/watcher-merkl-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml + - ../config/watcher-merkl-sushiswap-v3/start-job-runner.sh:/app/start-job-runner.sh + ports: + - "9000" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "9000"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" + + merkl-sushiswap-v3-watcher-server: + restart: unless-stopped + depends_on: + merkl-sushiswap-v3-watcher-db: + condition: service_healthy + merkl-sushiswap-v3-watcher-job-runner: + condition: service_healthy + image: cerc/watcher-merkl-sushiswap-v3:local + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT} + command: ["bash", "./start-server.sh"] + volumes: + - ../config/watcher-merkl-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml + - ../config/watcher-merkl-sushiswap-v3/start-server.sh:/app/start-server.sh + ports: + - "127.0.0.1:3007:3008" + - "9001" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "3008"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" + +volumes: + merkl_sushiswap_v3_watcher_db_data: diff --git a/app/data/compose/docker-compose-watcher-mobymask-v2.yml b/stack_orchestrator/data/compose/docker-compose-watcher-mobymask-v2.yml similarity index 100% rename from app/data/compose/docker-compose-watcher-mobymask-v2.yml rename to stack_orchestrator/data/compose/docker-compose-watcher-mobymask-v2.yml diff --git a/app/data/compose/docker-compose-watcher-mobymask-v3.yml b/stack_orchestrator/data/compose/docker-compose-watcher-mobymask-v3.yml similarity index 100% rename from app/data/compose/docker-compose-watcher-mobymask-v3.yml rename to stack_orchestrator/data/compose/docker-compose-watcher-mobymask-v3.yml diff --git 
a/app/data/compose/docker-compose-watcher-mobymask.yml b/stack_orchestrator/data/compose/docker-compose-watcher-mobymask.yml similarity index 100% rename from app/data/compose/docker-compose-watcher-mobymask.yml rename to stack_orchestrator/data/compose/docker-compose-watcher-mobymask.yml diff --git a/stack_orchestrator/data/compose/docker-compose-watcher-sushiswap-v3.yml b/stack_orchestrator/data/compose/docker-compose-watcher-sushiswap-v3.yml new file mode 100644 index 00000000..f7b75ca5 --- /dev/null +++ b/stack_orchestrator/data/compose/docker-compose-watcher-sushiswap-v3.yml @@ -0,0 +1,76 @@ +version: '3.2' + +services: + sushiswap-v3-watcher-db: + restart: unless-stopped + image: postgres:14-alpine + environment: + - POSTGRES_USER=vdbm + - POSTGRES_MULTIPLE_DATABASES=sushiswap-v3-watcher,sushiswap-v3-watcher-job-queue + - POSTGRES_EXTENSION=sushiswap-v3-watcher-job-queue:pgcrypto + - POSTGRES_PASSWORD=password + volumes: + - ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh + - sushiswap_v3_watcher_db_data:/var/lib/postgresql/data + ports: + - "5432" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "5432"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 10s + + sushiswap-v3-watcher-job-runner: + restart: unless-stopped + depends_on: + sushiswap-v3-watcher-db: + condition: service_healthy + image: cerc/watcher-sushiswap-v3:local + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT} + command: ["bash", "./start-job-runner.sh"] + volumes: + - ../config/watcher-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml + - ../config/watcher-sushiswap-v3/start-job-runner.sh:/app/start-job-runner.sh + ports: + - "9000" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "9000"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" + 
+ sushiswap-v3-watcher-server: + restart: unless-stopped + depends_on: + sushiswap-v3-watcher-db: + condition: service_healthy + sushiswap-v3-watcher-job-runner: + condition: service_healthy + image: cerc/watcher-sushiswap-v3:local + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT} + command: ["bash", "./start-server.sh"] + volumes: + - ../config/watcher-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml + - ../config/watcher-sushiswap-v3/start-server.sh:/app/start-server.sh + ports: + - "127.0.0.1:3008:3008" + - "9001" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "3008"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" + +volumes: + sushiswap_v3_watcher_db_data: diff --git a/app/data/compose/docker-compose-watcher-sushiswap.yml b/stack_orchestrator/data/compose/docker-compose-watcher-sushiswap.yml similarity index 100% rename from app/data/compose/docker-compose-watcher-sushiswap.yml rename to stack_orchestrator/data/compose/docker-compose-watcher-sushiswap.yml diff --git a/app/data/compose/docker-compose-watcher-uniswap-v3.yml b/stack_orchestrator/data/compose/docker-compose-watcher-uniswap-v3.yml similarity index 100% rename from app/data/compose/docker-compose-watcher-uniswap-v3.yml rename to stack_orchestrator/data/compose/docker-compose-watcher-uniswap-v3.yml diff --git a/stack_orchestrator/data/compose/docker-compose-webapp-template.yml b/stack_orchestrator/data/compose/docker-compose-webapp-template.yml new file mode 100644 index 00000000..b8697afa --- /dev/null +++ b/stack_orchestrator/data/compose/docker-compose-webapp-template.yml @@ -0,0 +1,8 @@ +services: + webapp: + image: cerc/webapp-container:local + restart: always + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + ports: + - "3000" diff --git a/app/data/config/contract-sushiswap/deploy-core-contracts.sh 
b/stack_orchestrator/data/config/contract-sushiswap/deploy-core-contracts.sh similarity index 100% rename from app/data/config/contract-sushiswap/deploy-core-contracts.sh rename to stack_orchestrator/data/config/contract-sushiswap/deploy-core-contracts.sh diff --git a/app/data/config/contract-sushiswap/deploy-periphery-contracts.sh b/stack_orchestrator/data/config/contract-sushiswap/deploy-periphery-contracts.sh similarity index 100% rename from app/data/config/contract-sushiswap/deploy-periphery-contracts.sh rename to stack_orchestrator/data/config/contract-sushiswap/deploy-periphery-contracts.sh diff --git a/app/data/config/contract-sushiswap/deployment-params.env b/stack_orchestrator/data/config/contract-sushiswap/deployment-params.env similarity index 100% rename from app/data/config/contract-sushiswap/deployment-params.env rename to stack_orchestrator/data/config/contract-sushiswap/deployment-params.env diff --git a/app/data/config/fixturenet-eth-metrics/grafana/etc/dashboards/fixturenet_dashboard.json b/stack_orchestrator/data/config/fixturenet-eth-metrics/grafana/etc/dashboards/fixturenet_dashboard.json similarity index 100% rename from app/data/config/fixturenet-eth-metrics/grafana/etc/dashboards/fixturenet_dashboard.json rename to stack_orchestrator/data/config/fixturenet-eth-metrics/grafana/etc/dashboards/fixturenet_dashboard.json diff --git a/app/data/config/fixturenet-eth-metrics/grafana/etc/provisioning/dashboards/dashboards.yml b/stack_orchestrator/data/config/fixturenet-eth-metrics/grafana/etc/provisioning/dashboards/dashboards.yml similarity index 100% rename from app/data/config/fixturenet-eth-metrics/grafana/etc/provisioning/dashboards/dashboards.yml rename to stack_orchestrator/data/config/fixturenet-eth-metrics/grafana/etc/provisioning/dashboards/dashboards.yml diff --git a/app/data/config/fixturenet-eth-metrics/grafana/etc/provisioning/datasources/prometheus.yml 
b/stack_orchestrator/data/config/fixturenet-eth-metrics/grafana/etc/provisioning/datasources/prometheus.yml similarity index 100% rename from app/data/config/fixturenet-eth-metrics/grafana/etc/provisioning/datasources/prometheus.yml rename to stack_orchestrator/data/config/fixturenet-eth-metrics/grafana/etc/provisioning/datasources/prometheus.yml diff --git a/app/data/config/fixturenet-eth-metrics/prometheus/etc/prometheus.yml b/stack_orchestrator/data/config/fixturenet-eth-metrics/prometheus/etc/prometheus.yml similarity index 100% rename from app/data/config/fixturenet-eth-metrics/prometheus/etc/prometheus.yml rename to stack_orchestrator/data/config/fixturenet-eth-metrics/prometheus/etc/prometheus.yml diff --git a/app/data/config/fixturenet-eth/fixturenet-eth.env b/stack_orchestrator/data/config/fixturenet-eth/fixturenet-eth.env similarity index 100% rename from app/data/config/fixturenet-eth/fixturenet-eth.env rename to stack_orchestrator/data/config/fixturenet-eth/fixturenet-eth.env diff --git a/stack_orchestrator/data/config/fixturenet-laconicd/create-fixturenet.sh b/stack_orchestrator/data/config/fixturenet-laconicd/create-fixturenet.sh new file mode 100644 index 00000000..d444fcad --- /dev/null +++ b/stack_orchestrator/data/config/fixturenet-laconicd/create-fixturenet.sh @@ -0,0 +1,125 @@ +#!/bin/bash + +# TODO: this file is now an unmodified copy of cerc-io/laconicd/init.sh +# so we should have a mechanism to bundle it inside the container rather than link from here +# at deploy time. + +KEY="mykey" +CHAINID="laconic_9000-1" +MONIKER="localtestnet" +KEYRING="test" +KEYALGO="eth_secp256k1" +LOGLEVEL="info" +# trace evm +TRACE="--trace" +# TRACE="" + +if [ "$1" == "clean" ] || [ ! -d "$HOME/.laconicd/data/blockstore.db" ]; then + # validate dependencies are installed + command -v jq > /dev/null 2>&1 || { echo >&2 "jq not installed. 
More info: https://stedolan.github.io/jq/download/"; exit 1; } + + # remove existing daemon and client + rm -rf $HOME/.laconicd/* + rm -rf $HOME/.laconic/* + + if [ -n "`which make`" ]; then + make install + fi + + laconicd config keyring-backend $KEYRING + laconicd config chain-id $CHAINID + + # if $KEY exists it should be deleted + laconicd keys add $KEY --keyring-backend $KEYRING --algo $KEYALGO + + # Set moniker and chain-id for Ethermint (Moniker can be anything, chain-id must be an integer) + laconicd init $MONIKER --chain-id $CHAINID + + # Change parameter token denominations to aphoton + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["staking"]["params"]["bond_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["crisis"]["constant_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["gov"]["deposit_params"]["min_deposit"][0]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["mint"]["params"]["mint_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + # Custom modules + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["record_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json 
$HOME/.laconicd/config/genesis.json + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_commit_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_reveal_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_minimum_bid"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + + if [[ "$TEST_REGISTRY_EXPIRY" == "true" ]]; then + echo "Setting timers for expiry tests." + + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["record_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_grace_period"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + fi + + if [[ "$TEST_AUCTION_ENABLED" == "true" ]]; then + echo "Enabling auction and setting timers." 
+ + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_enabled"]=true' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_grace_period"]="300s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_commits_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_reveals_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + fi + + # increase block time (?) 
+ cat $HOME/.laconicd/config/genesis.json | jq '.consensus_params["block"]["time_iota_ms"]="1000"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + + # Set gas limit in genesis + cat $HOME/.laconicd/config/genesis.json | jq '.consensus_params["block"]["max_gas"]="10000000"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + + # disable produce empty block + if [[ "$OSTYPE" == "darwin"* ]]; then + sed -i '' 's/create_empty_blocks = true/create_empty_blocks = false/g' $HOME/.laconicd/config/config.toml + else + sed -i 's/create_empty_blocks = true/create_empty_blocks = false/g' $HOME/.laconicd/config/config.toml + fi + + if [[ $1 == "pending" ]]; then + if [[ "$OSTYPE" == "darwin"* ]]; then + sed -i '' 's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' $HOME/.laconicd/config/config.toml + sed -i '' 's/timeout_propose = "3s"/timeout_propose = "30s"/g' $HOME/.laconicd/config/config.toml + sed -i '' 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' $HOME/.laconicd/config/config.toml + sed -i '' 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' $HOME/.laconicd/config/config.toml + sed -i '' 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' $HOME/.laconicd/config/config.toml + sed -i '' 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' $HOME/.laconicd/config/config.toml + sed -i '' 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' $HOME/.laconicd/config/config.toml + sed -i '' 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml + sed -i '' 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' $HOME/.laconicd/config/config.toml + else + sed -i 's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' $HOME/.laconicd/config/config.toml + sed -i 
's/timeout_propose = "3s"/timeout_propose = "30s"/g' $HOME/.laconicd/config/config.toml + sed -i 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' $HOME/.laconicd/config/config.toml + sed -i 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' $HOME/.laconicd/config/config.toml + sed -i 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' $HOME/.laconicd/config/config.toml + sed -i 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' $HOME/.laconicd/config/config.toml + sed -i 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' $HOME/.laconicd/config/config.toml + sed -i 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml + sed -i 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' $HOME/.laconicd/config/config.toml + fi + fi + + # Allocate genesis accounts (cosmos formatted addresses) + laconicd add-genesis-account $KEY 100000000000000000000000000aphoton --keyring-backend $KEYRING + + # Sign genesis transaction + laconicd gentx $KEY 1000000000000000000000aphoton --keyring-backend $KEYRING --chain-id $CHAINID + + # Collect genesis tx + laconicd collect-gentxs + + # Run this to ensure everything worked and that the genesis file is setup correctly + laconicd validate-genesis + + if [[ $1 == "pending" ]]; then + echo "pending mode is on, please wait for the first block committed." + fi +else + echo "Using existing database at $HOME/.laconicd. 
To replace, run '`basename $0` clean'" +fi + +# Start the node (remove the --pruning=nothing flag if historical queries are not needed) +laconicd start --pruning=nothing --evm.tracer=json $TRACE --log_level $LOGLEVEL --minimum-gas-prices=0.0001aphoton --json-rpc.api eth,txpool,personal,net,debug,web3,miner --api.enable --gql-server --gql-playground diff --git a/app/data/config/fixturenet-laconicd/export-myaddress.sh b/stack_orchestrator/data/config/fixturenet-laconicd/export-myaddress.sh similarity index 100% rename from app/data/config/fixturenet-laconicd/export-myaddress.sh rename to stack_orchestrator/data/config/fixturenet-laconicd/export-myaddress.sh diff --git a/app/data/config/fixturenet-laconicd/export-mykey.sh b/stack_orchestrator/data/config/fixturenet-laconicd/export-mykey.sh similarity index 100% rename from app/data/config/fixturenet-laconicd/export-mykey.sh rename to stack_orchestrator/data/config/fixturenet-laconicd/export-mykey.sh diff --git a/app/data/config/fixturenet-laconicd/registry-cli-config-template.yml b/stack_orchestrator/data/config/fixturenet-laconicd/registry-cli-config-template.yml similarity index 100% rename from app/data/config/fixturenet-laconicd/registry-cli-config-template.yml rename to stack_orchestrator/data/config/fixturenet-laconicd/registry-cli-config-template.yml diff --git a/app/data/config/fixturenet-lotus/fund-account.sh b/stack_orchestrator/data/config/fixturenet-lotus/fund-account.sh similarity index 100% rename from app/data/config/fixturenet-lotus/fund-account.sh rename to stack_orchestrator/data/config/fixturenet-lotus/fund-account.sh diff --git a/app/data/config/fixturenet-lotus/lotus-env.env b/stack_orchestrator/data/config/fixturenet-lotus/lotus-env.env similarity index 100% rename from app/data/config/fixturenet-lotus/lotus-env.env rename to stack_orchestrator/data/config/fixturenet-lotus/lotus-env.env diff --git a/app/data/config/fixturenet-lotus/setup-miner.sh 
b/stack_orchestrator/data/config/fixturenet-lotus/setup-miner.sh similarity index 100% rename from app/data/config/fixturenet-lotus/setup-miner.sh rename to stack_orchestrator/data/config/fixturenet-lotus/setup-miner.sh diff --git a/app/data/config/fixturenet-lotus/setup-node.sh b/stack_orchestrator/data/config/fixturenet-lotus/setup-node.sh similarity index 100% rename from app/data/config/fixturenet-lotus/setup-node.sh rename to stack_orchestrator/data/config/fixturenet-lotus/setup-node.sh diff --git a/app/data/config/fixturenet-optimism/l1-params.env b/stack_orchestrator/data/config/fixturenet-optimism/l1-params.env similarity index 100% rename from app/data/config/fixturenet-optimism/l1-params.env rename to stack_orchestrator/data/config/fixturenet-optimism/l1-params.env diff --git a/stack_orchestrator/data/config/fixturenet-optimism/optimism-contracts/deploy-contracts.sh b/stack_orchestrator/data/config/fixturenet-optimism/optimism-contracts/deploy-contracts.sh new file mode 100755 index 00000000..23a2bc30 --- /dev/null +++ b/stack_orchestrator/data/config/fixturenet-optimism/optimism-contracts/deploy-contracts.sh @@ -0,0 +1,172 @@ +#!/bin/bash +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +CERC_L1_CHAIN_ID="${CERC_L1_CHAIN_ID:-${DEFAULT_CERC_L1_CHAIN_ID}}" +CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}" + +CERC_L1_ACCOUNTS_CSV_URL="${CERC_L1_ACCOUNTS_CSV_URL:-${DEFAULT_CERC_L1_ACCOUNTS_CSV_URL}}" + +export DEPLOYMENT_CONTEXT="$CERC_L1_CHAIN_ID" +# Optional create2 salt for deterministic deployment of contract implementations +export IMPL_SALT=$(openssl rand -hex 32) + +echo "Using L1 RPC endpoint ${CERC_L1_RPC}" + +# Exit if a deployment already exists (on restarts) +if [ -d "/l1-deployment/$DEPLOYMENT_CONTEXT" ]; then + echo "Deployment directory /l1-deployment/$DEPLOYMENT_CONTEXT, checking OptimismPortal deployment" + + OPTIMISM_PORTAL_ADDRESS=$(cat /l1-deployment/$DEPLOYMENT_CONTEXT/OptimismPortal.json | jq -r .address) + 
contract_code=$(cast code $OPTIMISM_PORTAL_ADDRESS --rpc-url $CERC_L1_RPC) + + if [ -z "${contract_code#0x}" ]; then + echo "Error: A deployment directory was found in the volume, but no contract code was found on-chain at the associated address. Please clear L1 deployment volume before restarting." + exit 1 + else + echo "Deployment found, exiting (successfully)." + exit 0 + fi +fi + +wait_for_block() { + local block="$1" # Block to wait for + local timeout="$2" # Max time to wait in seconds + + echo "Waiting for block $block." + i=0 + loops=$(($timeout/10)) + while [ -z "$block_result" ] && [[ "$i" -lt "$loops" ]]; do + sleep 10 + echo "Checking..." + block_result=$(cast block $block --rpc-url $CERC_L1_RPC | grep -E "(timestamp|hash|number)" || true) + i=$(($i + 1)) + done +} + +# We need four accounts and their private keys for the deployment: Admin, Proposer, Batcher, and Sequencer +# If $CERC_L1_ADDRESS and $CERC_L1_PRIV_KEY have been set, we'll assign it to Admin and generate/fund the remaining three accounts from it +# If not, we'll assume the L1 is the stack's own fixturenet-eth and use the pre-funded accounts/keys from $CERC_L1_ACCOUNTS_CSV_URL +if [ -n "$CERC_L1_ADDRESS" ] && [ -n "$CERC_L1_PRIV_KEY" ]; then + wallet1=$(cast wallet new) + wallet2=$(cast wallet new) + wallet3=$(cast wallet new) + # Admin + ADMIN=$CERC_L1_ADDRESS + ADMIN_KEY=$CERC_L1_PRIV_KEY + # Proposer + PROPOSER=$(echo "$wallet1" | awk '/Address:/{print $2}') + PROPOSER_KEY=$(echo "$wallet1" | awk '/Private key:/{print $3}') + # Batcher + BATCHER=$(echo "$wallet2" | awk '/Address:/{print $2}') + BATCHER_KEY=$(echo "$wallet2" | awk '/Private key:/{print $3}') + # Sequencer + SEQ=$(echo "$wallet3" | awk '/Address:/{print $2}') + SEQ_KEY=$(echo "$wallet3" | awk '/Private key:/{print $3}') + + echo "Funding accounts." 
+ wait_for_block 1 300 + cast send --from $ADMIN --rpc-url $CERC_L1_RPC --value 5ether $PROPOSER --private-key $ADMIN_KEY + cast send --from $ADMIN --rpc-url $CERC_L1_RPC --value 10ether $BATCHER --private-key $ADMIN_KEY + cast send --from $ADMIN --rpc-url $CERC_L1_RPC --value 2ether $SEQ --private-key $ADMIN_KEY +else + curl -o accounts.csv $CERC_L1_ACCOUNTS_CSV_URL + # Admin + ADMIN=$(awk -F ',' 'NR == 1 {print $2}' accounts.csv) + ADMIN_KEY=$(awk -F ',' 'NR == 1 {print $3}' accounts.csv) + # Proposer + PROPOSER=$(awk -F ',' 'NR == 2 {print $2}' accounts.csv) + PROPOSER_KEY=$(awk -F ',' 'NR == 2 {print $3}' accounts.csv) + # Batcher + BATCHER=$(awk -F ',' 'NR == 3 {print $2}' accounts.csv) + BATCHER_KEY=$(awk -F ',' 'NR == 3 {print $3}' accounts.csv) + # Sequencer + SEQ=$(awk -F ',' 'NR == 4 {print $2}' accounts.csv) + SEQ_KEY=$(awk -F ',' 'NR == 4 {print $3}' accounts.csv) +fi + +echo "Using accounts:" +echo -e "Admin: $ADMIN\nProposer: $PROPOSER\nBatcher: $BATCHER\nSequencer: $SEQ" + +# These accounts will be needed by other containers, so write them to a shared volume +echo "Writing accounts/private keys to volume l2_accounts." +accounts_json=$(jq -n \ + --arg Admin "$ADMIN" --arg AdminKey "$ADMIN_KEY" \ + --arg Proposer "$PROPOSER" --arg ProposerKey "$PROPOSER_KEY" \ + --arg Batcher "$BATCHER" --arg BatcherKey "$BATCHER_KEY" \ + --arg Seq "$SEQ" --arg SeqKey "$SEQ_KEY" \ + '{Admin: $Admin, AdminKey: $AdminKey, Proposer: $Proposer, ProposerKey: $ProposerKey, Batcher: $Batcher, BatcherKey: $BatcherKey, Seq: $Seq, SeqKey: $SeqKey}') +echo "$accounts_json" > "/l2-accounts/accounts.json" + +# Get a finalized L1 block to set as the starting point for the L2 deployment +# If the chain is a freshly created fixturenet-eth, a finalized block won't be available for many minutes; rather than wait, we can use block 1 +echo "Checking L1 for finalized block..." 
+finalized=$(cast block finalized --rpc-url $CERC_L1_RPC | grep -E "(timestamp|hash|number)" || true) + +if [ -n "$finalized" ]; then + # finalized block was found + start_block=$finalized +else + # assume fresh chain and use block 1 instead + echo "No finalized block. Using block 1 instead." + # wait for 20 or so blocks to be safe + wait_for_block 24 300 + start_block=$(cast block 1 --rpc-url $CERC_L1_RPC | grep -E "(timestamp|hash|number)" || true) +fi + +if [ -z "$start_block" ]; then + echo "Unable to query chain for starting block. Exiting..." + exit 1 +fi + +BLOCKHASH=$(echo $start_block | awk -F ' ' '{print $2}') +HEIGHT=$(echo $start_block | awk -F ' ' '{print $4}') +TIMESTAMP=$(echo $start_block | awk -F ' ' '{print $6}') + +echo "Using block as deployment point:" +echo "Height: $HEIGHT" +echo "Hash: $BLOCKHASH" +echo "Timestamp: $TIMESTAMP" + +# Fill out the deployment template (./deploy-config/getting-started.json) with our values: +echo "Writing deployment config." +deploy_config_file="deploy-config/$DEPLOYMENT_CONTEXT.json" +cp deploy-config/getting-started.json $deploy_config_file +sed -i "s/\"l1ChainID\": .*/\"l1ChainID\": $DEPLOYMENT_CONTEXT,/g" $deploy_config_file +sed -i "s/ADMIN/$ADMIN/g" $deploy_config_file +sed -i "s/PROPOSER/$PROPOSER/g" $deploy_config_file +sed -i "s/BATCHER/$BATCHER/g" $deploy_config_file +sed -i "s/SEQUENCER/$SEQ/g" $deploy_config_file +sed -i "s/BLOCKHASH/$BLOCKHASH/g" $deploy_config_file +sed -i "s/TIMESTAMP/$TIMESTAMP/g" $deploy_config_file + +mkdir -p deployments/$DEPLOYMENT_CONTEXT + +# Deployment requires the create2 deterministic proxy contract be published on L1 at address 0x4e59b44847b379578588920ca78fbf26c0b4956c +# See: https://github.com/Arachnid/deterministic-deployment-proxy +echo "Deploying create2 proxy contract..." 
+echo "Funding deployment signer address" +deployment_signer="0x3fab184622dc19b6109349b94811493bf2a45362" +cast send --from $ADMIN --rpc-url $CERC_L1_RPC --value 0.5ether $deployment_signer --private-key $ADMIN_KEY +echo "Deploying contract..." +raw_bytes="0xf8a58085174876e800830186a08080b853604580600e600039806000f350fe7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf31ba02222222222222222222222222222222222222222222222222222222222222222a02222222222222222222222222222222222222222222222222222222222222222" + +cast publish --rpc-url $CERC_L1_RPC $raw_bytes + +# Create the L2 deployment +echo "Deploying L1 Optimism contracts..." +forge script scripts/Deploy.s.sol:Deploy --private-key $ADMIN_KEY --broadcast --rpc-url $CERC_L1_RPC +forge script scripts/Deploy.s.sol:Deploy --sig 'sync()' --private-key $ADMIN_KEY --broadcast --rpc-url $CERC_L1_RPC + +echo "*************************************" +echo "Done deploying contracts." + +# Copy files needed by other containers to the appropriate shared volumes +echo "Copying deployment artifacts volume l1_deployment and deploy-config to volume l2_config" +cp -a /app/packages/contracts-bedrock/deployments/$DEPLOYMENT_CONTEXT /l1-deployment +cp /app/packages/contracts-bedrock/deploy-config/$DEPLOYMENT_CONTEXT.json /l2-config +openssl rand -hex 32 > /l2-config/l2-jwt.txt + +echo "Deployment successful. 
Exiting" diff --git a/stack_orchestrator/data/config/fixturenet-optimism/run-geth.sh b/stack_orchestrator/data/config/fixturenet-optimism/run-geth.sh new file mode 100755 index 00000000..b24fe867 --- /dev/null +++ b/stack_orchestrator/data/config/fixturenet-optimism/run-geth.sh @@ -0,0 +1,155 @@ +#!/bin/bash +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +# To facilitate deploying the Optimism contracts, a few additional arguments have been added to the geth start command +# Otherwise this script is unchanged from the image's default startup script + +ETHERBASE=`cat /opt/testnet/build/el/accounts.csv | head -1 | cut -d',' -f2` +NETWORK_ID=`cat /opt/testnet/el/el-config.yaml | grep 'chain_id' | awk '{ print $2 }'` +NETRESTRICT=`ip addr | grep inet | grep -v '127.0' | awk '{print $2}'` +CERC_ETH_DATADIR="${CERC_ETH_DATADIR:-$HOME/ethdata}" +CERC_PLUGINS_DIR="${CERC_PLUGINS_DIR:-/usr/local/lib/plugeth}" + +cd /opt/testnet/build/el +python3 -m http.server 9898 & +cd $HOME + +START_CMD="geth" +if [ "true" == "$CERC_REMOTE_DEBUG" ] && [ -x "/usr/local/bin/dlv" ]; then + START_CMD="/usr/local/bin/dlv --listen=:40000 --headless=true --api-version=2 --accept-multiclient exec /usr/local/bin/geth --continue --" +fi + +# See https://linuxconfig.org/how-to-propagate-a-signal-to-child-processes-from-a-bash-script +cleanup() { + echo "Signal received, cleaning up..." + + # Kill the child process first (CERC_REMOTE_DEBUG=true uses dlv which starts geth as a child process) + pkill -P ${geth_pid} + sleep 2 + kill $(jobs -p) + + wait + echo "Done" +} +trap 'cleanup' SIGINT SIGTERM + +if [ "true" == "$RUN_BOOTNODE" ]; then + $START_CMD \ + --datadir="${CERC_ETH_DATADIR}" \ + --nodekeyhex="${BOOTNODE_KEY}" \ + --nodiscover \ + --ipcdisable \ + --networkid=${NETWORK_ID} \ + --netrestrict="${NETRESTRICT}" \ + & + + geth_pid=$! 
+else + cd /opt/testnet/accounts + ./import_keys.sh + + echo -n "$JWT" > /opt/testnet/build/el/jwtsecret + + if [ "$CERC_RUN_STATEDIFF" == "detect" ] && [ -n "$CERC_STATEDIFF_DB_HOST" ]; then + dig_result=$(dig $CERC_STATEDIFF_DB_HOST +short) + dig_status_code=$? + if [[ $dig_status_code = 0 && -n $dig_result ]]; then + echo "Statediff DB at $CERC_STATEDIFF_DB_HOST" + CERC_RUN_STATEDIFF="true" + else + echo "No statediff DB available." + CERC_RUN_STATEDIFF="false" + fi + fi + + STATEDIFF_OPTS="" + if [ "$CERC_RUN_STATEDIFF" == "true" ]; then + ready=0 + echo "Waiting for statediff DB..." + while [ $ready -eq 0 ]; do + sleep 1 + export PGPASSWORD="$CERC_STATEDIFF_DB_PASSWORD" + result=$(psql -h "$CERC_STATEDIFF_DB_HOST" \ + -p "$CERC_STATEDIFF_DB_PORT" \ + -U "$CERC_STATEDIFF_DB_USER" \ + -d "$CERC_STATEDIFF_DB_NAME" \ + -t -c 'select max(version_id) from goose_db_version;' 2>/dev/null | awk '{ print $1 }') + if [ -n "$result" ]; then + echo "DB ready..." + if [ $result -ge $CERC_STATEDIFF_DB_GOOSE_MIN_VER ]; then + ready=1 + else + echo "DB not at required version (want $CERC_STATEDIFF_DB_GOOSE_MIN_VER, have $result)" + fi + fi + done + STATEDIFF_OPTS="--statediff \ + --statediff.db.host=$CERC_STATEDIFF_DB_HOST \ + --statediff.db.name=$CERC_STATEDIFF_DB_NAME \ + --statediff.db.nodeid=$CERC_STATEDIFF_DB_NODE_ID \ + --statediff.db.password=$CERC_STATEDIFF_DB_PASSWORD \ + --statediff.db.port=$CERC_STATEDIFF_DB_PORT \ + --statediff.db.user=$CERC_STATEDIFF_DB_USER \ + --statediff.db.logstatements=${CERC_STATEDIFF_DB_LOG_STATEMENTS:-false} \ + --statediff.db.copyfrom=${CERC_STATEDIFF_DB_COPY_FROM:-true} \ + --statediff.waitforsync=true \ + --statediff.workers=${CERC_STATEDIFF_WORKERS:-1} \ + --statediff.writing=true" + + if [ -d "${CERC_PLUGINS_DIR}" ]; then + # With plugeth, we separate the statediff options by prefixing with ' -- ' + STATEDIFF_OPTS="--pluginsdir "${CERC_PLUGINS_DIR}" -- ${STATEDIFF_OPTS}" + fi + fi + + # unlock account[0] + echo $ACCOUNT_PASSWORD > 
"$CERC_ETH_DATADIR/password" + + $START_CMD \ + --datadir="${CERC_ETH_DATADIR}" \ + --bootnodes="${ENODE}" \ + --allow-insecure-unlock \ + --password="${CERC_ETH_DATADIR}/password" \ + --unlock="$ETHERBASE" \ + --rpc.allow-unprotected-txs \ + --http \ + --http.addr="0.0.0.0" \ + --http.vhosts="*" \ + --http.api="${CERC_GETH_HTTP_APIS:-eth,web3,net,admin,personal,debug,statediff}" \ + --http.corsdomain="*" \ + --authrpc.addr="0.0.0.0" \ + --authrpc.vhosts="*" \ + --authrpc.jwtsecret="/opt/testnet/build/el/jwtsecret" \ + --ws \ + --ws.addr="0.0.0.0" \ + --ws.origins="*" \ + --ws.api="${CERC_GETH_WS_APIS:-eth,web3,net,admin,personal,debug,statediff}" \ + --http.corsdomain="*" \ + --networkid="${NETWORK_ID}" \ + --netrestrict="${NETRESTRICT}" \ + --gcmode archive \ + --txlookuplimit=0 \ + --cache.preimages \ + --syncmode=full \ + --mine \ + --miner.threads=1 \ + --metrics \ + --metrics.addr="0.0.0.0" \ + --verbosity=${CERC_GETH_VERBOSITY:-3} \ + --log.vmodule="${CERC_GETH_VMODULE:-statediff/*=5}" \ + --miner.etherbase="${ETHERBASE}" \ + ${STATEDIFF_OPTS} \ + & + + geth_pid=$! 
+fi + +wait $geth_pid + +if [ "true" == "$CERC_KEEP_RUNNING_AFTER_GETH_EXIT" ]; then + while [ 1 -eq 1 ]; do + sleep 60 + done +fi diff --git a/app/data/config/fixturenet-optimism/run-op-batcher.sh b/stack_orchestrator/data/config/fixturenet-optimism/run-op-batcher.sh similarity index 50% rename from app/data/config/fixturenet-optimism/run-op-batcher.sh rename to stack_orchestrator/data/config/fixturenet-optimism/run-op-batcher.sh index 18955545..29a65d5d 100755 --- a/app/data/config/fixturenet-optimism/run-op-batcher.sh +++ b/stack_orchestrator/data/config/fixturenet-optimism/run-op-batcher.sh @@ -6,22 +6,14 @@ fi CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}" -# Get Batcher key from keys.json -BATCHER_KEY=$(jq -r '.Batcher.privateKey' /l2-accounts/keys.json | tr -d '"') +# Start op-batcher +L2_RPC="http://op-geth:8545" +ROLLUP_RPC="http://op-node:8547" +BATCHER_KEY=$(cat /l2-accounts/accounts.json | jq -r .BatcherKey) -cleanup() { - echo "Signal received, cleaning up..." - kill ${batcher_pid} - - wait - echo "Done" -} -trap 'cleanup' INT TERM - -# Run op-batcher op-batcher \ - --l2-eth-rpc=http://op-geth:8545 \ - --rollup-rpc=http://op-node:8547 \ + --l2-eth-rpc=$L2_RPC \ + --rollup-rpc=$ROLLUP_RPC \ --poll-interval=1s \ --sub-safety-margin=6 \ --num-confirmations=1 \ @@ -32,8 +24,4 @@ op-batcher \ --rpc.enable-admin \ --max-channel-duration=1 \ --l1-eth-rpc=$CERC_L1_RPC \ - --private-key=$BATCHER_KEY \ - & - -batcher_pid=$! 
-wait $batcher_pid + --private-key="${BATCHER_KEY#0x}" diff --git a/stack_orchestrator/data/config/fixturenet-optimism/run-op-geth.sh b/stack_orchestrator/data/config/fixturenet-optimism/run-op-geth.sh new file mode 100755 index 00000000..9b06cedc --- /dev/null +++ b/stack_orchestrator/data/config/fixturenet-optimism/run-op-geth.sh @@ -0,0 +1,56 @@ +#!/bin/sh +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +l2_genesis_file="/l2-config/genesis.json" + +# Check for genesis file; if necessary, wait on op-node to generate +timeout=300 # 5 minutes +start_time=$(date +%s) +elapsed_time=0 +echo "Checking for L2 genesis file at location $l2_genesis_file" +while [ ! -f "$l2_genesis_file" ] && [ $elapsed_time -lt $timeout ]; do + echo "Waiting for L2 genesis file to be generated..." + sleep 10 + current_time=$(date +%s) + elapsed_time=$((current_time - start_time)) +done + +if [ ! -f "$l2_genesis_file" ]; then + echo "L2 genesis file not found after timeout of $timeout seconds. Exiting..." + exit 1 +fi + +# Initialize geth from our generated L2 genesis file (if not already initialized) +data_dir="/datadir" +if [ ! 
-d "$datadir/geth" ]; then + geth init --datadir=$data_dir $l2_genesis_file +fi + +# Start op-geth +jwt_file="/l2-config/l2-jwt.txt" + +geth \ + --datadir=$data_dir \ + --http \ + --http.corsdomain="*" \ + --http.vhosts="*" \ + --http.addr=0.0.0.0 \ + --http.api=web3,debug,eth,txpool,net,engine \ + --ws \ + --ws.addr=0.0.0.0 \ + --ws.port=8546 \ + --ws.origins="*" \ + --ws.api=debug,eth,txpool,net,engine \ + --syncmode=full \ + --gcmode=archive \ + --nodiscover \ + --maxpeers=0 \ + --networkid=42069 \ + --authrpc.vhosts="*" \ + --authrpc.addr=0.0.0.0 \ + --authrpc.port=8551 \ + --authrpc.jwtsecret=$jwt_file \ + --rollup.disabletxpoolgossip=true diff --git a/stack_orchestrator/data/config/fixturenet-optimism/run-op-node.sh b/stack_orchestrator/data/config/fixturenet-optimism/run-op-node.sh new file mode 100755 index 00000000..60a96855 --- /dev/null +++ b/stack_orchestrator/data/config/fixturenet-optimism/run-op-node.sh @@ -0,0 +1,45 @@ +#!/bin/sh +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +CERC_L1_CHAIN_ID="${CERC_L1_CHAIN_ID:-${DEFAULT_CERC_L1_CHAIN_ID}}" +CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}" +DEPLOYMENT_CONTEXT="$CERC_L1_CHAIN_ID" + +deploy_config_file="/l2-config/$DEPLOYMENT_CONTEXT.json" +deployment_dir="/l1-deployment/$DEPLOYMENT_CONTEXT" +genesis_outfile="/l2-config/genesis.json" +rollup_outfile="/l2-config/rollup.json" + +# Generate L2 genesis (if not already done) +if [ ! -f "$genesis_outfile" ] || [ ! -f "$rollup_outfile" ]; then + op-node genesis l2 \ + --deploy-config $deploy_config_file \ + --deployment-dir $deployment_dir \ + --outfile.l2 $genesis_outfile \ + --outfile.rollup $rollup_outfile \ + --l1-rpc $CERC_L1_RPC +fi + +# Start op-node +SEQ_KEY=$(cat /l2-accounts/accounts.json | jq -r .SeqKey) +jwt_file=/l2-config/l2-jwt.txt +L2_AUTH="http://op-geth:8551" +RPC_KIND=any # this can optionally be set to a preset for common node providers like Infura, Alchemy, etc. 
+ +op-node \ + --l2=$L2_AUTH \ + --l2.jwt-secret=$jwt_file \ + --sequencer.enabled \ + --sequencer.l1-confs=5 \ + --verifier.l1-confs=4 \ + --rollup.config=$rollup_outfile \ + --rpc.addr=0.0.0.0 \ + --rpc.port=8547 \ + --p2p.disable \ + --rpc.enable-admin \ + --p2p.sequencer.key="${SEQ_KEY#0x}" \ + --l1=$CERC_L1_RPC \ + --l1.rpckind=$RPC_KIND diff --git a/stack_orchestrator/data/config/fixturenet-optimism/run-op-proposer.sh b/stack_orchestrator/data/config/fixturenet-optimism/run-op-proposer.sh new file mode 100755 index 00000000..092705ca --- /dev/null +++ b/stack_orchestrator/data/config/fixturenet-optimism/run-op-proposer.sh @@ -0,0 +1,22 @@ +#!/bin/sh +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}" +CERC_L1_CHAIN_ID="${CERC_L1_CHAIN_ID:-${DEFAULT_CERC_L1_CHAIN_ID}}" +DEPLOYMENT_CONTEXT="$CERC_L1_CHAIN_ID" + +# Start op-proposer +ROLLUP_RPC="http://op-node:8547" +PROPOSER_KEY=$(cat /l2-accounts/accounts.json | jq -r .ProposerKey) +L2OO_ADDR=$(cat /l1-deployment/$DEPLOYMENT_CONTEXT/L2OutputOracleProxy.json | jq -r .address) + +op-proposer \ + --poll-interval=12s \ + --rpc.port=8560 \ + --rollup-rpc=$ROLLUP_RPC \ + --l2oo-address="${L2OO_ADDR#0x}" \ + --private-key="${PROPOSER_KEY#0x}" \ + --l1-eth-rpc=$CERC_L1_RPC diff --git a/app/data/config/fixturenet-pocket/chains.json b/stack_orchestrator/data/config/fixturenet-pocket/chains.json similarity index 100% rename from app/data/config/fixturenet-pocket/chains.json rename to stack_orchestrator/data/config/fixturenet-pocket/chains.json diff --git a/app/data/config/fixturenet-pocket/create-fixturenet.sh b/stack_orchestrator/data/config/fixturenet-pocket/create-fixturenet.sh similarity index 100% rename from app/data/config/fixturenet-pocket/create-fixturenet.sh rename to stack_orchestrator/data/config/fixturenet-pocket/create-fixturenet.sh diff --git a/app/data/config/fixturenet-pocket/genesis.json 
b/stack_orchestrator/data/config/fixturenet-pocket/genesis.json similarity index 100% rename from app/data/config/fixturenet-pocket/genesis.json rename to stack_orchestrator/data/config/fixturenet-pocket/genesis.json diff --git a/app/data/config/fixturenet-sushiswap-subgraph-v3/lotus-fixturenet.js.template b/stack_orchestrator/data/config/fixturenet-sushiswap-subgraph-v3/lotus-fixturenet.js.template similarity index 100% rename from app/data/config/fixturenet-sushiswap-subgraph-v3/lotus-fixturenet.js.template rename to stack_orchestrator/data/config/fixturenet-sushiswap-subgraph-v3/lotus-fixturenet.js.template diff --git a/app/data/config/fixturenet-sushiswap-subgraph-v3/run-blocks.sh b/stack_orchestrator/data/config/fixturenet-sushiswap-subgraph-v3/run-blocks.sh similarity index 100% rename from app/data/config/fixturenet-sushiswap-subgraph-v3/run-blocks.sh rename to stack_orchestrator/data/config/fixturenet-sushiswap-subgraph-v3/run-blocks.sh diff --git a/app/data/config/fixturenet-sushiswap-subgraph-v3/run-v3.sh b/stack_orchestrator/data/config/fixturenet-sushiswap-subgraph-v3/run-v3.sh similarity index 100% rename from app/data/config/fixturenet-sushiswap-subgraph-v3/run-v3.sh rename to stack_orchestrator/data/config/fixturenet-sushiswap-subgraph-v3/run-v3.sh diff --git a/app/data/config/foundry/foundry.toml b/stack_orchestrator/data/config/foundry/foundry.toml similarity index 100% rename from app/data/config/foundry/foundry.toml rename to stack_orchestrator/data/config/foundry/foundry.toml diff --git a/app/data/config/go-nitro/run-nitro-node.sh b/stack_orchestrator/data/config/go-nitro/run-nitro-node.sh similarity index 100% rename from app/data/config/go-nitro/run-nitro-node.sh rename to stack_orchestrator/data/config/go-nitro/run-nitro-node.sh diff --git a/app/data/config/ipld-eth-beacon-indexer/indexer.env b/stack_orchestrator/data/config/ipld-eth-beacon-indexer/indexer.env similarity index 100% rename from 
app/data/config/ipld-eth-beacon-indexer/indexer.env rename to stack_orchestrator/data/config/ipld-eth-beacon-indexer/indexer.env diff --git a/app/data/config/ipld-eth-server/chain.json b/stack_orchestrator/data/config/ipld-eth-server/chain.json similarity index 100% rename from app/data/config/ipld-eth-server/chain.json rename to stack_orchestrator/data/config/ipld-eth-server/chain.json diff --git a/app/data/config/ipld-eth-server/entrypoint.sh b/stack_orchestrator/data/config/ipld-eth-server/entrypoint.sh similarity index 100% rename from app/data/config/ipld-eth-server/entrypoint.sh rename to stack_orchestrator/data/config/ipld-eth-server/entrypoint.sh diff --git a/app/data/config/keycloak/import/cerc-realm.json b/stack_orchestrator/data/config/keycloak/import/cerc-realm.json similarity index 100% rename from app/data/config/keycloak/import/cerc-realm.json rename to stack_orchestrator/data/config/keycloak/import/cerc-realm.json diff --git a/app/data/config/keycloak/keycloak.env b/stack_orchestrator/data/config/keycloak/keycloak.env similarity index 100% rename from app/data/config/keycloak/keycloak.env rename to stack_orchestrator/data/config/keycloak/keycloak.env diff --git a/app/data/config/keycloak/nginx/keycloak_proxy.conf b/stack_orchestrator/data/config/keycloak/nginx/keycloak_proxy.conf similarity index 100% rename from app/data/config/keycloak/nginx/keycloak_proxy.conf rename to stack_orchestrator/data/config/keycloak/nginx/keycloak_proxy.conf diff --git a/app/data/config/mainnet-eth-api-proxy/ethpxy.env b/stack_orchestrator/data/config/mainnet-eth-api-proxy/ethpxy.env similarity index 100% rename from app/data/config/mainnet-eth-api-proxy/ethpxy.env rename to stack_orchestrator/data/config/mainnet-eth-api-proxy/ethpxy.env diff --git a/stack_orchestrator/data/config/mainnet-eth-ipld-eth-db/db.env b/stack_orchestrator/data/config/mainnet-eth-ipld-eth-db/db.env new file mode 100644 index 00000000..4ec11109 --- /dev/null +++ 
b/stack_orchestrator/data/config/mainnet-eth-ipld-eth-db/db.env @@ -0,0 +1,15 @@ +DATABASE_HOSTNAME="ipld-eth-db" +DATABASE_NAME="cerc" +DATABASE_PASSWORD="CHANGEME" +DATABASE_PORT=5432 +DATABASE_USER="vdbm" + +POSTGRES_DB="${DATABASE_NAME}" +POSTGRES_PASSWORD="${DATABASE_PASSWORD}" +POSTGRES_USER="${DATABASE_USER}" + +CERC_STATEDIFF_DB_HOST="${DATABASE_HOSTNAME}" +CERC_STATEDIFF_DB_NAME="${DATABASE_NAME}" +CERC_STATEDIFF_DB_PASSWORD="${DATABASE_PASSWORD}" +CERC_STATEDIFF_DB_PORT=${DATABASE_PORT} +CERC_STATEDIFF_DB_USER="${DATABASE_USER}" diff --git a/stack_orchestrator/data/config/mainnet-eth-ipld-eth-server/config.toml b/stack_orchestrator/data/config/mainnet-eth-ipld-eth-server/config.toml new file mode 100644 index 00000000..c433df28 --- /dev/null +++ b/stack_orchestrator/data/config/mainnet-eth-ipld-eth-server/config.toml @@ -0,0 +1,33 @@ +[database] + name = "" # $DATABASE_NAME + hostname = "" # $DATABASE_HOSTNAME + port = 5432 # $DATABASE_PORT + user = "" # $DATABASE_USER + password = "" # $DATABASE_PASSWORD + +[log] + level = "info" # $LOG_LEVEL + +[server] + ipc = false + ipcPath = "" # $SERVER_IPC_PATH + ws = false + wsPath = "0.0.0.0:8080" # $SERVER_WS_PATH + http = true + httpPath = "0.0.0.0:8081" # $SERVER_HTTP_PATH + graphql = false # $SERVER_GRAPHQL + graphqlPath = "0.0.0.0:8082" # $SERVER_GRAPHQL_PATH + +[ethereum] + chainConfig = "" # ETH_CHAIN_CONFIG + chainID = "1" # $ETH_CHAIN_ID + rpcGasCap = "1000000000000" # $ETH_RPC_GAS_CAP + httpPath = "mainnet-eth-geth-1:8545" # $ETH_HTTP_PATH + supportsStateDiff = true # $ETH_SUPPORTS_STATEDIFF + stateDiffTimeout = "4m" # $ETH_STATEDIFF_TIMEOUT + forwardEthCalls = false # $ETH_FORWARD_ETH_CALLS + proxyOnError = true # $ETH_PROXY_ON_ERROR + nodeID = "" # $ETH_NODE_ID + clientName = "" # $ETH_CLIENT_NAME + genesisBlock = "" # $ETH_GENESIS_BLOCK + networkID = "1" # $ETH_NETWORK_ID diff --git a/stack_orchestrator/data/config/mainnet-eth-ipld-eth-server/srv.env 
b/stack_orchestrator/data/config/mainnet-eth-ipld-eth-server/srv.env new file mode 100644 index 00000000..34c79ce4 --- /dev/null +++ b/stack_orchestrator/data/config/mainnet-eth-ipld-eth-server/srv.env @@ -0,0 +1,27 @@ +CERC_REMOTE_DEBUG="false" + +LOG_LEVEL="debug" + +ETH_CHAIN_ID=1 +ETH_CLIENT_NAME="Geth" +ETH_FORWARD_ETH_CALLS="false" +ETH_FORWARD_GET_STORAGE_AT="false" +ETH_GENESIS_BLOCK="0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3" +ETH_HTTP_PATH="mainnet-eth-geth-1:8545" +ETH_NETWORK_ID=1 +ETH_NODE_ID=1112 +ETH_PROXY_ON_ERROR="true" +ETH_RPC_GAS_CAP=1000000000000 +ETH_SUPPORTS_STATEDIFF="true" +ETH_STATEDIFF_TIMEOUT=4m + +SERVER_HTTP_PATH=0.0.0.0:8081 +SERVER_GRAPHQL="false" +SERVER_GRAPHQLPATH=0.0.0.0:8082 + +METRICS="true" +PROM_HTTP="true" +PROM_HTTP_ADDR="0.0.0.0" +PROM_HTTP_PORT="8090" + +VDB_COMMAND="serve" diff --git a/app/data/config/mainnet-eth-keycloak/import/cerc-realm.json b/stack_orchestrator/data/config/mainnet-eth-keycloak/import/cerc-realm.json similarity index 100% rename from app/data/config/mainnet-eth-keycloak/import/cerc-realm.json rename to stack_orchestrator/data/config/mainnet-eth-keycloak/import/cerc-realm.json diff --git a/app/data/config/mainnet-eth-keycloak/keycloak.env b/stack_orchestrator/data/config/mainnet-eth-keycloak/keycloak.env similarity index 85% rename from app/data/config/mainnet-eth-keycloak/keycloak.env rename to stack_orchestrator/data/config/mainnet-eth-keycloak/keycloak.env index f37fdd30..31a19079 100644 --- a/app/data/config/mainnet-eth-keycloak/keycloak.env +++ b/stack_orchestrator/data/config/mainnet-eth-keycloak/keycloak.env @@ -1,8 +1,11 @@ POSTGRES_DB=keycloak POSTGRES_USER=keycloak POSTGRES_PASSWORD=keycloak +# Don't change this unless you also change the healthcheck in docker-compose-mainnet-eth-keycloak.yml +PGPORT=35432 KC_DB=postgres KC_DB_URL_HOST=keycloak-db +KC_DB_URL_PORT=${PGPORT} KC_DB_URL_DATABASE=${POSTGRES_DB} KC_DB_USERNAME=${POSTGRES_USER} 
KC_DB_PASSWORD=${POSTGRES_PASSWORD} diff --git a/app/data/config/mainnet-eth-keycloak/nginx.example b/stack_orchestrator/data/config/mainnet-eth-keycloak/nginx.example similarity index 69% rename from app/data/config/mainnet-eth-keycloak/nginx.example rename to stack_orchestrator/data/config/mainnet-eth-keycloak/nginx.example index 67095551..758f0ce1 100644 --- a/app/data/config/mainnet-eth-keycloak/nginx.example +++ b/stack_orchestrator/data/config/mainnet-eth-keycloak/nginx.example @@ -15,42 +15,49 @@ server { } upstream geth-pool { - keepalive 100; - hash $user_id consistent; - server server-a:8545; - server server-b:8545; - server server-c:8545; + server server-a:8545 max_fails=10 fail_timeout=2s; + server server-c:8545 max_fails=10 fail_timeout=2s backup; + server server-b:8545 max_fails=10 fail_timeout=2s backup; + keepalive 200; } -# self-reg happens on one server for clarity upstream reg-ui-pool { - keepalive 100; + keepalive 2; server server-a:8085; } upstream reg-api-pool { - keepalive 100; + keepalive 2; server server-a:8086; } -# auth uses server-a if available +# auth uses the reg server when available upstream auth-pool { - keepalive 100; + keepalive 10; server server-a:8080; server server-b:8080 backup; server server-c:8080 backup; } -log_format upstreamlog '[$time_local] $remote_addr $user_id - $server_name $host to: $upstream_addr: $request $status upstream_response_time $upstream_response_time msec $msec request_time $request_time'; -proxy_cache_path /var/cache/nginx/auth_cache levels=1 keys_zone=auth_cache:1m max_size=5m inactive=60m; + +log_format upstreamlog '[$time_local] $msec $remote_addr $user_id - $server_name($host) to $upstream_addr: $request $status upstream_response_time $upstream_response_time request_time $request_time'; +proxy_cache_path /var/cache/nginx/auth_cache levels=1 keys_zone=auth_cache:1m max_size=5m inactive=60m; + server { listen 443 ssl http2; server_name my.example.com; + keepalive_requests 500000; + keepalive_timeout 
90s; + http2_max_requests 5000000; + http2_max_concurrent_streams 1024; + http2_idle_timeout 3m; + http2_recv_timeout 30s; access_log /var/log/nginx/my.example.com-access.log upstreamlog; error_log /var/log/nginx/my.example.com-error.log; ssl_certificate /etc/nginx/ssl/my.example.com/cert.pem; ssl_certificate_key /etc/nginx/ssl/my.example.com/key.pem; + ssl_session_cache shared:SSL:10m; error_page 500 502 503 504 /50x.html; location = /50x.html { @@ -60,7 +67,6 @@ server { #rewrite ^/?$ /newuser/; rewrite ^/?$ https://www.example.com/; - # geth-pool ETH API location ~ ^/v1/eth/?([^/]*)$ { set $apiKey $1; @@ -71,8 +77,8 @@ server { auth_request_set $user_id $sent_http_x_user_id; rewrite /.*$ / break; - client_max_body_size 3m; - client_body_buffer_size 3m; + client_max_body_size 3m; + client_body_buffer_size 3m; proxy_buffer_size 32k; proxy_buffers 16 32k; proxy_busy_buffers_size 96k; @@ -80,8 +86,10 @@ server { proxy_pass http://geth-pool; proxy_set_header X-Original-Remote-Addr $remote_addr; proxy_set_header X-User-Id $user_id; + proxy_http_version 1.1; + proxy_set_header Connection ""; } - + # keycloak location = /auth { internal; @@ -95,6 +103,8 @@ server { proxy_set_header X-Original-URI $request_uri; proxy_set_header X-Original-Remote-Addr $remote_addr; proxy_set_header X-Original-Host $host; + proxy_http_version 1.1; + proxy_set_header Connection ""; } location /newuser/ { diff --git a/app/data/config/mainnet-eth-keycloak/scripts/keycloak-mirror/keycloak-mirror.py b/stack_orchestrator/data/config/mainnet-eth-keycloak/scripts/keycloak-mirror/keycloak-mirror.py similarity index 100% rename from app/data/config/mainnet-eth-keycloak/scripts/keycloak-mirror/keycloak-mirror.py rename to stack_orchestrator/data/config/mainnet-eth-keycloak/scripts/keycloak-mirror/keycloak-mirror.py diff --git a/app/data/config/mainnet-eth-keycloak/scripts/keycloak-mirror/requirements.txt b/stack_orchestrator/data/config/mainnet-eth-keycloak/scripts/keycloak-mirror/requirements.txt 
similarity index 100% rename from app/data/config/mainnet-eth-keycloak/scripts/keycloak-mirror/requirements.txt rename to stack_orchestrator/data/config/mainnet-eth-keycloak/scripts/keycloak-mirror/requirements.txt diff --git a/app/data/config/mainnet-eth-keycloak/ui/config.yml b/stack_orchestrator/data/config/mainnet-eth-keycloak/ui/config.yml similarity index 100% rename from app/data/config/mainnet-eth-keycloak/ui/config.yml rename to stack_orchestrator/data/config/mainnet-eth-keycloak/ui/config.yml diff --git a/app/data/config/mainnet-eth-metrics/grafana/etc/dashboards/eth_dashboard.json b/stack_orchestrator/data/config/mainnet-eth-metrics/grafana/etc/dashboards/eth_dashboard.json similarity index 100% rename from app/data/config/mainnet-eth-metrics/grafana/etc/dashboards/eth_dashboard.json rename to stack_orchestrator/data/config/mainnet-eth-metrics/grafana/etc/dashboards/eth_dashboard.json diff --git a/app/data/config/mainnet-eth-metrics/grafana/etc/provisioning/dashboards/dashboards.yml b/stack_orchestrator/data/config/mainnet-eth-metrics/grafana/etc/provisioning/dashboards/dashboards.yml similarity index 100% rename from app/data/config/mainnet-eth-metrics/grafana/etc/provisioning/dashboards/dashboards.yml rename to stack_orchestrator/data/config/mainnet-eth-metrics/grafana/etc/provisioning/dashboards/dashboards.yml diff --git a/app/data/config/mainnet-eth-metrics/grafana/etc/provisioning/datasources/prometheus.yml b/stack_orchestrator/data/config/mainnet-eth-metrics/grafana/etc/provisioning/datasources/prometheus.yml similarity index 100% rename from app/data/config/mainnet-eth-metrics/grafana/etc/provisioning/datasources/prometheus.yml rename to stack_orchestrator/data/config/mainnet-eth-metrics/grafana/etc/provisioning/datasources/prometheus.yml diff --git a/app/data/config/mainnet-eth-metrics/metrics.env b/stack_orchestrator/data/config/mainnet-eth-metrics/metrics.env similarity index 100% rename from app/data/config/mainnet-eth-metrics/metrics.env 
rename to stack_orchestrator/data/config/mainnet-eth-metrics/metrics.env diff --git a/app/data/config/mainnet-eth-metrics/prometheus/etc/prometheus.yml b/stack_orchestrator/data/config/mainnet-eth-metrics/prometheus/etc/prometheus.yml similarity index 100% rename from app/data/config/mainnet-eth-metrics/prometheus/etc/prometheus.yml rename to stack_orchestrator/data/config/mainnet-eth-metrics/prometheus/etc/prometheus.yml diff --git a/stack_orchestrator/data/config/mainnet-eth-plugeth/geth.env b/stack_orchestrator/data/config/mainnet-eth-plugeth/geth.env new file mode 100644 index 00000000..5c936d36 --- /dev/null +++ b/stack_orchestrator/data/config/mainnet-eth-plugeth/geth.env @@ -0,0 +1,75 @@ +# Enable remote debugging using dlv +CERC_REMOTE_DEBUG=false + +# Enable startup script debug output. +CERC_SCRIPT_DEBUG=false + +# Simple toggle to choose either a 'full' node or an 'archive' node +# (controls the values of --syncmode --gcmode --snapshot) +CERC_GETH_MODE_QUICK_SET=archive + +# Path to plugeth plugins. +CERC_PLUGINS_DIR="/usr/local/lib/plugeth" + +# Will turn on statediffing automatically if CERC_STATEDIFF_DB_HOST exists (see ../mainnet-eth-ipld-eth-db/db.env). +CERC_RUN_STATEDIFF="detect" + +# The minimum necessary verion of the DB to enable statediffing. +CERC_STATEDIFF_DB_GOOSE_MIN_VER=18 + +# Whether all statediff-related DB statements should be logged (useful for debugging). +CERC_STATEDIFF_DB_LOG_STATEMENTS=false + +# The number of concurrent workers to process state diff objects +CERC_STATEDIFF_WORKERS=16 + +# Each statediffing node should have a unique node ID. +CERC_STATEDIFF_DB_NODE_ID=1111 + +# Optional custom node name. +# GETH_NODE_NAME="" + +# Specify any other geth CLI options. 
+GETH_OPTS="" + +# --cache +GETH_CACHE=1024 + +# --cache.database +GETH_CACHE_DB=50 + +# --cache.gc +GETH_CACHE_GC=25 + +# --cache.trie +GETH_CACHE_TRIE=15 + +# --datadir +GETH_DATADIR="/data" + +# --http.api +GETH_HTTP_API="eth,web3,net" + +# --authrpc.jwtsecret +GETH_JWTSECRET="/etc/mainnet-eth/jwtsecret" + +# --maxpeers +GETH_MAX_PEERS=100 + +# --rpc.evmtimeout +GETH_RPC_EVMTIMEOUT=0 + +# --rpc.gascap +GETH_RPC_GASCAP=0 + +# --txlookuplimit +GETH_TXLOOKUPLIMIT=0 + +# --verbosity +GETH_VERBOSITY=3 + +# --log.vmodule +GETH_VMODULE="rpc/*=4" + +# --ws.api +GETH_WS_API="eth,web3,net" diff --git a/app/data/config/mainnet-eth/lighthouse.env b/stack_orchestrator/data/config/mainnet-eth-plugeth/lighthouse.env similarity index 100% rename from app/data/config/mainnet-eth/lighthouse.env rename to stack_orchestrator/data/config/mainnet-eth-plugeth/lighthouse.env diff --git a/stack_orchestrator/data/config/mainnet-eth-plugeth/scripts/run-geth.sh b/stack_orchestrator/data/config/mainnet-eth-plugeth/scripts/run-geth.sh new file mode 100755 index 00000000..1971c2d0 --- /dev/null +++ b/stack_orchestrator/data/config/mainnet-eth-plugeth/scripts/run-geth.sh @@ -0,0 +1,121 @@ +#!/bin/sh +if [[ "true" == "$CERC_SCRIPT_DEBUG" ]]; then + set -x +fi + +START_CMD="geth" +if [[ "true" == "$CERC_REMOTE_DEBUG" ]] && [[ -x "/usr/local/bin/dlv" ]]; then + START_CMD="/usr/local/bin/dlv --listen=:40000 --headless=true --api-version=2 --accept-multiclient exec /usr/local/bin/geth --continue --" +fi + +# See https://linuxconfig.org/how-to-propagate-a-signal-to-child-processes-from-a-bash-script +cleanup() { + echo "Signal received, cleaning up..." 
+ + # Kill the child process first (CERC_REMOTE_DEBUG=true uses dlv which starts geth as a child process) + pkill -P ${geth_pid} + sleep 2 + kill $(jobs -p) + + wait + echo "Done" +} +trap 'cleanup' SIGINT SIGTERM + +MODE_FLAGS="" +if [[ "$CERC_GETH_MODE_QUICK_SET" = "archive" ]]; then + MODE_FLAGS="--syncmode=${GETH_SYNC_MODE:-full} --gcmode=${GETH_GC_MODE:-archive} --snapshot=${GETH_SNAPSHOT:-false}" +else + MODE_FLAGS="--syncmode=${GETH_SYNC_MODE:-snap} --gcmode=${GETH_GC_MODE:-full} --snapshot=${GETH_SNAPSHOT:-true}" +fi + +if [[ "${CERC_RUN_STATEDIFF}" == "detect" ]] && [[ -n "$CERC_STATEDIFF_DB_HOST" ]]; then + dig_result=$(dig $CERC_STATEDIFF_DB_HOST +short) + dig_status_code=$? + if [[ $dig_status_code = 0 && -n $dig_result ]]; then + echo "Statediff DB at $CERC_STATEDIFF_DB_HOST" + CERC_RUN_STATEDIFF="true" + else + echo "No statediff DB available." + CERC_RUN_STATEDIFF="false" + fi +fi + +STATEDIFF_OPTS="" +if [[ "${CERC_RUN_STATEDIFF}" == "true" ]]; then + ready=0 + echo "Waiting for statediff DB..." + while [ $ready -eq 0 ]; do + sleep 1 + export PGPASSWORD="$CERC_STATEDIFF_DB_PASSWORD" + result=$(psql -h "$CERC_STATEDIFF_DB_HOST" \ + -p "$CERC_STATEDIFF_DB_PORT" \ + -U "$CERC_STATEDIFF_DB_USER" \ + -d "$CERC_STATEDIFF_DB_NAME" \ + -t -c 'select max(version_id) from goose_db_version;' 2>/dev/null | awk '{ print $1 }') + if [ -n "$result" ]; then + echo "DB ready..." 
+ if [[ $result -ge $CERC_STATEDIFF_DB_GOOSE_MIN_VER ]]; then + ready=1 + else + echo "DB not at required version (want $CERC_STATEDIFF_DB_GOOSE_MIN_VER, have $result)" + fi + fi + done + + STATEDIFF_OPTS="--statediff \ + --statediff.db.host=$CERC_STATEDIFF_DB_HOST \ + --statediff.db.name=$CERC_STATEDIFF_DB_NAME \ + --statediff.db.nodeid=$CERC_STATEDIFF_DB_NODE_ID \ + --statediff.db.password=$CERC_STATEDIFF_DB_PASSWORD \ + --statediff.db.port=$CERC_STATEDIFF_DB_PORT \ + --statediff.db.user=$CERC_STATEDIFF_DB_USER \ + --statediff.db.logstatements=${CERC_STATEDIFF_DB_LOG_STATEMENTS:-false} \ + --statediff.db.copyfrom=${CERC_STATEDIFF_DB_COPY_FROM:-true} \ + --statediff.waitforsync=${CERC_STATEDIFF_WAIT_FO_SYNC:-true} \ + --statediff.workers=${CERC_STATEDIFF_WORKERS:-1} \ + --statediff.writing=${CERC_STATEDIFF_WRITING:-true}" + + if [[ -d "${CERC_PLUGINS_DIR}" ]]; then + # With plugeth, we separate the statediff options by prefixing with ' -- ' + STATEDIFF_OPTS="--pluginsdir "${CERC_PLUGINS_DIR}" -- ${STATEDIFF_OPTS}" + fi +fi + +$START_CMD \ + $MODE_FLAGS \ + --datadir="${GETH_DATADIR}"\ + --identity="${GETH_NODE_NAME}" \ + --maxpeers=${GETH_MAX_PEERS} \ + --cache=${GETH_CACHE} \ + --cache.gc=${GETH_CACHE_GC} \ + --cache.database=${GETH_CACHE_DB} \ + --cache.trie=${GETH_CACHE_TRIE} \ + --authrpc.addr='0.0.0.0' \ + --authrpc.vhosts='*' \ + --authrpc.jwtsecret="${GETH_JWTSECRET}" \ + --http \ + --http.addr='0.0.0.0' \ + --http.api="${GETH_HTTP_API}" \ + --http.vhosts='*' \ + --metrics \ + --metrics.addr='0.0.0.0' \ + --ws \ + --ws.addr='0.0.0.0' \ + --ws.api="${GETH_WS_API}" \ + --rpc.gascap=${GETH_RPC_GASCAP} \ + --rpc.evmtimeout=${GETH_RPC_EVMTIMEOUT} \ + --txlookuplimit=${GETH_TXLOOKUPLIMIT} \ + --verbosity=${GETH_VERBOSITY} \ + --log.vmodule="${GETH_VMODULE}" \ + ${STATEDIFF_OPTS} \ + ${GETH_OPTS} & + +geth_pid=$! 
+wait $geth_pid + +if [[ "true" == "$CERC_KEEP_RUNNING_AFTER_GETH_EXIT" ]]; then + while [[ 1 -eq 1 ]]; do + sleep 60 + done +fi diff --git a/app/data/config/mainnet-eth/scripts/run-lighthouse.sh b/stack_orchestrator/data/config/mainnet-eth-plugeth/scripts/run-lighthouse.sh similarity index 100% rename from app/data/config/mainnet-eth/scripts/run-lighthouse.sh rename to stack_orchestrator/data/config/mainnet-eth-plugeth/scripts/run-lighthouse.sh diff --git a/app/data/config/mainnet-eth/geth.env b/stack_orchestrator/data/config/mainnet-eth/geth.env similarity index 99% rename from app/data/config/mainnet-eth/geth.env rename to stack_orchestrator/data/config/mainnet-eth/geth.env index a01444df..365bb5fb 100644 --- a/app/data/config/mainnet-eth/geth.env +++ b/stack_orchestrator/data/config/mainnet-eth/geth.env @@ -25,7 +25,7 @@ GETH_CACHE_GC=25 # --cache.trie GETH_CACHE_TRIE=15 -j + # --datadir GETH_DATADIR="/data" diff --git a/stack_orchestrator/data/config/mainnet-eth/lighthouse.env b/stack_orchestrator/data/config/mainnet-eth/lighthouse.env new file mode 100644 index 00000000..11fc6b69 --- /dev/null +++ b/stack_orchestrator/data/config/mainnet-eth/lighthouse.env @@ -0,0 +1,33 @@ +# Enable startup script debug output. +CERC_SCRIPT_DEBUG=false + +# Specify any other lighthouse CLI options. 
+LIGHTHOUSE_OPTS="" + +# Override the advertised public IP (optional) +# --enr-address +#LIGHTHOUSE_ENR_ADDRESS="" + +# --checkpoint-sync-url +LIGHTHOUSE_CHECKPOINT_SYNC_URL="https://beaconstate.ethstaker.cc" + +# --checkpoint-sync-url-timeout +LIGHTHOUSE_CHECKPOINT_SYNC_URL_TIMEOUT=300 + +# --datadir +LIGHTHOUSE_DATADIR=/data + +# --debug-level +LIGHTHOUSE_DEBUG_LEVEL=info + +# --http-port +LIGHTHOUSE_HTTP_PORT=5052 + +# --execution-jwt +LIGHTHOUSE_JWTSECRET=/etc/mainnet-eth/jwtsecret + +# --metrics-port +LIGHTHOUSE_METRICS_PORT=5054 + +# --port --enr-udp-port --enr-tcp-port +LIGHTHOUSE_NETWORK_PORT=9000 diff --git a/app/data/config/mainnet-eth/scripts/run-geth.sh b/stack_orchestrator/data/config/mainnet-eth/scripts/run-geth.sh similarity index 100% rename from app/data/config/mainnet-eth/scripts/run-geth.sh rename to stack_orchestrator/data/config/mainnet-eth/scripts/run-geth.sh diff --git a/stack_orchestrator/data/config/mainnet-eth/scripts/run-lighthouse.sh b/stack_orchestrator/data/config/mainnet-eth/scripts/run-lighthouse.sh new file mode 100755 index 00000000..efda735b --- /dev/null +++ b/stack_orchestrator/data/config/mainnet-eth/scripts/run-lighthouse.sh @@ -0,0 +1,30 @@ +#!/bin/bash +if [[ "true" == "$CERC_SCRIPT_DEBUG" ]]; then + set -x +fi + +ENR_OPTS="" +if [[ -n "$LIGHTHOUSE_ENR_ADDRESS" ]]; then + ENR_OPTS="--enr-address $LIGHTHOUSE_ENR_ADDRESS" +fi + +exec lighthouse bn \ + --checkpoint-sync-url "$LIGHTHOUSE_CHECKPOINT_SYNC_URL" \ + --checkpoint-sync-url-timeout ${LIGHTHOUSE_CHECKPOINT_SYNC_URL_TIMEOUT} \ + --datadir "$LIGHTHOUSE_DATADIR" \ + --debug-level $LIGHTHOUSE_DEBUG_LEVEL \ + --disable-deposit-contract-sync \ + --disable-upnp \ + --enr-tcp-port $LIGHTHOUSE_NETWORK_PORT \ + --enr-udp-port $LIGHTHOUSE_NETWORK_PORT \ + --execution-endpoint "$LIGHTHOUSE_EXECUTION_ENDPOINT" \ + --execution-jwt /etc/mainnet-eth/jwtsecret \ + --http \ + --http-address 0.0.0.0 \ + --http-port $LIGHTHOUSE_HTTP_PORT \ + --metrics \ + --metrics-address=0.0.0.0 \ + 
--metrics-port $LIGHTHOUSE_METRICS_PORT \ + --network mainnet \ + --port $LIGHTHOUSE_NETWORK_PORT \ + $ENR_OPTS $LIGHTHOUSE_OPTS diff --git a/app/data/config/mainnet-go-opera/go-opera.env b/stack_orchestrator/data/config/mainnet-go-opera/go-opera.env similarity index 100% rename from app/data/config/mainnet-go-opera/go-opera.env rename to stack_orchestrator/data/config/mainnet-go-opera/go-opera.env diff --git a/app/data/config/mainnet-go-opera/start-node.sh b/stack_orchestrator/data/config/mainnet-go-opera/start-node.sh similarity index 100% rename from app/data/config/mainnet-go-opera/start-node.sh rename to stack_orchestrator/data/config/mainnet-go-opera/start-node.sh diff --git a/app/data/config/mainnet-laconicd/registry-cli-config-template.yml b/stack_orchestrator/data/config/mainnet-laconicd/registry-cli-config-template.yml similarity index 100% rename from app/data/config/mainnet-laconicd/registry-cli-config-template.yml rename to stack_orchestrator/data/config/mainnet-laconicd/registry-cli-config-template.yml diff --git a/app/data/config/mainnet-laconicd/scripts/export-myaddress.sh b/stack_orchestrator/data/config/mainnet-laconicd/scripts/export-myaddress.sh similarity index 100% rename from app/data/config/mainnet-laconicd/scripts/export-myaddress.sh rename to stack_orchestrator/data/config/mainnet-laconicd/scripts/export-myaddress.sh diff --git a/app/data/config/mainnet-laconicd/scripts/export-mykey.sh b/stack_orchestrator/data/config/mainnet-laconicd/scripts/export-mykey.sh similarity index 100% rename from app/data/config/mainnet-laconicd/scripts/export-mykey.sh rename to stack_orchestrator/data/config/mainnet-laconicd/scripts/export-mykey.sh diff --git a/app/data/config/mainnet-laconicd/scripts/run-laconicd.sh b/stack_orchestrator/data/config/mainnet-laconicd/scripts/run-laconicd.sh similarity index 100% rename from app/data/config/mainnet-laconicd/scripts/run-laconicd.sh rename to stack_orchestrator/data/config/mainnet-laconicd/scripts/run-laconicd.sh 
diff --git a/app/data/config/network/wait-for-it.sh b/stack_orchestrator/data/config/network/wait-for-it.sh similarity index 100% rename from app/data/config/network/wait-for-it.sh rename to stack_orchestrator/data/config/network/wait-for-it.sh diff --git a/app/data/config/nitro-contracts/deploy.sh b/stack_orchestrator/data/config/nitro-contracts/deploy.sh similarity index 100% rename from app/data/config/nitro-contracts/deploy.sh rename to stack_orchestrator/data/config/nitro-contracts/deploy.sh diff --git a/app/data/config/optimism-contracts/hardhat-tasks/rekey-json.ts b/stack_orchestrator/data/config/optimism-contracts/hardhat-tasks/rekey-json.ts similarity index 100% rename from app/data/config/optimism-contracts/hardhat-tasks/rekey-json.ts rename to stack_orchestrator/data/config/optimism-contracts/hardhat-tasks/rekey-json.ts diff --git a/app/data/config/optimism-contracts/hardhat-tasks/send-balance.ts b/stack_orchestrator/data/config/optimism-contracts/hardhat-tasks/send-balance.ts similarity index 100% rename from app/data/config/optimism-contracts/hardhat-tasks/send-balance.ts rename to stack_orchestrator/data/config/optimism-contracts/hardhat-tasks/send-balance.ts diff --git a/app/data/config/optimism-contracts/hardhat-tasks/verify-contract-deployment.ts b/stack_orchestrator/data/config/optimism-contracts/hardhat-tasks/verify-contract-deployment.ts similarity index 100% rename from app/data/config/optimism-contracts/hardhat-tasks/verify-contract-deployment.ts rename to stack_orchestrator/data/config/optimism-contracts/hardhat-tasks/verify-contract-deployment.ts diff --git a/app/data/config/ponder/base-rates-config.json b/stack_orchestrator/data/config/ponder/base-rates-config.json similarity index 100% rename from app/data/config/ponder/base-rates-config.json rename to stack_orchestrator/data/config/ponder/base-rates-config.json diff --git a/app/data/config/ponder/deploy-erc20-contract.sh b/stack_orchestrator/data/config/ponder/deploy-erc20-contract.sh 
similarity index 100% rename from app/data/config/ponder/deploy-erc20-contract.sh rename to stack_orchestrator/data/config/ponder/deploy-erc20-contract.sh diff --git a/app/data/config/ponder/ponder-start.sh b/stack_orchestrator/data/config/ponder/ponder-start.sh similarity index 100% rename from app/data/config/ponder/ponder-start.sh rename to stack_orchestrator/data/config/ponder/ponder-start.sh diff --git a/app/data/config/ponder/ponder.indexer-1.config.ts b/stack_orchestrator/data/config/ponder/ponder.indexer-1.config.ts similarity index 100% rename from app/data/config/ponder/ponder.indexer-1.config.ts rename to stack_orchestrator/data/config/ponder/ponder.indexer-1.config.ts diff --git a/app/data/config/ponder/ponder.indexer-2.config.ts b/stack_orchestrator/data/config/ponder/ponder.indexer-2.config.ts similarity index 100% rename from app/data/config/ponder/ponder.indexer-2.config.ts rename to stack_orchestrator/data/config/ponder/ponder.indexer-2.config.ts diff --git a/app/data/config/ponder/ponder.watcher.config.ts b/stack_orchestrator/data/config/ponder/ponder.watcher.config.ts similarity index 100% rename from app/data/config/ponder/ponder.watcher.config.ts rename to stack_orchestrator/data/config/ponder/ponder.watcher.config.ts diff --git a/app/data/config/postgresql/create-pg-stat-statements.sql b/stack_orchestrator/data/config/postgresql/create-pg-stat-statements.sql similarity index 100% rename from app/data/config/postgresql/create-pg-stat-statements.sql rename to stack_orchestrator/data/config/postgresql/create-pg-stat-statements.sql diff --git a/app/data/config/postgresql/multiple-postgressql-databases.sh b/stack_orchestrator/data/config/postgresql/multiple-postgressql-databases.sh similarity index 100% rename from app/data/config/postgresql/multiple-postgressql-databases.sh rename to stack_orchestrator/data/config/postgresql/multiple-postgressql-databases.sh diff --git a/stack_orchestrator/data/config/proxy-server/run.sh 
b/stack_orchestrator/data/config/proxy-server/run.sh new file mode 100755 index 00000000..9e8dc7f5 --- /dev/null +++ b/stack_orchestrator/data/config/proxy-server/run.sh @@ -0,0 +1,9 @@ +#!/bin/sh + +if [ "$ENABLE_PROXY" = "true" ]; then + echo "Proxy server enabled" + yarn proxy +else + echo "Proxy server disabled, exiting" + exit 0 +fi diff --git a/app/data/config/reth/start-lighthouse.sh b/stack_orchestrator/data/config/reth/start-lighthouse.sh similarity index 100% rename from app/data/config/reth/start-lighthouse.sh rename to stack_orchestrator/data/config/reth/start-lighthouse.sh diff --git a/app/data/config/reth/start-reth.sh b/stack_orchestrator/data/config/reth/start-reth.sh similarity index 100% rename from app/data/config/reth/start-reth.sh rename to stack_orchestrator/data/config/reth/start-reth.sh diff --git a/app/data/config/sushiswap-subgraph-v3/filecoin.js b/stack_orchestrator/data/config/sushiswap-subgraph-v3/filecoin.js similarity index 100% rename from app/data/config/sushiswap-subgraph-v3/filecoin.js rename to stack_orchestrator/data/config/sushiswap-subgraph-v3/filecoin.js diff --git a/app/data/config/sushiswap-subgraph-v3/run-blocks.sh b/stack_orchestrator/data/config/sushiswap-subgraph-v3/run-blocks.sh similarity index 100% rename from app/data/config/sushiswap-subgraph-v3/run-blocks.sh rename to stack_orchestrator/data/config/sushiswap-subgraph-v3/run-blocks.sh diff --git a/app/data/config/sushiswap-subgraph-v3/run-v3.sh b/stack_orchestrator/data/config/sushiswap-subgraph-v3/run-v3.sh similarity index 100% rename from app/data/config/sushiswap-subgraph-v3/run-v3.sh rename to stack_orchestrator/data/config/sushiswap-subgraph-v3/run-v3.sh diff --git a/app/data/config/tx-spammer/tx-spammer.env b/stack_orchestrator/data/config/tx-spammer/tx-spammer.env similarity index 100% rename from app/data/config/tx-spammer/tx-spammer.env rename to stack_orchestrator/data/config/tx-spammer/tx-spammer.env diff --git 
a/stack_orchestrator/data/config/uniswap-interface/build-app.sh b/stack_orchestrator/data/config/uniswap-interface/build-app.sh new file mode 100755 index 00000000..d3b012e6 --- /dev/null +++ b/stack_orchestrator/data/config/uniswap-interface/build-app.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +# Check and exit if a deployment already exists (on restarts) +if [ -d /app-builds/uniswap/build ]; then + echo "Build already exists, remove volume to rebuild" + exit 0 +fi + +yarn build + +# Move build to app-builds so urbit can deploy it +mkdir /app-builds/uniswap +cp -r ./build /app-builds/uniswap/ diff --git a/stack_orchestrator/data/config/uniswap-interface/deploy-uniswap-app.sh b/stack_orchestrator/data/config/uniswap-interface/deploy-uniswap-app.sh new file mode 100755 index 00000000..f07a205b --- /dev/null +++ b/stack_orchestrator/data/config/uniswap-interface/deploy-uniswap-app.sh @@ -0,0 +1,149 @@ +#!/bin/bash + +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +echo "Using IPFS endpoint ${CERC_IPFS_GLOB_HOST_ENDPOINT} for hosting globs" +echo "Using IPFS server endpoint ${CERC_IPFS_SERVER_ENDPOINT} for reading glob files" +ipfs_host_endpoint=${CERC_IPFS_GLOB_HOST_ENDPOINT} +ipfs_server_endpoint=${CERC_IPFS_SERVER_ENDPOINT} + +uniswap_app_build='/app-builds/uniswap/build' +uniswap_desk_dir='/urbit/zod/uniswap' + +if [ -d ${uniswap_desk_dir} ]; then + echo "Uniswap desk dir already exists, skipping deployment..." + exit 0 +fi + +# Fire curl requests to perform operations on the ship +dojo () { + curl -s --data '{"source":{"dojo":"'"$1"'"},"sink":{"stdout":null}}' http://localhost:12321 +} + +hood () { + curl -s --data '{"source":{"dojo":"+hood/'"$1"'"},"sink":{"app":"hood"}}' http://localhost:12321 +} + +# Create/mount a uniswap desk +hood "merge %uniswap our %landscape" +hood "mount %uniswap" + +# Loop until the uniswap build appears +while [ ! 
-d ${uniswap_app_build} ]; do + echo "Uniswap app build not found, retrying in 5s..." + sleep 5 +done +echo "Build found..." + +# Copy over build to desk data dir +cp -r ${uniswap_app_build} ${uniswap_desk_dir} + +# Create a mark file for .map file type +cat << EOF > "${uniswap_desk_dir}/mar/map.hoon" +:: +:::: /hoon/map/mar + :: Mark for js source maps +/? 310 +:: +=, eyre +|_ mud=@ +++ grow + |% + ++ mime [/application/octet-stream (as-octs:mimes:html (@t mud))] + -- +++ grab + |% :: convert from + ++ mime |=([p=mite q=octs] (@t q.q)) + ++ noun cord :: clam from %noun + -- +++ grad %mime +-- +EOF + +# Create a mark file for .woff file type +cat << EOF > "${uniswap_desk_dir}/mar/woff.hoon" +|_ dat=octs +++ grow + |% + ++ mime [/font/woff dat] + -- +++ grab + |% + ++ mime |=([=mite =octs] octs) + ++ noun octs + -- +++ grad %mime +-- +EOF + +# Create a mark file for .ttf file type +cat << EOF > "${uniswap_desk_dir}/mar/ttf.hoon" +|_ dat=octs +++ grow + |% + ++ mime [/font/ttf dat] + -- +++ grab + |% + ++ mime |=([=mite =octs] octs) + ++ noun octs + -- +++ grad %mime +-- +EOF + +rm "${uniswap_desk_dir}/desk.bill" +rm "${uniswap_desk_dir}/desk.ship" + +# Commit changes and create a glob +hood "commit %uniswap" +dojo "-landscape!make-glob %uniswap /build" + +glob_file=$(ls -1 -c zod/.urb/put | head -1) +echo "Created glob file: ${glob_file}" + +upload_response=$(curl -X POST -F file=@./zod/.urb/put/${glob_file} ${ipfs_host_endpoint}/api/v0/add) +glob_cid=$(echo "$upload_response" | grep -o '"Hash":"[^"]*' | sed 's/"Hash":"//') + +echo "Glob file uploaded to IFPS:" +echo "{ cid: ${glob_cid}, filename: ${glob_file} }" + +# Curl and wait for the glob to be hosted +glob_url="${ipfs_server_endpoint}/ipfs/${glob_cid}?filename=${glob_file}" + +echo "Checking if glob file hosted at ${glob_url}" +while true; do + response=$(curl -sL -w "%{http_code}" -o /dev/null "$glob_url") + + if [ $response -eq 200 ]; then + echo "File found at $glob_url" + break # Exit the loop if the file 
is found + else + echo "File not found. Retrying in a few seconds..." + sleep 5 + fi +done + +glob_hash=$(echo "$glob_file" | sed "s/glob-\([a-z0-9\.]*\).glob/\1/") + +# Update the docket file +cat << EOF > "${uniswap_desk_dir}/desk.docket-0" +:~ title+'Uniswap' + info+'Self-hosted uniswap frontend.' + color+0xcd.75df + image+'https://logowik.com/content/uploads/images/uniswap-uni7403.jpg' + base+'uniswap' + glob-http+['${glob_url}' ${glob_hash}] + version+[0 0 1] + website+'https://uniswap.org/' + license+'MIT' +== +EOF + +# Commit changes and install the app +hood "commit %uniswap" +hood "install our %uniswap" + +echo "Uniswap app installed" diff --git a/stack_orchestrator/data/config/uniswap-interface/install-uniswap-app.sh b/stack_orchestrator/data/config/uniswap-interface/install-uniswap-app.sh new file mode 100755 index 00000000..2463e1c7 --- /dev/null +++ b/stack_orchestrator/data/config/uniswap-interface/install-uniswap-app.sh @@ -0,0 +1,110 @@ +#!/bin/bash + +# $1: Glob file URL (eg. https://xyz.com/glob-0vabcd.glob) +# $2: Glob file hash (eg. 
0vabcd) +# $3: Urbit ship's pier dir (default: ./zod) + +if [ "$#" -lt 2 ]; then + echo "Insufficient arguments" + exit 0 +fi + +glob_url=$1 +glob_hash=$2 +echo "Using glob file from ${glob_url} with hash ${glob_hash}" + +# Default pier dir: ./zod +# Default desk dir: ./zod/uniswap +pier_dir="${3:-./zod}" +uniswap_desk_dir="${pier_dir}/uniswap" +echo "Using ${uniswap_desk_dir} as the Uniswap desk dir path" + +# Fire curl requests to perform operations on the ship +dojo () { + curl -s --data '{"source":{"dojo":"'"$1"'"},"sink":{"stdout":null}}' http://localhost:12321 +} + +hood () { + curl -s --data '{"source":{"dojo":"+hood/'"$1"'"},"sink":{"app":"hood"}}' http://localhost:12321 +} + +# Create/mount a uniswap desk +hood "merge %uniswap our %landscape" +hood "mount %uniswap" + +# Create a mark file for .map file type +cat << EOF > "${uniswap_desk_dir}/mar/map.hoon" +:: +:::: /hoon/map/mar + :: Mark for js source maps +/? 310 +:: +=, eyre +|_ mud=@ +++ grow + |% + ++ mime [/application/octet-stream (as-octs:mimes:html (@t mud))] + -- +++ grab + |% :: convert from + ++ mime |=([p=mite q=octs] (@t q.q)) + ++ noun cord :: clam from %noun + -- +++ grad %mime +-- +EOF + +# Create a mark file for .woff file type +cat << EOF > "${uniswap_desk_dir}/mar/woff.hoon" +|_ dat=octs +++ grow + |% + ++ mime [/font/woff dat] + -- +++ grab + |% + ++ mime |=([=mite =octs] octs) + ++ noun octs + -- +++ grad %mime +-- +EOF + +# Create a mark file for .ttf file type +cat << EOF > "${uniswap_desk_dir}/mar/ttf.hoon" +|_ dat=octs +++ grow + |% + ++ mime [/font/ttf dat] + -- +++ grab + |% + ++ mime |=([=mite =octs] octs) + ++ noun octs + -- +++ grad %mime +-- +EOF + +rm "${uniswap_desk_dir}/desk.bill" +rm "${uniswap_desk_dir}/desk.ship" + +# Update the docket file +cat << EOF > "${uniswap_desk_dir}/desk.docket-0" +:~ title+'Uniswap' + info+'Self-hosted uniswap frontend.' 
+ color+0xcd.75df + image+'https://logowik.com/content/uploads/images/uniswap-uni7403.jpg' + base+'uniswap' + glob-http+['${glob_url}' ${glob_hash}] + version+[0 0 1] + website+'https://uniswap.org/' + license+'MIT' +== +EOF + +# Commit changes and install the app +hood "commit %uniswap" +hood "install our %uniswap" + +echo "Uniswap app installed" diff --git a/stack_orchestrator/data/config/uniswap-interface/remote-deploy-uniswap.sh b/stack_orchestrator/data/config/uniswap-interface/remote-deploy-uniswap.sh new file mode 100755 index 00000000..31f03d72 --- /dev/null +++ b/stack_orchestrator/data/config/uniswap-interface/remote-deploy-uniswap.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# $1: Remote user host +# $2: Remote Urbit ship's pier dir path (eg. /home/user/zod) +# $3: Glob file URL (eg. https://xyz.com/glob-0vabcd.glob) +# $4: Glob file hash (eg. 0vabcd) + +if [ "$#" -ne 4 ]; then + echo "Incorrect number of arguments" + echo "Usage: $0 " + exit 1 +fi + +remote_user_host="$1" +remote_pier_folder="$2" +glob_url="$3" +glob_hash="$4" + +installation_script="./install-uniswap-app.sh" + +ssh "$remote_user_host" "bash -s $glob_url $glob_hash $remote_pier_folder" < "$installation_script" diff --git a/stack_orchestrator/data/config/urbit/run-urbit-ship.sh b/stack_orchestrator/data/config/urbit/run-urbit-ship.sh new file mode 100755 index 00000000..bb301c81 --- /dev/null +++ b/stack_orchestrator/data/config/urbit/run-urbit-ship.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +pier_dir="/urbit/zod" + +# Run urbit ship in daemon mode +# Check if the directory exists +if [ -d "$pier_dir" ]; then + echo "Pier directory already exists, rebooting..." + urbit -d zod +else + echo "Creating a new fake ship..." 
+ urbit -d -F zod +fi diff --git a/app/data/config/watcher-azimuth/gateway-watchers.json b/stack_orchestrator/data/config/watcher-azimuth/gateway-watchers.json similarity index 100% rename from app/data/config/watcher-azimuth/gateway-watchers.json rename to stack_orchestrator/data/config/watcher-azimuth/gateway-watchers.json diff --git a/app/data/config/watcher-azimuth/merge-toml.js b/stack_orchestrator/data/config/watcher-azimuth/merge-toml.js similarity index 100% rename from app/data/config/watcher-azimuth/merge-toml.js rename to stack_orchestrator/data/config/watcher-azimuth/merge-toml.js diff --git a/stack_orchestrator/data/config/watcher-azimuth/start-job-runner.sh b/stack_orchestrator/data/config/watcher-azimuth/start-job-runner.sh new file mode 100755 index 00000000..4bcad74c --- /dev/null +++ b/stack_orchestrator/data/config/watcher-azimuth/start-job-runner.sh @@ -0,0 +1,28 @@ +#!/bin/sh +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +echo "Using IPLD ETH RPC endpoint ${CERC_IPLD_ETH_RPC}" +echo "Using IPLD GQL endpoint ${CERC_IPLD_ETH_GQL}" +echo "Using historicalLogsBlockRange ${CERC_HISTORICAL_BLOCK_RANGE:-2000}" + +# Replace env variables in template TOML file +# Read in the config template TOML file and modify it +WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml) +WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \ + sed -E "s|REPLACE_WITH_CERC_IPLD_ETH_RPC|${CERC_IPLD_ETH_RPC}|g; \ + s|REPLACE_WITH_CERC_IPLD_ETH_GQL|${CERC_IPLD_ETH_GQL}|g; \ + s|REPLACE_WITH_CERC_HISTORICAL_BLOCK_RANGE|${CERC_HISTORICAL_BLOCK_RANGE:-2000}| ") + +# Write the modified content to a new file +echo "$WATCHER_CONFIG" > environments/watcher-config.toml + +# Merge SO watcher config with existing config file +node merge-toml.js + +yarn watch:contract --address $CONTRACT_ADDRESS --kind $CONTRACT_NAME --checkpoint true --starting-block $STARTING_BLOCK + +echo 'yarn job-runner' +yarn job-runner diff --git 
a/app/data/config/watcher-azimuth/start-server.sh b/stack_orchestrator/data/config/watcher-azimuth/start-server.sh similarity index 80% rename from app/data/config/watcher-azimuth/start-server.sh rename to stack_orchestrator/data/config/watcher-azimuth/start-server.sh index c84c58d0..fa334653 100755 --- a/app/data/config/watcher-azimuth/start-server.sh +++ b/stack_orchestrator/data/config/watcher-azimuth/start-server.sh @@ -4,18 +4,17 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then set -x fi -CERC_IPLD_ETH_RPC="${CERC_IPLD_ETH_RPC:-${DEFAULT_CERC_IPLD_ETH_RPC}}" -CERC_IPLD_ETH_GQL="${CERC_IPLD_ETH_GQL:-${DEFAULT_CERC_IPLD_ETH_GQL}}" - echo "Using IPLD ETH RPC endpoint ${CERC_IPLD_ETH_RPC}" echo "Using IPLD GQL endpoint ${CERC_IPLD_ETH_GQL}" +echo "Using historicalLogsBlockRange ${CERC_HISTORICAL_BLOCK_RANGE:-2000}" # Replace env variables in template TOML file # Read in the config template TOML file and modify it WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml) WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \ sed -E "s|REPLACE_WITH_CERC_IPLD_ETH_RPC|${CERC_IPLD_ETH_RPC}|g; \ - s|REPLACE_WITH_CERC_IPLD_ETH_GQL|${CERC_IPLD_ETH_GQL}| ") + s|REPLACE_WITH_CERC_IPLD_ETH_GQL|${CERC_IPLD_ETH_GQL}|g; \ + s|REPLACE_WITH_CERC_HISTORICAL_BLOCK_RANGE|${CERC_HISTORICAL_BLOCK_RANGE:-2000}| ") # Write the modified content to a new file echo "$WATCHER_CONFIG" > environments/watcher-config.toml diff --git a/app/data/config/watcher-azimuth/watcher-config-template.toml b/stack_orchestrator/data/config/watcher-azimuth/watcher-config-template.toml similarity index 67% rename from app/data/config/watcher-azimuth/watcher-config-template.toml rename to stack_orchestrator/data/config/watcher-azimuth/watcher-config-template.toml index 1a4616fc..2a91fedf 100644 --- a/app/data/config/watcher-azimuth/watcher-config-template.toml +++ b/stack_orchestrator/data/config/watcher-azimuth/watcher-config-template.toml @@ -2,6 +2,9 @@ host = "0.0.0.0" maxSimultaneousRequests = -1 
+[metrics] + host = "0.0.0.0" + [database] host = "watcher-db" port = 5432 @@ -12,3 +15,7 @@ [upstream.ethServer] gqlApiEndpoint = "REPLACE_WITH_CERC_IPLD_ETH_GQL" rpcProviderEndpoint = "REPLACE_WITH_CERC_IPLD_ETH_RPC" + +[jobQueue] + historicalLogsBlockRange = REPLACE_WITH_CERC_HISTORICAL_BLOCK_RANGE + blockDelayInMilliSecs = 12000 diff --git a/app/data/config/watcher-erc20/erc20-watcher.toml b/stack_orchestrator/data/config/watcher-erc20/erc20-watcher.toml similarity index 100% rename from app/data/config/watcher-erc20/erc20-watcher.toml rename to stack_orchestrator/data/config/watcher-erc20/erc20-watcher.toml diff --git a/app/data/config/watcher-erc721/erc721-watcher.toml b/stack_orchestrator/data/config/watcher-erc721/erc721-watcher.toml similarity index 100% rename from app/data/config/watcher-erc721/erc721-watcher.toml rename to stack_orchestrator/data/config/watcher-erc721/erc721-watcher.toml diff --git a/app/data/config/watcher-gelato/create-and-import-checkpoint.sh b/stack_orchestrator/data/config/watcher-gelato/create-and-import-checkpoint.sh similarity index 100% rename from app/data/config/watcher-gelato/create-and-import-checkpoint.sh rename to stack_orchestrator/data/config/watcher-gelato/create-and-import-checkpoint.sh diff --git a/app/data/config/watcher-gelato/start-job-runner.sh b/stack_orchestrator/data/config/watcher-gelato/start-job-runner.sh similarity index 100% rename from app/data/config/watcher-gelato/start-job-runner.sh rename to stack_orchestrator/data/config/watcher-gelato/start-job-runner.sh diff --git a/app/data/config/watcher-gelato/start-server.sh b/stack_orchestrator/data/config/watcher-gelato/start-server.sh similarity index 100% rename from app/data/config/watcher-gelato/start-server.sh rename to stack_orchestrator/data/config/watcher-gelato/start-server.sh diff --git a/app/data/config/watcher-gelato/watcher-config-template.toml b/stack_orchestrator/data/config/watcher-gelato/watcher-config-template.toml similarity index 100% 
rename from app/data/config/watcher-gelato/watcher-config-template.toml rename to stack_orchestrator/data/config/watcher-gelato/watcher-config-template.toml diff --git a/app/data/config/watcher-gelato/watcher-params.env b/stack_orchestrator/data/config/watcher-gelato/watcher-params.env similarity index 100% rename from app/data/config/watcher-gelato/watcher-params.env rename to stack_orchestrator/data/config/watcher-gelato/watcher-params.env diff --git a/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/start-job-runner.sh b/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/start-job-runner.sh new file mode 100755 index 00000000..819b1096 --- /dev/null +++ b/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/start-job-runner.sh @@ -0,0 +1,20 @@ +#!/bin/sh + +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi +set -u + +echo "Using ETH RPC endpoint ${CERC_ETH_RPC_ENDPOINT}" + +# Read in the config template TOML file and modify it +WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml) +WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \ + sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINT|${CERC_ETH_RPC_ENDPOINT}| ") + +# Write the modified content to a new file +echo "$WATCHER_CONFIG" > environments/local.toml + +echo "Running job-runner..." 
+DEBUG=vulcanize:* exec node --enable-source-maps dist/job-runner.js diff --git a/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/start-server.sh b/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/start-server.sh new file mode 100755 index 00000000..e2bbdaad --- /dev/null +++ b/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/start-server.sh @@ -0,0 +1,20 @@ +#!/bin/sh + +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi +set -u + +echo "Using ETH RPC endpoint ${CERC_ETH_RPC_ENDPOINT}" + +# Read in the config template TOML file and modify it +WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml) +WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \ + sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINT|${CERC_ETH_RPC_ENDPOINT}| ") + +# Write the modified content to a new file +echo "$WATCHER_CONFIG" > environments/local.toml + +echo "Running server..." +DEBUG=vulcanize:* exec node --enable-source-maps dist/server.js diff --git a/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/watcher-config-template.toml b/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/watcher-config-template.toml new file mode 100644 index 00000000..f5355a4b --- /dev/null +++ b/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/watcher-config-template.toml @@ -0,0 +1,100 @@ +[server] + host = "0.0.0.0" + port = 3008 + kind = "active" + gqlPath = '/' + + # Checkpointing state. + checkpointing = true + + # Checkpoint interval in number of blocks. + checkpointInterval = 2000 + + # Enable state creation + # CAUTION: Disable only if state creation is not desired or can be filled subsequently + enableState = false + + subgraphPath = "./subgraph-build" + + # Interval to restart wasm instance periodically + wasmRestartBlocksInterval = 20 + + # Interval in number of blocks at which to clear entities cache. + clearEntitiesCacheInterval = 1000 + + # Max block range for which to return events in eventsInRange GQL query. 
+ # Use -1 for skipping check on block range. + maxEventsBlockRange = 1000 + + # Flag to specify whether RPC endpoint supports block hash as block tag parameter + rpcSupportsBlockHashParam = false + + # GQL cache settings + [server.gqlCache] + enabled = true + + # Max in-memory cache size (in bytes) (default 8 MB) + # maxCacheSize + + # GQL cache-control max-age settings (in seconds) + maxAge = 15 + timeTravelMaxAge = 86400 # 1 day + +[metrics] + host = "127.0.0.1" + port = 9000 + [metrics.gql] + port = 9001 + +[database] + type = "postgres" + host = "merkl-sushiswap-v3-watcher-db" + port = 5432 + database = "merkl-sushiswap-v3-watcher" + username = "vdbm" + password = "password" + synchronize = true + logging = false + +[upstream] + [upstream.ethServer] + rpcProviderEndpoint = "REPLACE_WITH_CERC_ETH_RPC_ENDPOINT" + + # Boolean flag to specify if rpc-eth-client should be used for RPC endpoint instead of ipld-eth-client (ipld-eth-server GQL client) + rpcClient = true + + # Boolean flag to specify if rpcProviderEndpoint is an FEVM RPC endpoint + isFEVM = true + + # Boolean flag to filter event logs by contracts + filterLogsByAddresses = true + # Boolean flag to filter event logs by topics + filterLogsByTopics = false + + [upstream.cache] + name = "requests" + enabled = false + deleteOnStart = false + +[jobQueue] + dbConnectionString = "postgres://vdbm:password@merkl-sushiswap-v3-watcher-db/merkl-sushiswap-v3-watcher-job-queue" + maxCompletionLagInSecs = 300 + jobDelayInMilliSecs = 100 + eventsInBatch = 50 + subgraphEventsOrder = true + # Filecoin block time: https://docs.filecoin.io/basics/the-blockchain/blocks-and-tipsets#blocktime + blockDelayInMilliSecs = 30000 + prefetchBlocksInMem = false + prefetchBlockCount = 10 + + # Boolean to switch between modes of processing events when starting the server. + # Setting to true will fetch filtered events and required blocks in a range of blocks and then process them. 
+ # Setting to false will fetch blocks consecutively with its events and then process them (Behaviour is followed in realtime processing near head). + useBlockRanges = true + + # Block range in which logs are fetched during historical blocks processing + historicalLogsBlockRange = 2000 + + # Max block range of historical processing after which it waits for completion of events processing + # If set to -1 historical processing does not wait for events processing and completes till latest canonical block + historicalMaxFetchAhead = 10000 diff --git a/app/data/config/watcher-mobymask-v2/deploy-and-generate-invite.sh b/stack_orchestrator/data/config/watcher-mobymask-v2/deploy-and-generate-invite.sh similarity index 100% rename from app/data/config/watcher-mobymask-v2/deploy-and-generate-invite.sh rename to stack_orchestrator/data/config/watcher-mobymask-v2/deploy-and-generate-invite.sh diff --git a/app/data/config/watcher-mobymask-v2/generate-peer-ids.sh b/stack_orchestrator/data/config/watcher-mobymask-v2/generate-peer-ids.sh similarity index 100% rename from app/data/config/watcher-mobymask-v2/generate-peer-ids.sh rename to stack_orchestrator/data/config/watcher-mobymask-v2/generate-peer-ids.sh diff --git a/app/data/config/watcher-mobymask-v2/mobymask-app-config.json b/stack_orchestrator/data/config/watcher-mobymask-v2/mobymask-app-config.json similarity index 100% rename from app/data/config/watcher-mobymask-v2/mobymask-app-config.json rename to stack_orchestrator/data/config/watcher-mobymask-v2/mobymask-app-config.json diff --git a/app/data/config/watcher-mobymask-v2/mobymask-app-start.sh b/stack_orchestrator/data/config/watcher-mobymask-v2/mobymask-app-start.sh similarity index 100% rename from app/data/config/watcher-mobymask-v2/mobymask-app-start.sh rename to stack_orchestrator/data/config/watcher-mobymask-v2/mobymask-app-start.sh diff --git a/app/data/config/watcher-mobymask-v2/mobymask-params.env 
b/stack_orchestrator/data/config/watcher-mobymask-v2/mobymask-params.env similarity index 100% rename from app/data/config/watcher-mobymask-v2/mobymask-params.env rename to stack_orchestrator/data/config/watcher-mobymask-v2/mobymask-params.env diff --git a/app/data/config/watcher-mobymask-v2/optimism-params.env b/stack_orchestrator/data/config/watcher-mobymask-v2/optimism-params.env similarity index 100% rename from app/data/config/watcher-mobymask-v2/optimism-params.env rename to stack_orchestrator/data/config/watcher-mobymask-v2/optimism-params.env diff --git a/app/data/config/watcher-mobymask-v2/secrets-template.json b/stack_orchestrator/data/config/watcher-mobymask-v2/secrets-template.json similarity index 100% rename from app/data/config/watcher-mobymask-v2/secrets-template.json rename to stack_orchestrator/data/config/watcher-mobymask-v2/secrets-template.json diff --git a/app/data/config/watcher-mobymask-v2/set-tests-env.sh b/stack_orchestrator/data/config/watcher-mobymask-v2/set-tests-env.sh similarity index 100% rename from app/data/config/watcher-mobymask-v2/set-tests-env.sh rename to stack_orchestrator/data/config/watcher-mobymask-v2/set-tests-env.sh diff --git a/app/data/config/watcher-mobymask-v2/start-server.sh b/stack_orchestrator/data/config/watcher-mobymask-v2/start-server.sh similarity index 100% rename from app/data/config/watcher-mobymask-v2/start-server.sh rename to stack_orchestrator/data/config/watcher-mobymask-v2/start-server.sh diff --git a/app/data/config/watcher-mobymask-v2/test-app-config.json b/stack_orchestrator/data/config/watcher-mobymask-v2/test-app-config.json similarity index 100% rename from app/data/config/watcher-mobymask-v2/test-app-config.json rename to stack_orchestrator/data/config/watcher-mobymask-v2/test-app-config.json diff --git a/app/data/config/watcher-mobymask-v2/test-app-start.sh b/stack_orchestrator/data/config/watcher-mobymask-v2/test-app-start.sh similarity index 100% rename from 
app/data/config/watcher-mobymask-v2/test-app-start.sh rename to stack_orchestrator/data/config/watcher-mobymask-v2/test-app-start.sh diff --git a/app/data/config/watcher-mobymask-v2/watcher-config-template.toml b/stack_orchestrator/data/config/watcher-mobymask-v2/watcher-config-template.toml similarity index 100% rename from app/data/config/watcher-mobymask-v2/watcher-config-template.toml rename to stack_orchestrator/data/config/watcher-mobymask-v2/watcher-config-template.toml diff --git a/app/data/config/watcher-mobymask-v3/deploy-and-generate-invite.sh b/stack_orchestrator/data/config/watcher-mobymask-v3/deploy-and-generate-invite.sh similarity index 100% rename from app/data/config/watcher-mobymask-v3/deploy-and-generate-invite.sh rename to stack_orchestrator/data/config/watcher-mobymask-v3/deploy-and-generate-invite.sh diff --git a/app/data/config/watcher-mobymask-v3/keys/12D3KooWAMjBkFCT9DtCnSDcxftxJzSuTBvzVojabv64cnEvX4AZ.json b/stack_orchestrator/data/config/watcher-mobymask-v3/keys/12D3KooWAMjBkFCT9DtCnSDcxftxJzSuTBvzVojabv64cnEvX4AZ.json similarity index 100% rename from app/data/config/watcher-mobymask-v3/keys/12D3KooWAMjBkFCT9DtCnSDcxftxJzSuTBvzVojabv64cnEvX4AZ.json rename to stack_orchestrator/data/config/watcher-mobymask-v3/keys/12D3KooWAMjBkFCT9DtCnSDcxftxJzSuTBvzVojabv64cnEvX4AZ.json diff --git a/app/data/config/watcher-mobymask-v3/keys/12D3KooWBNEbY3QS4y23ngupDw9PDc4bvNvRJGVRejjV9EZLjux5.json b/stack_orchestrator/data/config/watcher-mobymask-v3/keys/12D3KooWBNEbY3QS4y23ngupDw9PDc4bvNvRJGVRejjV9EZLjux5.json similarity index 100% rename from app/data/config/watcher-mobymask-v3/keys/12D3KooWBNEbY3QS4y23ngupDw9PDc4bvNvRJGVRejjV9EZLjux5.json rename to stack_orchestrator/data/config/watcher-mobymask-v3/keys/12D3KooWBNEbY3QS4y23ngupDw9PDc4bvNvRJGVRejjV9EZLjux5.json diff --git a/app/data/config/watcher-mobymask-v3/keys/12D3KooWSRH6ftgkAZsKZK7UX1Zr6Hx6YAsEepHqzopFszqfTxxi.json 
b/stack_orchestrator/data/config/watcher-mobymask-v3/keys/12D3KooWSRH6ftgkAZsKZK7UX1Zr6Hx6YAsEepHqzopFszqfTxxi.json similarity index 100% rename from app/data/config/watcher-mobymask-v3/keys/12D3KooWSRH6ftgkAZsKZK7UX1Zr6Hx6YAsEepHqzopFszqfTxxi.json rename to stack_orchestrator/data/config/watcher-mobymask-v3/keys/12D3KooWSRH6ftgkAZsKZK7UX1Zr6Hx6YAsEepHqzopFszqfTxxi.json diff --git a/app/data/config/watcher-mobymask-v3/mobymask-app-start.sh b/stack_orchestrator/data/config/watcher-mobymask-v3/mobymask-app-start.sh similarity index 100% rename from app/data/config/watcher-mobymask-v3/mobymask-app-start.sh rename to stack_orchestrator/data/config/watcher-mobymask-v3/mobymask-app-start.sh diff --git a/app/data/config/watcher-mobymask-v3/mobymask-params.env b/stack_orchestrator/data/config/watcher-mobymask-v3/mobymask-params.env similarity index 100% rename from app/data/config/watcher-mobymask-v3/mobymask-params.env rename to stack_orchestrator/data/config/watcher-mobymask-v3/mobymask-params.env diff --git a/app/data/config/watcher-mobymask-v3/start-server.sh b/stack_orchestrator/data/config/watcher-mobymask-v3/start-server.sh similarity index 100% rename from app/data/config/watcher-mobymask-v3/start-server.sh rename to stack_orchestrator/data/config/watcher-mobymask-v3/start-server.sh diff --git a/app/data/config/watcher-mobymask-v3/watcher-config-rates.toml b/stack_orchestrator/data/config/watcher-mobymask-v3/watcher-config-rates.toml similarity index 100% rename from app/data/config/watcher-mobymask-v3/watcher-config-rates.toml rename to stack_orchestrator/data/config/watcher-mobymask-v3/watcher-config-rates.toml diff --git a/app/data/config/watcher-mobymask-v3/watcher-config-template.toml b/stack_orchestrator/data/config/watcher-mobymask-v3/watcher-config-template.toml similarity index 100% rename from app/data/config/watcher-mobymask-v3/watcher-config-template.toml rename to stack_orchestrator/data/config/watcher-mobymask-v3/watcher-config-template.toml diff 
--git a/app/data/config/watcher-mobymask/mobymask-watcher-db.sql b/stack_orchestrator/data/config/watcher-mobymask/mobymask-watcher-db.sql similarity index 100% rename from app/data/config/watcher-mobymask/mobymask-watcher-db.sql rename to stack_orchestrator/data/config/watcher-mobymask/mobymask-watcher-db.sql diff --git a/app/data/config/watcher-mobymask/mobymask-watcher.toml b/stack_orchestrator/data/config/watcher-mobymask/mobymask-watcher.toml similarity index 100% rename from app/data/config/watcher-mobymask/mobymask-watcher.toml rename to stack_orchestrator/data/config/watcher-mobymask/mobymask-watcher.toml diff --git a/stack_orchestrator/data/config/watcher-sushiswap-v3/start-job-runner.sh b/stack_orchestrator/data/config/watcher-sushiswap-v3/start-job-runner.sh new file mode 100755 index 00000000..819b1096 --- /dev/null +++ b/stack_orchestrator/data/config/watcher-sushiswap-v3/start-job-runner.sh @@ -0,0 +1,20 @@ +#!/bin/sh + +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi +set -u + +echo "Using ETH RPC endpoint ${CERC_ETH_RPC_ENDPOINT}" + +# Read in the config template TOML file and modify it +WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml) +WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \ + sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINT|${CERC_ETH_RPC_ENDPOINT}| ") + +# Write the modified content to a new file +echo "$WATCHER_CONFIG" > environments/local.toml + +echo "Running job-runner..." 
+DEBUG=vulcanize:* exec node --enable-source-maps dist/job-runner.js diff --git a/stack_orchestrator/data/config/watcher-sushiswap-v3/start-server.sh b/stack_orchestrator/data/config/watcher-sushiswap-v3/start-server.sh new file mode 100755 index 00000000..e2bbdaad --- /dev/null +++ b/stack_orchestrator/data/config/watcher-sushiswap-v3/start-server.sh @@ -0,0 +1,20 @@ +#!/bin/sh + +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi +set -u + +echo "Using ETH RPC endpoint ${CERC_ETH_RPC_ENDPOINT}" + +# Read in the config template TOML file and modify it +WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml) +WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \ + sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINT|${CERC_ETH_RPC_ENDPOINT}| ") + +# Write the modified content to a new file +echo "$WATCHER_CONFIG" > environments/local.toml + +echo "Running server..." +DEBUG=vulcanize:* exec node --enable-source-maps dist/server.js diff --git a/stack_orchestrator/data/config/watcher-sushiswap-v3/watcher-config-template.toml b/stack_orchestrator/data/config/watcher-sushiswap-v3/watcher-config-template.toml new file mode 100644 index 00000000..7cfabedd --- /dev/null +++ b/stack_orchestrator/data/config/watcher-sushiswap-v3/watcher-config-template.toml @@ -0,0 +1,100 @@ +[server] + host = "0.0.0.0" + port = 3008 + kind = "active" + gqlPath = "/" + + # Checkpointing state. + checkpointing = true + + # Checkpoint interval in number of blocks. + checkpointInterval = 2000 + + # Enable state creation + # CAUTION: Disable only if state creation is not desired or can be filled subsequently + enableState = false + + subgraphPath = "./subgraph-build" + + # Interval to restart wasm instance periodically + wasmRestartBlocksInterval = 20 + + # Interval in number of blocks at which to clear entities cache. + clearEntitiesCacheInterval = 1000 + + # Max block range for which to return events in eventsInRange GQL query. + # Use -1 for skipping check on block range. 
+ maxEventsBlockRange = 1000 + + # Flag to specify whether RPC endpoint supports block hash as block tag parameter + rpcSupportsBlockHashParam = false + + # GQL cache settings + [server.gqlCache] + enabled = true + + # Max in-memory cache size (in bytes) (default 8 MB) + # maxCacheSize + + # GQL cache-control max-age settings (in seconds) + maxAge = 15 + timeTravelMaxAge = 86400 # 1 day + +[metrics] + host = "127.0.0.1" + port = 9000 + [metrics.gql] + port = 9001 + +[database] + type = "postgres" + host = "sushiswap-v3-watcher-db" + port = 5432 + database = "sushiswap-v3-watcher" + username = "vdbm" + password = "password" + synchronize = true + logging = false + +[upstream] + [upstream.ethServer] + rpcProviderEndpoint = "REPLACE_WITH_CERC_ETH_RPC_ENDPOINT" + + # Boolean flag to specify if rpc-eth-client should be used for RPC endpoint instead of ipld-eth-client (ipld-eth-server GQL client) + rpcClient = true + + # Boolean flag to specify if rpcProviderEndpoint is an FEVM RPC endpoint + isFEVM = true + + # Boolean flag to filter event logs by contracts + filterLogsByAddresses = true + # Boolean flag to filter event logs by topics + filterLogsByTopics = true + + [upstream.cache] + name = "requests" + enabled = false + deleteOnStart = false + +[jobQueue] + dbConnectionString = "postgres://vdbm:password@sushiswap-v3-watcher-db/sushiswap-v3-watcher-job-queue" + maxCompletionLagInSecs = 300 + jobDelayInMilliSecs = 100 + eventsInBatch = 50 + subgraphEventsOrder = true + # Filecoin block time: https://docs.filecoin.io/basics/the-blockchain/blocks-and-tipsets#blocktime + blockDelayInMilliSecs = 30000 + prefetchBlocksInMem = false + prefetchBlockCount = 10 + + # Boolean to switch between modes of processing events when starting the server. + # Setting to true will fetch filtered events and required blocks in a range of blocks and then process them. 
+ # Setting to false will fetch blocks consecutively with its events and then process them (Behaviour is followed in realtime processing near head). + useBlockRanges = true + + # Block range in which logs are fetched during historical blocks processing + historicalLogsBlockRange = 2000 + + # Max block range of historical processing after which it waits for completion of events processing + # If set to -1 historical processing does not wait for events processing and completes till latest canonical block + historicalMaxFetchAhead = 10000 diff --git a/app/data/config/watcher-sushiswap/erc20-watcher.toml b/stack_orchestrator/data/config/watcher-sushiswap/erc20-watcher.toml similarity index 100% rename from app/data/config/watcher-sushiswap/erc20-watcher.toml rename to stack_orchestrator/data/config/watcher-sushiswap/erc20-watcher.toml diff --git a/app/data/config/watcher-sushiswap/lotus-params.env b/stack_orchestrator/data/config/watcher-sushiswap/lotus-params.env similarity index 100% rename from app/data/config/watcher-sushiswap/lotus-params.env rename to stack_orchestrator/data/config/watcher-sushiswap/lotus-params.env diff --git a/app/data/config/watcher-sushiswap/sushi-info-watcher-test.toml b/stack_orchestrator/data/config/watcher-sushiswap/sushi-info-watcher-test.toml similarity index 100% rename from app/data/config/watcher-sushiswap/sushi-info-watcher-test.toml rename to stack_orchestrator/data/config/watcher-sushiswap/sushi-info-watcher-test.toml diff --git a/app/data/config/watcher-sushiswap/sushi-info-watcher.toml b/stack_orchestrator/data/config/watcher-sushiswap/sushi-info-watcher.toml similarity index 100% rename from app/data/config/watcher-sushiswap/sushi-info-watcher.toml rename to stack_orchestrator/data/config/watcher-sushiswap/sushi-info-watcher.toml diff --git a/app/data/config/watcher-sushiswap/sushi-watcher-test.toml b/stack_orchestrator/data/config/watcher-sushiswap/sushi-watcher-test.toml similarity index 100% rename from 
app/data/config/watcher-sushiswap/sushi-watcher-test.toml rename to stack_orchestrator/data/config/watcher-sushiswap/sushi-watcher-test.toml diff --git a/app/data/config/watcher-sushiswap/sushi-watcher.toml b/stack_orchestrator/data/config/watcher-sushiswap/sushi-watcher.toml similarity index 100% rename from app/data/config/watcher-sushiswap/sushi-watcher.toml rename to stack_orchestrator/data/config/watcher-sushiswap/sushi-watcher.toml diff --git a/app/data/config/watcher-uniswap-v3/erc20-watcher.toml b/stack_orchestrator/data/config/watcher-uniswap-v3/erc20-watcher.toml similarity index 100% rename from app/data/config/watcher-uniswap-v3/erc20-watcher.toml rename to stack_orchestrator/data/config/watcher-uniswap-v3/erc20-watcher.toml diff --git a/app/data/config/watcher-uniswap-v3/run.sh b/stack_orchestrator/data/config/watcher-uniswap-v3/run.sh similarity index 100% rename from app/data/config/watcher-uniswap-v3/run.sh rename to stack_orchestrator/data/config/watcher-uniswap-v3/run.sh diff --git a/app/data/config/watcher-uniswap-v3/uni-info-watcher.toml b/stack_orchestrator/data/config/watcher-uniswap-v3/uni-info-watcher.toml similarity index 100% rename from app/data/config/watcher-uniswap-v3/uni-info-watcher.toml rename to stack_orchestrator/data/config/watcher-uniswap-v3/uni-info-watcher.toml diff --git a/app/data/config/watcher-uniswap-v3/uni-watcher.toml b/stack_orchestrator/data/config/watcher-uniswap-v3/uni-watcher.toml similarity index 100% rename from app/data/config/watcher-uniswap-v3/uni-watcher.toml rename to stack_orchestrator/data/config/watcher-uniswap-v3/uni-watcher.toml diff --git a/app/data/config/watcher-uniswap-v3/watch-contract.sh b/stack_orchestrator/data/config/watcher-uniswap-v3/watch-contract.sh similarity index 100% rename from app/data/config/watcher-uniswap-v3/watch-contract.sh rename to stack_orchestrator/data/config/watcher-uniswap-v3/watch-contract.sh diff --git a/app/data/container-build/build-base.sh 
b/stack_orchestrator/data/container-build/build-base.sh similarity index 100% rename from app/data/container-build/build-base.sh rename to stack_orchestrator/data/container-build/build-base.sh diff --git a/app/data/container-build/cerc-act-runner-task-executor/build.sh b/stack_orchestrator/data/container-build/cerc-act-runner-task-executor/build.sh similarity index 74% rename from app/data/container-build/cerc-act-runner-task-executor/build.sh rename to stack_orchestrator/data/container-build/cerc-act-runner-task-executor/build.sh index 25620a53..b625ed4b 100755 --- a/app/data/container-build/cerc-act-runner-task-executor/build.sh +++ b/stack_orchestrator/data/container-build/cerc-act-runner-task-executor/build.sh @@ -2,4 +2,4 @@ # Build a local version of the task executor for act-runner source ${CERC_CONTAINER_BASE_DIR}/build-base.sh SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) -docker build -t cerc/act-runner-task-executor:local -f ${CERC_REPO_BASE_DIR}/hosting/gitea/Dockerfile.task-executor ${build_command_args} ${SCRIPT_DIR} +docker build -t cerc/act-runner-task-executor:local -f ${CERC_REPO_BASE_DIR}/hosting/act-runner/Dockerfile.task-executor ${build_command_args} ${SCRIPT_DIR} diff --git a/app/data/container-build/cerc-act-runner/build.sh b/stack_orchestrator/data/container-build/cerc-act-runner/build.sh similarity index 100% rename from app/data/container-build/cerc-act-runner/build.sh rename to stack_orchestrator/data/container-build/cerc-act-runner/build.sh diff --git a/app/data/container-build/cerc-builder-gerbil/Dockerfile b/stack_orchestrator/data/container-build/cerc-builder-gerbil/Dockerfile similarity index 100% rename from app/data/container-build/cerc-builder-gerbil/Dockerfile rename to stack_orchestrator/data/container-build/cerc-builder-gerbil/Dockerfile diff --git a/app/data/container-build/cerc-builder-gerbil/README.md b/stack_orchestrator/data/container-build/cerc-builder-gerbil/README.md similarity index 
100% rename from app/data/container-build/cerc-builder-gerbil/README.md rename to stack_orchestrator/data/container-build/cerc-builder-gerbil/README.md diff --git a/app/data/container-build/cerc-builder-gerbil/entrypoint.sh b/stack_orchestrator/data/container-build/cerc-builder-gerbil/entrypoint.sh similarity index 100% rename from app/data/container-build/cerc-builder-gerbil/entrypoint.sh rename to stack_orchestrator/data/container-build/cerc-builder-gerbil/entrypoint.sh diff --git a/app/data/container-build/cerc-builder-gerbil/install-dependencies.sh b/stack_orchestrator/data/container-build/cerc-builder-gerbil/install-dependencies.sh similarity index 100% rename from app/data/container-build/cerc-builder-gerbil/install-dependencies.sh rename to stack_orchestrator/data/container-build/cerc-builder-gerbil/install-dependencies.sh diff --git a/app/data/container-build/cerc-builder-js/Dockerfile b/stack_orchestrator/data/container-build/cerc-builder-js/Dockerfile similarity index 100% rename from app/data/container-build/cerc-builder-js/Dockerfile rename to stack_orchestrator/data/container-build/cerc-builder-js/Dockerfile diff --git a/app/data/container-build/cerc-builder-js/README.md b/stack_orchestrator/data/container-build/cerc-builder-js/README.md similarity index 100% rename from app/data/container-build/cerc-builder-js/README.md rename to stack_orchestrator/data/container-build/cerc-builder-js/README.md diff --git a/app/data/container-build/cerc-builder-js/build-npm-package-local-dependencies.sh b/stack_orchestrator/data/container-build/cerc-builder-js/build-npm-package-local-dependencies.sh similarity index 100% rename from app/data/container-build/cerc-builder-js/build-npm-package-local-dependencies.sh rename to stack_orchestrator/data/container-build/cerc-builder-js/build-npm-package-local-dependencies.sh diff --git a/app/data/container-build/cerc-builder-js/build-npm-package.sh b/stack_orchestrator/data/container-build/cerc-builder-js/build-npm-package.sh 
similarity index 100% rename from app/data/container-build/cerc-builder-js/build-npm-package.sh rename to stack_orchestrator/data/container-build/cerc-builder-js/build-npm-package.sh diff --git a/app/data/container-build/cerc-builder-js/check-uid.sh b/stack_orchestrator/data/container-build/cerc-builder-js/check-uid.sh similarity index 100% rename from app/data/container-build/cerc-builder-js/check-uid.sh rename to stack_orchestrator/data/container-build/cerc-builder-js/check-uid.sh diff --git a/app/data/container-build/cerc-builder-js/entrypoint.sh b/stack_orchestrator/data/container-build/cerc-builder-js/entrypoint.sh similarity index 100% rename from app/data/container-build/cerc-builder-js/entrypoint.sh rename to stack_orchestrator/data/container-build/cerc-builder-js/entrypoint.sh diff --git a/app/data/container-build/cerc-builder-js/yarn-local-registry-fixup.sh b/stack_orchestrator/data/container-build/cerc-builder-js/yarn-local-registry-fixup.sh similarity index 100% rename from app/data/container-build/cerc-builder-js/yarn-local-registry-fixup.sh rename to stack_orchestrator/data/container-build/cerc-builder-js/yarn-local-registry-fixup.sh diff --git a/app/data/container-build/cerc-eth-api-proxy/build.sh b/stack_orchestrator/data/container-build/cerc-eth-api-proxy/build.sh similarity index 100% rename from app/data/container-build/cerc-eth-api-proxy/build.sh rename to stack_orchestrator/data/container-build/cerc-eth-api-proxy/build.sh diff --git a/app/data/container-build/cerc-eth-probe/build.sh b/stack_orchestrator/data/container-build/cerc-eth-probe/build.sh similarity index 100% rename from app/data/container-build/cerc-eth-probe/build.sh rename to stack_orchestrator/data/container-build/cerc-eth-probe/build.sh diff --git a/app/data/container-build/cerc-eth-statediff-fill-service/build.sh b/stack_orchestrator/data/container-build/cerc-eth-statediff-fill-service/build.sh similarity index 100% rename from 
app/data/container-build/cerc-eth-statediff-fill-service/build.sh rename to stack_orchestrator/data/container-build/cerc-eth-statediff-fill-service/build.sh diff --git a/app/data/container-build/cerc-eth-statediff-service/build.sh b/stack_orchestrator/data/container-build/cerc-eth-statediff-service/build.sh similarity index 100% rename from app/data/container-build/cerc-eth-statediff-service/build.sh rename to stack_orchestrator/data/container-build/cerc-eth-statediff-service/build.sh diff --git a/app/data/container-build/cerc-fixturenet-eth-genesis/Dockerfile b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-genesis/Dockerfile similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-genesis/Dockerfile rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-genesis/Dockerfile diff --git a/app/data/container-build/cerc-fixturenet-eth-genesis/build.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-genesis/build.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-genesis/build.sh rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-genesis/build.sh diff --git a/app/data/container-build/cerc-fixturenet-eth-genesis/genesis/Makefile b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-genesis/genesis/Makefile similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-genesis/genesis/Makefile rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-genesis/genesis/Makefile diff --git a/app/data/container-build/cerc-fixturenet-eth-genesis/genesis/accounts/import_keys.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-genesis/genesis/accounts/import_keys.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-genesis/genesis/accounts/import_keys.sh rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-genesis/genesis/accounts/import_keys.sh diff --git 
a/app/data/container-build/cerc-fixturenet-eth-genesis/genesis/accounts/mnemonic_to_csv.py b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-genesis/genesis/accounts/mnemonic_to_csv.py similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-genesis/genesis/accounts/mnemonic_to_csv.py rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-genesis/genesis/accounts/mnemonic_to_csv.py diff --git a/app/data/container-build/cerc-fixturenet-eth-genesis/genesis/el/build_el.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-genesis/genesis/el/build_el.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-genesis/genesis/el/build_el.sh rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-genesis/genesis/el/build_el.sh diff --git a/app/data/container-build/cerc-fixturenet-eth-genesis/genesis/el/el-config.yaml b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-genesis/genesis/el/el-config.yaml similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-genesis/genesis/el/el-config.yaml rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-genesis/genesis/el/el-config.yaml diff --git a/app/data/container-build/cerc-fixturenet-eth-geth/Dockerfile b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-geth/Dockerfile similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-geth/Dockerfile rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-geth/Dockerfile diff --git a/app/data/container-build/cerc-fixturenet-eth-geth/build.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-geth/build.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-geth/build.sh rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-geth/build.sh diff --git a/app/data/container-build/cerc-fixturenet-eth-geth/run-el.sh 
b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-geth/run-el.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-geth/run-el.sh rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-geth/run-el.sh diff --git a/app/data/container-build/cerc-fixturenet-eth-lighthouse/Dockerfile b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/Dockerfile similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-lighthouse/Dockerfile rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/Dockerfile diff --git a/app/data/container-build/cerc-fixturenet-eth-lighthouse/build.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/build.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-lighthouse/build.sh rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/build.sh diff --git a/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/Makefile b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/Makefile similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/Makefile rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/Makefile diff --git a/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/beacon_node.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/beacon_node.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/beacon_node.sh rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/beacon_node.sh diff --git a/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/bootnode.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/bootnode.sh similarity index 100% rename from 
app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/bootnode.sh rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/bootnode.sh diff --git a/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/build_cl.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/build_cl.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/build_cl.sh rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/build_cl.sh diff --git a/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/ready.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/ready.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/ready.sh rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/ready.sh diff --git a/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/reset_genesis_time.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/reset_genesis_time.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/reset_genesis_time.sh rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/reset_genesis_time.sh diff --git a/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/validator_client.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/validator_client.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/validator_client.sh rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/validator_client.sh diff --git a/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/vars.env 
b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/vars.env similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/vars.env rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/vars.env diff --git a/app/data/container-build/cerc-fixturenet-eth-lighthouse/run-cl.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/run-cl.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-lighthouse/run-cl.sh rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/run-cl.sh diff --git a/app/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/export-ethdb.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/export-ethdb.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/export-ethdb.sh rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/export-ethdb.sh diff --git a/app/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/status-internal.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/status-internal.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/status-internal.sh rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/status-internal.sh diff --git a/app/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/status.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/status.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/status.sh rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/status.sh diff --git a/app/data/container-build/cerc-fixturenet-plugeth-plugeth/Dockerfile 
b/stack_orchestrator/data/container-build/cerc-fixturenet-plugeth-plugeth/Dockerfile similarity index 100% rename from app/data/container-build/cerc-fixturenet-plugeth-plugeth/Dockerfile rename to stack_orchestrator/data/container-build/cerc-fixturenet-plugeth-plugeth/Dockerfile diff --git a/app/data/container-build/cerc-fixturenet-plugeth-plugeth/build.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-plugeth-plugeth/build.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-plugeth-plugeth/build.sh rename to stack_orchestrator/data/container-build/cerc-fixturenet-plugeth-plugeth/build.sh diff --git a/app/data/container-build/cerc-foundry/build.sh b/stack_orchestrator/data/container-build/cerc-foundry/build.sh similarity index 100% rename from app/data/container-build/cerc-foundry/build.sh rename to stack_orchestrator/data/container-build/cerc-foundry/build.sh diff --git a/app/data/container-build/cerc-go-ethereum-foundry/Dockerfile b/stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/Dockerfile similarity index 100% rename from app/data/container-build/cerc-go-ethereum-foundry/Dockerfile rename to stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/Dockerfile diff --git a/app/data/container-build/cerc-go-ethereum-foundry/build.sh b/stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/build.sh similarity index 100% rename from app/data/container-build/cerc-go-ethereum-foundry/build.sh rename to stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/build.sh diff --git a/app/data/container-build/cerc-go-ethereum-foundry/deploy-local-network.sh b/stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/deploy-local-network.sh similarity index 100% rename from app/data/container-build/cerc-go-ethereum-foundry/deploy-local-network.sh rename to stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/deploy-local-network.sh diff --git 
a/app/data/container-build/cerc-go-ethereum-foundry/genesis-automine.json b/stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/genesis-automine.json similarity index 100% rename from app/data/container-build/cerc-go-ethereum-foundry/genesis-automine.json rename to stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/genesis-automine.json diff --git a/app/data/container-build/cerc-go-ethereum-foundry/genesis.json b/stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/genesis.json similarity index 100% rename from app/data/container-build/cerc-go-ethereum-foundry/genesis.json rename to stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/genesis.json diff --git a/app/data/container-build/cerc-go-ethereum-foundry/start-private-network.sh b/stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/start-private-network.sh similarity index 100% rename from app/data/container-build/cerc-go-ethereum-foundry/start-private-network.sh rename to stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/start-private-network.sh diff --git a/app/data/container-build/cerc-go-ethereum-foundry/stateful/foundry.toml b/stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/stateful/foundry.toml similarity index 100% rename from app/data/container-build/cerc-go-ethereum-foundry/stateful/foundry.toml rename to stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/stateful/foundry.toml diff --git a/app/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/LICENSE b/stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/LICENSE similarity index 100% rename from app/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/LICENSE rename to stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/LICENSE diff --git a/app/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/Makefile 
b/stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/Makefile similarity index 100% rename from app/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/Makefile rename to stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/Makefile diff --git a/app/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/default.nix b/stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/default.nix similarity index 100% rename from app/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/default.nix rename to stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/default.nix diff --git a/app/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/demo/demo.sol b/stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/demo/demo.sol similarity index 100% rename from app/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/demo/demo.sol rename to stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/demo/demo.sol diff --git a/app/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/src/test.sol b/stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/src/test.sol similarity index 100% rename from app/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/src/test.sol rename to stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/src/test.sol diff --git a/app/data/container-build/cerc-go-ethereum-foundry/stateful/src/Stateful.sol b/stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/stateful/src/Stateful.sol similarity index 100% rename from app/data/container-build/cerc-go-ethereum-foundry/stateful/src/Stateful.sol rename to stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/stateful/src/Stateful.sol diff --git 
a/app/data/container-build/cerc-go-ethereum-foundry/stateful/src/test/Stateful.t.sol b/stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/stateful/src/test/Stateful.t.sol similarity index 100% rename from app/data/container-build/cerc-go-ethereum-foundry/stateful/src/test/Stateful.t.sol rename to stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/stateful/src/test/Stateful.t.sol diff --git a/app/data/container-build/cerc-go-ethereum/build.sh b/stack_orchestrator/data/container-build/cerc-go-ethereum/build.sh similarity index 100% rename from app/data/container-build/cerc-go-ethereum/build.sh rename to stack_orchestrator/data/container-build/cerc-go-ethereum/build.sh diff --git a/app/data/container-build/cerc-go-nitro/Dockerfile b/stack_orchestrator/data/container-build/cerc-go-nitro/Dockerfile similarity index 100% rename from app/data/container-build/cerc-go-nitro/Dockerfile rename to stack_orchestrator/data/container-build/cerc-go-nitro/Dockerfile diff --git a/app/data/container-build/cerc-go-nitro/build.sh b/stack_orchestrator/data/container-build/cerc-go-nitro/build.sh similarity index 100% rename from app/data/container-build/cerc-go-nitro/build.sh rename to stack_orchestrator/data/container-build/cerc-go-nitro/build.sh diff --git a/app/data/container-build/cerc-go-opera/build.sh b/stack_orchestrator/data/container-build/cerc-go-opera/build.sh similarity index 100% rename from app/data/container-build/cerc-go-opera/build.sh rename to stack_orchestrator/data/container-build/cerc-go-opera/build.sh diff --git a/app/data/container-build/cerc-graph-node/build.sh b/stack_orchestrator/data/container-build/cerc-graph-node/build.sh similarity index 100% rename from app/data/container-build/cerc-graph-node/build.sh rename to stack_orchestrator/data/container-build/cerc-graph-node/build.sh diff --git a/app/data/container-build/cerc-ipld-eth-beacon-db/build.sh b/stack_orchestrator/data/container-build/cerc-ipld-eth-beacon-db/build.sh similarity 
index 100% rename from app/data/container-build/cerc-ipld-eth-beacon-db/build.sh rename to stack_orchestrator/data/container-build/cerc-ipld-eth-beacon-db/build.sh diff --git a/app/data/container-build/cerc-ipld-eth-beacon-indexer/build.sh b/stack_orchestrator/data/container-build/cerc-ipld-eth-beacon-indexer/build.sh similarity index 100% rename from app/data/container-build/cerc-ipld-eth-beacon-indexer/build.sh rename to stack_orchestrator/data/container-build/cerc-ipld-eth-beacon-indexer/build.sh diff --git a/app/data/container-build/cerc-ipld-eth-db/build.sh b/stack_orchestrator/data/container-build/cerc-ipld-eth-db/build.sh similarity index 100% rename from app/data/container-build/cerc-ipld-eth-db/build.sh rename to stack_orchestrator/data/container-build/cerc-ipld-eth-db/build.sh diff --git a/app/data/container-build/cerc-ipld-eth-server/build.sh b/stack_orchestrator/data/container-build/cerc-ipld-eth-server/build.sh similarity index 100% rename from app/data/container-build/cerc-ipld-eth-server/build.sh rename to stack_orchestrator/data/container-build/cerc-ipld-eth-server/build.sh diff --git a/app/data/container-build/cerc-keycloak-reg-api/build.sh b/stack_orchestrator/data/container-build/cerc-keycloak-reg-api/build.sh similarity index 100% rename from app/data/container-build/cerc-keycloak-reg-api/build.sh rename to stack_orchestrator/data/container-build/cerc-keycloak-reg-api/build.sh diff --git a/app/data/container-build/cerc-keycloak-reg-ui/build.sh b/stack_orchestrator/data/container-build/cerc-keycloak-reg-ui/build.sh similarity index 100% rename from app/data/container-build/cerc-keycloak-reg-ui/build.sh rename to stack_orchestrator/data/container-build/cerc-keycloak-reg-ui/build.sh diff --git a/app/data/container-build/cerc-keycloak/Dockerfile b/stack_orchestrator/data/container-build/cerc-keycloak/Dockerfile similarity index 100% rename from app/data/container-build/cerc-keycloak/Dockerfile rename to 
stack_orchestrator/data/container-build/cerc-keycloak/Dockerfile diff --git a/app/data/container-build/cerc-keycloak/build.sh b/stack_orchestrator/data/container-build/cerc-keycloak/build.sh similarity index 100% rename from app/data/container-build/cerc-keycloak/build.sh rename to stack_orchestrator/data/container-build/cerc-keycloak/build.sh diff --git a/app/data/container-build/cerc-laconic-console-host/Dockerfile b/stack_orchestrator/data/container-build/cerc-laconic-console-host/Dockerfile similarity index 100% rename from app/data/container-build/cerc-laconic-console-host/Dockerfile rename to stack_orchestrator/data/container-build/cerc-laconic-console-host/Dockerfile diff --git a/app/data/container-build/cerc-laconic-console-host/build.sh b/stack_orchestrator/data/container-build/cerc-laconic-console-host/build.sh similarity index 100% rename from app/data/container-build/cerc-laconic-console-host/build.sh rename to stack_orchestrator/data/container-build/cerc-laconic-console-host/build.sh diff --git a/stack_orchestrator/data/container-build/cerc-laconic-console-host/config.yml b/stack_orchestrator/data/container-build/cerc-laconic-console-host/config.yml new file mode 100644 index 00000000..6c310842 --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-laconic-console-host/config.yml @@ -0,0 +1,6 @@ +# Config for laconic-console running in a fixturenet with laconicd + +services: + wns: + server: 'LACONIC_HOSTED_ENDPOINT/api' + webui: 'LACONIC_HOSTED_ENDPOINT/console' diff --git a/app/data/container-build/cerc-laconic-dot-com/build.sh b/stack_orchestrator/data/container-build/cerc-laconic-dot-com/build.sh similarity index 100% rename from app/data/container-build/cerc-laconic-dot-com/build.sh rename to stack_orchestrator/data/container-build/cerc-laconic-dot-com/build.sh diff --git a/app/data/container-build/cerc-laconic-registry-cli/Dockerfile b/stack_orchestrator/data/container-build/cerc-laconic-registry-cli/Dockerfile similarity index 100% 
rename from app/data/container-build/cerc-laconic-registry-cli/Dockerfile rename to stack_orchestrator/data/container-build/cerc-laconic-registry-cli/Dockerfile diff --git a/app/data/container-build/cerc-laconic-registry-cli/build.sh b/stack_orchestrator/data/container-build/cerc-laconic-registry-cli/build.sh similarity index 100% rename from app/data/container-build/cerc-laconic-registry-cli/build.sh rename to stack_orchestrator/data/container-build/cerc-laconic-registry-cli/build.sh diff --git a/app/data/container-build/cerc-laconic-registry-cli/create-demo-records.sh b/stack_orchestrator/data/container-build/cerc-laconic-registry-cli/create-demo-records.sh similarity index 100% rename from app/data/container-build/cerc-laconic-registry-cli/create-demo-records.sh rename to stack_orchestrator/data/container-build/cerc-laconic-registry-cli/create-demo-records.sh diff --git a/app/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-1.yml b/stack_orchestrator/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-1.yml similarity index 100% rename from app/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-1.yml rename to stack_orchestrator/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-1.yml diff --git a/app/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-2.yml b/stack_orchestrator/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-2.yml similarity index 100% rename from app/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-2.yml rename to stack_orchestrator/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-2.yml diff --git a/app/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-3.yml b/stack_orchestrator/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-3.yml similarity index 100% rename from 
app/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-3.yml rename to stack_orchestrator/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-3.yml diff --git a/app/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-4.yml b/stack_orchestrator/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-4.yml similarity index 100% rename from app/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-4.yml rename to stack_orchestrator/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-4.yml diff --git a/app/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-5.yml b/stack_orchestrator/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-5.yml similarity index 100% rename from app/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-5.yml rename to stack_orchestrator/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-5.yml diff --git a/app/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-6.yml b/stack_orchestrator/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-6.yml similarity index 100% rename from app/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-6.yml rename to stack_orchestrator/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-6.yml diff --git a/app/data/container-build/cerc-laconic-registry-cli/import-address.sh b/stack_orchestrator/data/container-build/cerc-laconic-registry-cli/import-address.sh similarity index 100% rename from app/data/container-build/cerc-laconic-registry-cli/import-address.sh rename to stack_orchestrator/data/container-build/cerc-laconic-registry-cli/import-address.sh diff --git a/app/data/container-build/cerc-laconic-registry-cli/import-key.sh b/stack_orchestrator/data/container-build/cerc-laconic-registry-cli/import-key.sh similarity index 100% rename from 
app/data/container-build/cerc-laconic-registry-cli/import-key.sh rename to stack_orchestrator/data/container-build/cerc-laconic-registry-cli/import-key.sh diff --git a/app/data/container-build/cerc-laconicd/build.sh b/stack_orchestrator/data/container-build/cerc-laconicd/build.sh similarity index 100% rename from app/data/container-build/cerc-laconicd/build.sh rename to stack_orchestrator/data/container-build/cerc-laconicd/build.sh diff --git a/app/data/container-build/cerc-lasso/build.sh b/stack_orchestrator/data/container-build/cerc-lasso/build.sh similarity index 100% rename from app/data/container-build/cerc-lasso/build.sh rename to stack_orchestrator/data/container-build/cerc-lasso/build.sh diff --git a/app/data/container-build/cerc-lighthouse-cli/build.sh b/stack_orchestrator/data/container-build/cerc-lighthouse-cli/build.sh similarity index 100% rename from app/data/container-build/cerc-lighthouse-cli/build.sh rename to stack_orchestrator/data/container-build/cerc-lighthouse-cli/build.sh diff --git a/app/data/container-build/cerc-lighthouse/Dockerfile b/stack_orchestrator/data/container-build/cerc-lighthouse/Dockerfile similarity index 100% rename from app/data/container-build/cerc-lighthouse/Dockerfile rename to stack_orchestrator/data/container-build/cerc-lighthouse/Dockerfile diff --git a/app/data/container-build/cerc-lighthouse/build.sh b/stack_orchestrator/data/container-build/cerc-lighthouse/build.sh similarity index 100% rename from app/data/container-build/cerc-lighthouse/build.sh rename to stack_orchestrator/data/container-build/cerc-lighthouse/build.sh diff --git a/app/data/container-build/cerc-lighthouse/start-lighthouse.sh b/stack_orchestrator/data/container-build/cerc-lighthouse/start-lighthouse.sh similarity index 100% rename from app/data/container-build/cerc-lighthouse/start-lighthouse.sh rename to stack_orchestrator/data/container-build/cerc-lighthouse/start-lighthouse.sh diff --git a/app/data/container-build/cerc-lotus/Dockerfile 
b/stack_orchestrator/data/container-build/cerc-lotus/Dockerfile similarity index 100% rename from app/data/container-build/cerc-lotus/Dockerfile rename to stack_orchestrator/data/container-build/cerc-lotus/Dockerfile diff --git a/app/data/container-build/cerc-lotus/build.sh b/stack_orchestrator/data/container-build/cerc-lotus/build.sh similarity index 100% rename from app/data/container-build/cerc-lotus/build.sh rename to stack_orchestrator/data/container-build/cerc-lotus/build.sh diff --git a/app/data/container-build/cerc-mobymask-snap/Dockerfile b/stack_orchestrator/data/container-build/cerc-mobymask-snap/Dockerfile similarity index 100% rename from app/data/container-build/cerc-mobymask-snap/Dockerfile rename to stack_orchestrator/data/container-build/cerc-mobymask-snap/Dockerfile diff --git a/app/data/container-build/cerc-mobymask-snap/build.sh b/stack_orchestrator/data/container-build/cerc-mobymask-snap/build.sh similarity index 100% rename from app/data/container-build/cerc-mobymask-snap/build.sh rename to stack_orchestrator/data/container-build/cerc-mobymask-snap/build.sh diff --git a/app/data/container-build/cerc-mobymask-ui/Dockerfile b/stack_orchestrator/data/container-build/cerc-mobymask-ui/Dockerfile similarity index 100% rename from app/data/container-build/cerc-mobymask-ui/Dockerfile rename to stack_orchestrator/data/container-build/cerc-mobymask-ui/Dockerfile diff --git a/app/data/container-build/cerc-mobymask-ui/build.sh b/stack_orchestrator/data/container-build/cerc-mobymask-ui/build.sh similarity index 100% rename from app/data/container-build/cerc-mobymask-ui/build.sh rename to stack_orchestrator/data/container-build/cerc-mobymask-ui/build.sh diff --git a/app/data/container-build/cerc-mobymask/Dockerfile b/stack_orchestrator/data/container-build/cerc-mobymask/Dockerfile similarity index 100% rename from app/data/container-build/cerc-mobymask/Dockerfile rename to stack_orchestrator/data/container-build/cerc-mobymask/Dockerfile diff --git 
a/app/data/container-build/cerc-mobymask/build.sh b/stack_orchestrator/data/container-build/cerc-mobymask/build.sh similarity index 100% rename from app/data/container-build/cerc-mobymask/build.sh rename to stack_orchestrator/data/container-build/cerc-mobymask/build.sh diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile b/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile new file mode 100644 index 00000000..d3ff3f1b --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile @@ -0,0 +1,44 @@ +# Originally from: https://github.com/devcontainers/images/blob/main/src/javascript-node/.devcontainer/Dockerfile +# [Choice] Node.js version (use -bullseye variants on local arm64/Apple Silicon): 18, 16, 14, 18-bullseye, 16-bullseye, 14-bullseye, 18-buster, 16-buster, 14-buster +ARG VARIANT=20-bullseye +FROM node:${VARIANT} + +ARG USERNAME=node +ARG NPM_GLOBAL=/usr/local/share/npm-global + +# Add NPM global to PATH. +ENV PATH=${NPM_GLOBAL}/bin:${PATH} +# Prevents npm from printing version warnings +ENV NPM_CONFIG_UPDATE_NOTIFIER=false + +RUN \ + # Configure global npm install location, use group to adapt to UID/GID changes + if ! cat /etc/group | grep -e "^npm:" > /dev/null 2>&1; then groupadd -r npm; fi \ + && usermod -a -G npm ${USERNAME} \ + && umask 0002 \ + && mkdir -p ${NPM_GLOBAL} \ + && touch /usr/local/etc/npmrc \ + && chown ${USERNAME}:npm ${NPM_GLOBAL} /usr/local/etc/npmrc \ + && chmod g+s ${NPM_GLOBAL} \ + && npm config -g set prefix ${NPM_GLOBAL} \ + && su ${USERNAME} -c "npm config -g set prefix ${NPM_GLOBAL}" \ + # Install eslint + && su ${USERNAME} -c "umask 0002 && npm install -g eslint" \ + # Install semver + && su ${USERNAME} -c "umask 0002 && npm install -g semver" \ + && npm cache clean --force > /dev/null 2>&1 + +# [Optional] Uncomment this section to install additional OS packages. 
+RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ + && apt-get -y install --no-install-recommends jq gettext-base moreutils + +# [Optional] Uncomment if you want to install more global node modules +# RUN su node -c "npm install -g " + +# Expose port for http +EXPOSE 3000 + +COPY /scripts /scripts + +# Default command sleeps forever so docker doesn't kill it +ENTRYPOINT ["/scripts/start-serving-app.sh"] diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile.webapp b/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile.webapp new file mode 100644 index 00000000..51664deb --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile.webapp @@ -0,0 +1,9 @@ +FROM cerc/nextjs-base:local + +ARG CERC_NEXT_VERSION=keep +ARG CERC_BUILD_TOOL + +WORKDIR /app +COPY . . +RUN rm -rf node_modules build .next* +RUN /scripts/build-app.sh /app diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/build.sh b/stack_orchestrator/data/container-build/cerc-nextjs-base/build.sh new file mode 100755 index 00000000..cca8d64b --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/build.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash +# Build cerc/laconic-registry-cli + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +# See: https://stackoverflow.com/a/246128/1701505 +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +CERC_CONTAINER_BUILD_WORK_DIR=${CERC_CONTAINER_BUILD_WORK_DIR:-$SCRIPT_DIR} +CERC_CONTAINER_BUILD_DOCKERFILE=${CERC_CONTAINER_BUILD_DOCKERFILE:-$SCRIPT_DIR/Dockerfile} +CERC_CONTAINER_BUILD_TAG=${CERC_CONTAINER_BUILD_TAG:-cerc/nextjs-base:local} + +docker build -t $CERC_CONTAINER_BUILD_TAG ${build_command_args} -f $CERC_CONTAINER_BUILD_DOCKERFILE $CERC_CONTAINER_BUILD_WORK_DIR + +if [ $? 
-eq 0 ] && [ "$CERC_CONTAINER_BUILD_TAG" != "cerc/nextjs-base:local" ]; then + cat < $TMP_ENV + set -a + source .env + source $TMP_ENV + set +a + rm -f $TMP_ENV +fi + +for f in $(find "$TRG_DIR" -regex ".*.[tj]sx?$" -type f | grep -v 'node_modules'); do + for e in $(cat "${f}" | tr -s '[:blank:]' '\n' | tr -s '[{},();]' '\n' | egrep -o '^"CERC_RUNTIME_ENV_[^\"]+"'); do + orig_name=$(echo -n "${e}" | sed 's/"//g') + cur_name=$(echo -n "${orig_name}" | sed 's/CERC_RUNTIME_ENV_//g') + cur_val=$(echo -n "\$${cur_name}" | envsubst) + if [ "$CERC_RETAIN_ENV_QUOTES" != "true" ]; then + cur_val=$(sed "s/^[\"']//" <<< "$cur_val" | sed "s/[\"']//") + fi + esc_val=$(sed 's/[&/\]/\\&/g' <<< "$cur_val") + echo "$f: $cur_name=$cur_val" + sed -i "s/$orig_name/$esc_val/g" $f + done +done diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/build-app.sh b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/build-app.sh new file mode 100755 index 00000000..ef6244cf --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/build-app.sh @@ -0,0 +1,137 @@ +#!/bin/bash + +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +CERC_MIN_NEXTVER=13.4.2 + +CERC_NEXT_VERSION="${CERC_NEXT_VERSION:-keep}" +CERC_BUILD_TOOL="${CERC_BUILD_TOOL}" +if [ -z "$CERC_BUILD_TOOL" ]; then + if [ -f "yarn.lock" ]; then + CERC_BUILD_TOOL=yarn + else + CERC_BUILD_TOOL=npm + fi +fi + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +WORK_DIR="${1:-/app}" + +cd "${WORK_DIR}" || exit 1 + +if [ ! -f "next.config.dist" ]; then + cp next.config.js next.config.dist +fi + +which js-beautify >/dev/null +if [ $? 
-ne 0 ]; then + npm i -g js-beautify +fi + +js-beautify next.config.dist > next.config.js +echo "" >> next.config.js + +WEBPACK_REQ_LINE=$(grep -n "require([\'\"]webpack[\'\"])" next.config.js | cut -d':' -f1) +if [ -z "$WEBPACK_REQ_LINE" ]; then + cat > next.config.js.0 < next.config.js.1 < { + a[v] = \`"CERC_RUNTIME_ENV_\${v.split(/\./).pop()}"\`; + return a; + }, {}); +} catch { + // If .env-list.json cannot be loaded, we are probably running in dev mode, so use process.env instead. + envMap = Object.keys(process.env).reduce((a, v) => { + if (v.startsWith('CERC_')) { + a[\`process.env.\${v}\`] = JSON.stringify(process.env[v]); + } + return a; + }, {}); +} +EOF + +CONFIG_LINES=$(wc -l next.config.js | awk '{ print $1 }') +ENV_LINE=$(grep -n 'env:' next.config.js | cut -d':' -f1) +WEBPACK_CONF_LINE=$(egrep -n 'webpack:\s+\([^,]+,' next.config.js | cut -d':' -f1) +NEXT_SECTION_ADJUSTMENT=0 + +if [ -n "$WEBPACK_CONF_LINE" ]; then + WEBPACK_CONF_VAR=$(egrep -n 'webpack:\s+\([^,]+,' next.config.js | cut -d',' -f1 | cut -d'(' -f2) + head -$(( ${WEBPACK_CONF_LINE} )) next.config.js > next.config.js.2 + cat > next.config.js.3 < next.config.js.2 + cat > next.config.js.3 < { + config.plugins.push(new webpack.DefinePlugin(envMap)); + return config; + }, +EOF + NEXT_SECTION_ADJUSTMENT=1 + NEXT_SECTION_LINE=$ENV_LINE +else + echo "WARNING: Cannot find location to insert environment variable map in next.config.js" 1>&2 + rm -f next.config.js.* + NEXT_SECTION_LINE=0 +fi + +tail -$(( ${CONFIG_LINES} - ${NEXT_SECTION_LINE} + ${NEXT_SECTION_ADJUSTMENT} )) next.config.js > next.config.js.5 + +cat next.config.js.* | sed 's/^ *//g' | js-beautify | grep -v 'process\.\env\.' | js-beautify > next.config.js +rm next.config.js.* + +"${SCRIPT_DIR}/find-env.sh" "$(pwd)" > .env-list.json + +if [ ! 
-f "package.dist" ]; then + cp package.json package.dist +fi + +cat package.dist | jq '.scripts.cerc_compile = "next experimental-compile"' | jq '.scripts.cerc_generate = "next experimental-generate"' > package.json + +CUR_NEXT_VERSION="`jq -r '.dependencies.next' package.json`" + +if [ "$CERC_NEXT_VERSION" != "keep" ] && [ "$CUR_NEXT_VERSION" != "$CERC_NEXT_VERSION" ]; then + echo "Changing 'next' version specifier from '$CUR_NEXT_VERSION' to '$CERC_NEXT_VERSION' (set with '--extra-build-args \"--build-arg CERC_NEXT_VERSION=$CERC_NEXT_VERSION\"')" + cat package.json | jq ".dependencies.next = \"$CERC_NEXT_VERSION\"" | sponge package.json +fi + +$CERC_BUILD_TOOL install || exit 1 + +CUR_NEXT_VERSION=`jq -r '.version' node_modules/next/package.json` + +semver -p -r ">=$CERC_MIN_NEXTVER" $CUR_NEXT_VERSION +if [ $? -ne 0 ]; then + cat <" + +############################################################################### + +EOF + cat package.json | jq ".dependencies.next = \"^$CERC_MIN_NEXTVER\"" | sponge package.json + $CERC_BUILD_TOOL install || exit 1 +fi + +$CERC_BUILD_TOOL run cerc_compile || exit 1 + +exit 0 diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/find-env.sh b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/find-env.sh new file mode 100755 index 00000000..59cb3d49 --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/find-env.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +WORK_DIR="${1:-./}" +TMPF=$(mktemp) + +cd "$WORK_DIR" || exit 1 + +for d in $(find . -maxdepth 1 -type d | grep -v '\./\.' | grep '/' | cut -d'/' -f2); do + egrep "/$d[/$]?" .gitignore >/dev/null 2>/dev/null + if [ $? 
-eq 0 ]; then + continue + fi + + for f in $(find "$d" -regex ".*.[tj]sx?$" -type f); do + cat "$f" | tr -s '[:blank:]' '\n' | tr -s '[{},()]' '\n' | egrep -o 'process.env.[A-Za-z0-9_]+' >> $TMPF + done +done + +NEXT_CONF="next.config.js next.config.dist" +for f in $NEXT_CONF; do + cat "$f" | tr -s '[:blank:]' '\n' | tr -s '[{},()]' '\n' | egrep -o 'process.env.[A-Za-z0-9_]+' >> $TMPF +done + +cat $TMPF | sort -u | jq --raw-input . | jq --slurp . +rm -f $TMPF diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/start-serving-app.sh b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/start-serving-app.sh new file mode 100755 index 00000000..bf35bcdb --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/start-serving-app.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env bash +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +CERC_MAX_GENERATE_TIME=${CERC_MAX_GENERATE_TIME:-60} +tpid="" + +ctrl_c() { + kill $tpid $(ps -ef | grep node | grep next | awk '{print $2}') 2>/dev/null +} + +trap ctrl_c INT + +CERC_BUILD_TOOL="${CERC_BUILD_TOOL}" +if [ -z "$CERC_BUILD_TOOL" ]; then + if [ -f "yarn.lock" ] && [ ! -f "package-lock.json" ]; then + CERC_BUILD_TOOL=yarn + else + CERC_BUILD_TOOL=npm + fi +fi + +CERC_WEBAPP_FILES_DIR="${CERC_WEBAPP_FILES_DIR:-/app}" +cd "$CERC_WEBAPP_FILES_DIR" + +"$SCRIPT_DIR/apply-runtime-env.sh" "`pwd`" .next .next-r +mv .next .next.old +mv .next-r/.next . + +if [ "$CERC_NEXTJS_SKIP_GENERATE" != "true" ]; then + jq -e '.scripts.cerc_generate' package.json >/dev/null + if [ $? -eq 0 ]; then + npm run cerc_generate > gen.out 2>&1 & + tail -f gen.out & + tpid=$! + + count=0 + generate_done="false" + while [ $count -lt $CERC_MAX_GENERATE_TIME ] && [ "$generate_done" == "false" ]; do + sleep 1 + count=$((count + 1)) + grep 'rendered as static HTML' gen.out > /dev/null + if [ $? 
-eq 0 ]; then + generate_done="true" + fi + done + + if [ $generate_done != "true" ]; then + echo "ERROR: 'npm run cerc_generate' not successful within CERC_MAX_GENERATE_TIME" 1>&2 + exit 1 + fi + + kill $tpid $(ps -ef | grep node | grep next | grep generate | awk '{print $2}') 2>/dev/null + tpid="" + fi +fi + +$CERC_BUILD_TOOL start . -p ${CERC_LISTEN_PORT:-3000} diff --git a/app/data/container-build/cerc-nitro-contracts/Dockerfile b/stack_orchestrator/data/container-build/cerc-nitro-contracts/Dockerfile similarity index 100% rename from app/data/container-build/cerc-nitro-contracts/Dockerfile rename to stack_orchestrator/data/container-build/cerc-nitro-contracts/Dockerfile diff --git a/app/data/container-build/cerc-nitro-contracts/build.sh b/stack_orchestrator/data/container-build/cerc-nitro-contracts/build.sh similarity index 100% rename from app/data/container-build/cerc-nitro-contracts/build.sh rename to stack_orchestrator/data/container-build/cerc-nitro-contracts/build.sh diff --git a/app/data/container-build/cerc-nitro-rpc-client/Dockerfile b/stack_orchestrator/data/container-build/cerc-nitro-rpc-client/Dockerfile similarity index 100% rename from app/data/container-build/cerc-nitro-rpc-client/Dockerfile rename to stack_orchestrator/data/container-build/cerc-nitro-rpc-client/Dockerfile diff --git a/app/data/container-build/cerc-nitro-rpc-client/build.sh b/stack_orchestrator/data/container-build/cerc-nitro-rpc-client/build.sh similarity index 100% rename from app/data/container-build/cerc-nitro-rpc-client/build.sh rename to stack_orchestrator/data/container-build/cerc-nitro-rpc-client/build.sh diff --git a/app/data/container-build/cerc-optimism-contracts/Dockerfile b/stack_orchestrator/data/container-build/cerc-optimism-contracts/Dockerfile similarity index 94% rename from app/data/container-build/cerc-optimism-contracts/Dockerfile rename to stack_orchestrator/data/container-build/cerc-optimism-contracts/Dockerfile index ed9c4b22..2499df0a 100644 --- 
a/app/data/container-build/cerc-optimism-contracts/Dockerfile +++ b/stack_orchestrator/data/container-build/cerc-optimism-contracts/Dockerfile @@ -17,6 +17,6 @@ WORKDIR /app COPY . . RUN echo "Building optimism" && \ - yarn && yarn build + pnpm install && pnpm build WORKDIR /app/packages/contracts-bedrock diff --git a/app/data/container-build/cerc-optimism-contracts/build.sh b/stack_orchestrator/data/container-build/cerc-optimism-contracts/build.sh similarity index 100% rename from app/data/container-build/cerc-optimism-contracts/build.sh rename to stack_orchestrator/data/container-build/cerc-optimism-contracts/build.sh diff --git a/app/data/container-build/cerc-optimism-l2geth/build.sh b/stack_orchestrator/data/container-build/cerc-optimism-l2geth/build.sh similarity index 100% rename from app/data/container-build/cerc-optimism-l2geth/build.sh rename to stack_orchestrator/data/container-build/cerc-optimism-l2geth/build.sh diff --git a/app/data/container-build/cerc-optimism-op-batcher/Dockerfile b/stack_orchestrator/data/container-build/cerc-optimism-op-batcher/Dockerfile similarity index 87% rename from app/data/container-build/cerc-optimism-op-batcher/Dockerfile rename to stack_orchestrator/data/container-build/cerc-optimism-op-batcher/Dockerfile index 23d6b629..f52e75b9 100644 --- a/app/data/container-build/cerc-optimism-op-batcher/Dockerfile +++ b/stack_orchestrator/data/container-build/cerc-optimism-op-batcher/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.19.0-alpine3.15 as builder +FROM golang:1.21.0-alpine3.18 as builder ARG VERSION=v0.0.0 @@ -9,7 +9,7 @@ COPY ./op-batcher /app/op-batcher COPY ./op-bindings /app/op-bindings COPY ./op-node /app/op-node COPY ./op-service /app/op-service -COPY ./op-signer /app/op-signer +#COPY ./op-signer /app/op-signer COPY ./go.mod /app/go.mod COPY ./go.sum /app/go.sum @@ -23,7 +23,7 @@ ARG TARGETOS TARGETARCH RUN make op-batcher VERSION="$VERSION" GOOS=$TARGETOS GOARCH=$TARGETARCH -FROM alpine:3.15 +FROM alpine:3.18 RUN apk add 
--no-cache jq bash diff --git a/app/data/container-build/cerc-optimism-op-batcher/build.sh b/stack_orchestrator/data/container-build/cerc-optimism-op-batcher/build.sh similarity index 100% rename from app/data/container-build/cerc-optimism-op-batcher/build.sh rename to stack_orchestrator/data/container-build/cerc-optimism-op-batcher/build.sh diff --git a/app/data/container-build/cerc-optimism-op-node/Dockerfile b/stack_orchestrator/data/container-build/cerc-optimism-op-node/Dockerfile similarity index 91% rename from app/data/container-build/cerc-optimism-op-node/Dockerfile rename to stack_orchestrator/data/container-build/cerc-optimism-op-node/Dockerfile index 17d273b6..ad63bb2c 100644 --- a/app/data/container-build/cerc-optimism-op-node/Dockerfile +++ b/stack_orchestrator/data/container-build/cerc-optimism-op-node/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.19.0-alpine3.15 as builder +FROM golang:1.21.0-alpine3.18 as builder ARG VERSION=v0.0.0 @@ -21,7 +21,7 @@ ARG TARGETOS TARGETARCH RUN make op-node VERSION="$VERSION" GOOS=$TARGETOS GOARCH=$TARGETARCH -FROM alpine:3.15 +FROM alpine:3.18 RUN apk add --no-cache openssl jq diff --git a/app/data/container-build/cerc-optimism-op-node/build.sh b/stack_orchestrator/data/container-build/cerc-optimism-op-node/build.sh similarity index 100% rename from app/data/container-build/cerc-optimism-op-node/build.sh rename to stack_orchestrator/data/container-build/cerc-optimism-op-node/build.sh diff --git a/app/data/container-build/cerc-optimism-op-proposer/Dockerfile b/stack_orchestrator/data/container-build/cerc-optimism-op-proposer/Dockerfile similarity index 87% rename from app/data/container-build/cerc-optimism-op-proposer/Dockerfile rename to stack_orchestrator/data/container-build/cerc-optimism-op-proposer/Dockerfile index e91aa4bb..9032a7ff 100644 --- a/app/data/container-build/cerc-optimism-op-proposer/Dockerfile +++ b/stack_orchestrator/data/container-build/cerc-optimism-op-proposer/Dockerfile @@ -1,4 +1,4 @@ -FROM 
golang:1.19.0-alpine3.15 as builder +FROM golang:1.21.0-alpine3.18 as builder ARG VERSION=v0.0.0 @@ -9,7 +9,7 @@ COPY ./op-proposer /app/op-proposer COPY ./op-bindings /app/op-bindings COPY ./op-node /app/op-node COPY ./op-service /app/op-service -COPY ./op-signer /app/op-signer +#COPY ./op-signer /app/op-signer COPY ./go.mod /app/go.mod COPY ./go.sum /app/go.sum COPY ./.git /app/.git @@ -22,7 +22,7 @@ ARG TARGETOS TARGETARCH RUN make op-proposer VERSION="$VERSION" GOOS=$TARGETOS GOARCH=$TARGETARCH -FROM alpine:3.15 +FROM alpine:3.18 RUN apk add --no-cache jq bash diff --git a/app/data/container-build/cerc-optimism-op-proposer/build.sh b/stack_orchestrator/data/container-build/cerc-optimism-op-proposer/build.sh similarity index 100% rename from app/data/container-build/cerc-optimism-op-proposer/build.sh rename to stack_orchestrator/data/container-build/cerc-optimism-op-proposer/build.sh diff --git a/app/data/container-build/cerc-plugeth-statediff/build.sh b/stack_orchestrator/data/container-build/cerc-plugeth-statediff/build.sh similarity index 100% rename from app/data/container-build/cerc-plugeth-statediff/build.sh rename to stack_orchestrator/data/container-build/cerc-plugeth-statediff/build.sh diff --git a/stack_orchestrator/data/container-build/cerc-plugeth-with-plugins/Dockerfile b/stack_orchestrator/data/container-build/cerc-plugeth-with-plugins/Dockerfile new file mode 100644 index 00000000..87d050ea --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-plugeth-with-plugins/Dockerfile @@ -0,0 +1,22 @@ +# Using the same golang image as used to build plugeth: https://git.vdb.to/cerc-io/plugeth/src/branch/statediff/Dockerfile +FROM golang:1.20-alpine3.18 as delve + +# Add delve so that we can do remote debugging. +RUN go install github.com/go-delve/delve/cmd/dlv@latest + +FROM cerc/plugeth-statediff:local as statediff +FROM cerc/plugeth:local as plugeth + +FROM alpine:3.18 + +# Install tools often used in scripting, like bash, wget, and jq. 
+RUN apk add --no-cache ca-certificates bash wget curl python3 bind-tools postgresql-client jq + +COPY --from=delve /go/bin/dlv /usr/local/bin/ +COPY --from=plugeth /usr/local/bin/geth /usr/local/bin/ + +# Place all plugeth plugins in /usr/local/lib/plugeth +COPY --from=statediff /usr/local/lib/statediff.so /usr/local/lib/plugeth/ + +EXPOSE 8545 8546 8551 6060 30303 30303/udp 40000 +ENTRYPOINT ["geth"] diff --git a/stack_orchestrator/data/container-build/cerc-plugeth-with-plugins/build.sh b/stack_orchestrator/data/container-build/cerc-plugeth-with-plugins/build.sh new file mode 100755 index 00000000..9ab44946 --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-plugeth-with-plugins/build.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# Build cerc/cerc-plugeth-with-plugins +set -x + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +docker build -t cerc/plugeth-with-plugins:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} $SCRIPT_DIR diff --git a/app/data/container-build/cerc-plugeth/build.sh b/stack_orchestrator/data/container-build/cerc-plugeth/build.sh similarity index 100% rename from app/data/container-build/cerc-plugeth/build.sh rename to stack_orchestrator/data/container-build/cerc-plugeth/build.sh diff --git a/app/data/container-build/cerc-pocket/build.sh b/stack_orchestrator/data/container-build/cerc-pocket/build.sh similarity index 100% rename from app/data/container-build/cerc-pocket/build.sh rename to stack_orchestrator/data/container-build/cerc-pocket/build.sh diff --git a/app/data/container-build/cerc-ponder/Dockerfile b/stack_orchestrator/data/container-build/cerc-ponder/Dockerfile similarity index 100% rename from app/data/container-build/cerc-ponder/Dockerfile rename to stack_orchestrator/data/container-build/cerc-ponder/Dockerfile diff --git a/app/data/container-build/cerc-ponder/build.sh b/stack_orchestrator/data/container-build/cerc-ponder/build.sh 
similarity index 100% rename from app/data/container-build/cerc-ponder/build.sh rename to stack_orchestrator/data/container-build/cerc-ponder/build.sh diff --git a/app/data/container-build/cerc-react-peer/Dockerfile b/stack_orchestrator/data/container-build/cerc-react-peer/Dockerfile similarity index 100% rename from app/data/container-build/cerc-react-peer/Dockerfile rename to stack_orchestrator/data/container-build/cerc-react-peer/Dockerfile diff --git a/app/data/container-build/cerc-react-peer/apply-webapp-config.sh b/stack_orchestrator/data/container-build/cerc-react-peer/apply-webapp-config.sh similarity index 100% rename from app/data/container-build/cerc-react-peer/apply-webapp-config.sh rename to stack_orchestrator/data/container-build/cerc-react-peer/apply-webapp-config.sh diff --git a/app/data/container-build/cerc-react-peer/build.sh b/stack_orchestrator/data/container-build/cerc-react-peer/build.sh similarity index 100% rename from app/data/container-build/cerc-react-peer/build.sh rename to stack_orchestrator/data/container-build/cerc-react-peer/build.sh diff --git a/app/data/container-build/cerc-react-peer/start-serving-app.sh b/stack_orchestrator/data/container-build/cerc-react-peer/start-serving-app.sh similarity index 100% rename from app/data/container-build/cerc-react-peer/start-serving-app.sh rename to stack_orchestrator/data/container-build/cerc-react-peer/start-serving-app.sh diff --git a/app/data/container-build/cerc-reth/build.sh b/stack_orchestrator/data/container-build/cerc-reth/build.sh similarity index 100% rename from app/data/container-build/cerc-reth/build.sh rename to stack_orchestrator/data/container-build/cerc-reth/build.sh diff --git a/app/data/container-build/cerc-sushiswap-subgraphs/Dockerfile b/stack_orchestrator/data/container-build/cerc-sushiswap-subgraphs/Dockerfile similarity index 100% rename from app/data/container-build/cerc-sushiswap-subgraphs/Dockerfile rename to 
stack_orchestrator/data/container-build/cerc-sushiswap-subgraphs/Dockerfile diff --git a/app/data/container-build/cerc-sushiswap-subgraphs/build.sh b/stack_orchestrator/data/container-build/cerc-sushiswap-subgraphs/build.sh similarity index 100% rename from app/data/container-build/cerc-sushiswap-subgraphs/build.sh rename to stack_orchestrator/data/container-build/cerc-sushiswap-subgraphs/build.sh diff --git a/app/data/container-build/cerc-sushiswap-v3-core/Dockerfile b/stack_orchestrator/data/container-build/cerc-sushiswap-v3-core/Dockerfile similarity index 100% rename from app/data/container-build/cerc-sushiswap-v3-core/Dockerfile rename to stack_orchestrator/data/container-build/cerc-sushiswap-v3-core/Dockerfile diff --git a/app/data/container-build/cerc-sushiswap-v3-core/build.sh b/stack_orchestrator/data/container-build/cerc-sushiswap-v3-core/build.sh similarity index 100% rename from app/data/container-build/cerc-sushiswap-v3-core/build.sh rename to stack_orchestrator/data/container-build/cerc-sushiswap-v3-core/build.sh diff --git a/app/data/container-build/cerc-sushiswap-v3-periphery/Dockerfile b/stack_orchestrator/data/container-build/cerc-sushiswap-v3-periphery/Dockerfile similarity index 100% rename from app/data/container-build/cerc-sushiswap-v3-periphery/Dockerfile rename to stack_orchestrator/data/container-build/cerc-sushiswap-v3-periphery/Dockerfile diff --git a/app/data/container-build/cerc-sushiswap-v3-periphery/build.sh b/stack_orchestrator/data/container-build/cerc-sushiswap-v3-periphery/build.sh similarity index 100% rename from app/data/container-build/cerc-sushiswap-v3-periphery/build.sh rename to stack_orchestrator/data/container-build/cerc-sushiswap-v3-periphery/build.sh diff --git a/app/data/container-build/cerc-test-container/Dockerfile b/stack_orchestrator/data/container-build/cerc-test-container/Dockerfile similarity index 100% rename from app/data/container-build/cerc-test-container/Dockerfile rename to 
stack_orchestrator/data/container-build/cerc-test-container/Dockerfile diff --git a/app/data/container-build/cerc-test-container/build.sh b/stack_orchestrator/data/container-build/cerc-test-container/build.sh similarity index 100% rename from app/data/container-build/cerc-test-container/build.sh rename to stack_orchestrator/data/container-build/cerc-test-container/build.sh diff --git a/app/data/container-build/cerc-test-container/run.sh b/stack_orchestrator/data/container-build/cerc-test-container/run.sh similarity index 100% rename from app/data/container-build/cerc-test-container/run.sh rename to stack_orchestrator/data/container-build/cerc-test-container/run.sh diff --git a/app/data/container-build/cerc-test-contract/build.sh b/stack_orchestrator/data/container-build/cerc-test-contract/build.sh similarity index 100% rename from app/data/container-build/cerc-test-contract/build.sh rename to stack_orchestrator/data/container-build/cerc-test-contract/build.sh diff --git a/app/data/container-build/cerc-tx-spammer/build.sh b/stack_orchestrator/data/container-build/cerc-tx-spammer/build.sh similarity index 100% rename from app/data/container-build/cerc-tx-spammer/build.sh rename to stack_orchestrator/data/container-build/cerc-tx-spammer/build.sh diff --git a/stack_orchestrator/data/container-build/cerc-uniswap-interface/Dockerfile b/stack_orchestrator/data/container-build/cerc-uniswap-interface/Dockerfile new file mode 100644 index 00000000..59804896 --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-uniswap-interface/Dockerfile @@ -0,0 +1,10 @@ +FROM node:18.17.1-alpine3.18 + +RUN apk --update --no-cache add git make alpine-sdk bash + +WORKDIR /app + +COPY . . 
+ +RUN echo "Building uniswap-interface" && \ + yarn diff --git a/stack_orchestrator/data/container-build/cerc-uniswap-interface/build.sh b/stack_orchestrator/data/container-build/cerc-uniswap-interface/build.sh new file mode 100755 index 00000000..af1971b5 --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-uniswap-interface/build.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +# Build the uniswap-interface image +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +# See: https://stackoverflow.com/a/246128/1701505 +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +docker build -t cerc/uniswap-interface:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/uniswap-interface diff --git a/app/data/container-build/cerc-uniswap-v3-info/Dockerfile b/stack_orchestrator/data/container-build/cerc-uniswap-v3-info/Dockerfile similarity index 100% rename from app/data/container-build/cerc-uniswap-v3-info/Dockerfile rename to stack_orchestrator/data/container-build/cerc-uniswap-v3-info/Dockerfile diff --git a/app/data/container-build/cerc-uniswap-v3-info/build.sh b/stack_orchestrator/data/container-build/cerc-uniswap-v3-info/build.sh similarity index 100% rename from app/data/container-build/cerc-uniswap-v3-info/build.sh rename to stack_orchestrator/data/container-build/cerc-uniswap-v3-info/build.sh diff --git a/stack_orchestrator/data/container-build/cerc-urbit-globs-host/Dockerfile b/stack_orchestrator/data/container-build/cerc-urbit-globs-host/Dockerfile new file mode 100644 index 00000000..7a3ca9b7 --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-urbit-globs-host/Dockerfile @@ -0,0 +1,7 @@ +FROM python:3.13.0a2-alpine3.18 + +RUN apk --update --no-cache add alpine-sdk jq bash curl wget + +WORKDIR /app + +ENTRYPOINT [ "bash" ] diff --git a/stack_orchestrator/data/container-build/cerc-urbit-globs-host/build.sh b/stack_orchestrator/data/container-build/cerc-urbit-globs-host/build.sh new file mode 
100755 index 00000000..ebd396f1 --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-urbit-globs-host/build.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# Build the urbit-globs-host image + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +# See: https://stackoverflow.com/a/246128/1701505 +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +docker build -t cerc/urbit-globs-host:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${SCRIPT_DIR} diff --git a/app/data/container-build/cerc-watcher-azimuth/Dockerfile b/stack_orchestrator/data/container-build/cerc-watcher-azimuth/Dockerfile similarity index 100% rename from app/data/container-build/cerc-watcher-azimuth/Dockerfile rename to stack_orchestrator/data/container-build/cerc-watcher-azimuth/Dockerfile diff --git a/app/data/container-build/cerc-watcher-azimuth/build.sh b/stack_orchestrator/data/container-build/cerc-watcher-azimuth/build.sh similarity index 100% rename from app/data/container-build/cerc-watcher-azimuth/build.sh rename to stack_orchestrator/data/container-build/cerc-watcher-azimuth/build.sh diff --git a/app/data/container-build/cerc-watcher-erc20/Dockerfile b/stack_orchestrator/data/container-build/cerc-watcher-erc20/Dockerfile similarity index 100% rename from app/data/container-build/cerc-watcher-erc20/Dockerfile rename to stack_orchestrator/data/container-build/cerc-watcher-erc20/Dockerfile diff --git a/app/data/container-build/cerc-watcher-erc20/build.sh b/stack_orchestrator/data/container-build/cerc-watcher-erc20/build.sh similarity index 100% rename from app/data/container-build/cerc-watcher-erc20/build.sh rename to stack_orchestrator/data/container-build/cerc-watcher-erc20/build.sh diff --git a/app/data/container-build/cerc-watcher-erc721/Dockerfile b/stack_orchestrator/data/container-build/cerc-watcher-erc721/Dockerfile similarity index 100% rename from app/data/container-build/cerc-watcher-erc721/Dockerfile rename to 
stack_orchestrator/data/container-build/cerc-watcher-erc721/Dockerfile diff --git a/app/data/container-build/cerc-watcher-erc721/build.sh b/stack_orchestrator/data/container-build/cerc-watcher-erc721/build.sh similarity index 100% rename from app/data/container-build/cerc-watcher-erc721/build.sh rename to stack_orchestrator/data/container-build/cerc-watcher-erc721/build.sh diff --git a/app/data/container-build/cerc-watcher-gelato/Dockerfile b/stack_orchestrator/data/container-build/cerc-watcher-gelato/Dockerfile similarity index 100% rename from app/data/container-build/cerc-watcher-gelato/Dockerfile rename to stack_orchestrator/data/container-build/cerc-watcher-gelato/Dockerfile diff --git a/app/data/container-build/cerc-watcher-gelato/build.sh b/stack_orchestrator/data/container-build/cerc-watcher-gelato/build.sh similarity index 100% rename from app/data/container-build/cerc-watcher-gelato/build.sh rename to stack_orchestrator/data/container-build/cerc-watcher-gelato/build.sh diff --git a/stack_orchestrator/data/container-build/cerc-watcher-merkl-sushiswap-v3/Dockerfile b/stack_orchestrator/data/container-build/cerc-watcher-merkl-sushiswap-v3/Dockerfile new file mode 100644 index 00000000..e09738ac --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-watcher-merkl-sushiswap-v3/Dockerfile @@ -0,0 +1,10 @@ +FROM node:18.17.1-alpine3.18 + +RUN apk --update --no-cache add git python3 alpine-sdk bash curl jq + +WORKDIR /app + +COPY . . 
+ +RUN echo "Installing dependencies and building merkl-sushiswap-v3-watcher-ts" && \ + yarn && yarn build diff --git a/stack_orchestrator/data/container-build/cerc-watcher-merkl-sushiswap-v3/build.sh b/stack_orchestrator/data/container-build/cerc-watcher-merkl-sushiswap-v3/build.sh new file mode 100755 index 00000000..b53ee621 --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-watcher-merkl-sushiswap-v3/build.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash +# Build cerc/watcher-merkl-sushiswap-v3 + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +# See: https://stackoverflow.com/a/246128/1701505 +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +docker build -t cerc/my-new-stack:local -f ${CERC_REPO_BASE_DIR}/my-new-stack/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/my-new-stack + +docker build -t cerc/watcher-merkl-sushiswap-v3:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/merkl-sushiswap-v3-watcher-ts diff --git a/app/data/container-build/cerc-watcher-mobymask-v2/Dockerfile b/stack_orchestrator/data/container-build/cerc-watcher-mobymask-v2/Dockerfile similarity index 100% rename from app/data/container-build/cerc-watcher-mobymask-v2/Dockerfile rename to stack_orchestrator/data/container-build/cerc-watcher-mobymask-v2/Dockerfile diff --git a/app/data/container-build/cerc-watcher-mobymask-v2/build.sh b/stack_orchestrator/data/container-build/cerc-watcher-mobymask-v2/build.sh similarity index 100% rename from app/data/container-build/cerc-watcher-mobymask-v2/build.sh rename to stack_orchestrator/data/container-build/cerc-watcher-mobymask-v2/build.sh diff --git a/app/data/container-build/cerc-watcher-mobymask-v3/Dockerfile b/stack_orchestrator/data/container-build/cerc-watcher-mobymask-v3/Dockerfile similarity index 100% rename from app/data/container-build/cerc-watcher-mobymask-v3/Dockerfile rename to stack_orchestrator/data/container-build/cerc-watcher-mobymask-v3/Dockerfile diff 
--git a/app/data/container-build/cerc-watcher-mobymask-v3/build.sh b/stack_orchestrator/data/container-build/cerc-watcher-mobymask-v3/build.sh similarity index 100% rename from app/data/container-build/cerc-watcher-mobymask-v3/build.sh rename to stack_orchestrator/data/container-build/cerc-watcher-mobymask-v3/build.sh diff --git a/app/data/container-build/cerc-watcher-mobymask/Dockerfile b/stack_orchestrator/data/container-build/cerc-watcher-mobymask/Dockerfile similarity index 100% rename from app/data/container-build/cerc-watcher-mobymask/Dockerfile rename to stack_orchestrator/data/container-build/cerc-watcher-mobymask/Dockerfile diff --git a/app/data/container-build/cerc-watcher-mobymask/build.sh b/stack_orchestrator/data/container-build/cerc-watcher-mobymask/build.sh similarity index 100% rename from app/data/container-build/cerc-watcher-mobymask/build.sh rename to stack_orchestrator/data/container-build/cerc-watcher-mobymask/build.sh diff --git a/stack_orchestrator/data/container-build/cerc-watcher-sushiswap-v3/Dockerfile b/stack_orchestrator/data/container-build/cerc-watcher-sushiswap-v3/Dockerfile new file mode 100644 index 00000000..ac6241c4 --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-watcher-sushiswap-v3/Dockerfile @@ -0,0 +1,10 @@ +FROM node:18.17.1-alpine3.18 + +RUN apk --update --no-cache add git python3 alpine-sdk bash curl jq + +WORKDIR /app + +COPY . . 
+ +RUN echo "Installing dependencies and building sushiswap-v3-watcher-ts" && \ + yarn && yarn build diff --git a/stack_orchestrator/data/container-build/cerc-watcher-sushiswap-v3/build.sh b/stack_orchestrator/data/container-build/cerc-watcher-sushiswap-v3/build.sh new file mode 100755 index 00000000..4eb79eee --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-watcher-sushiswap-v3/build.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# Build cerc/watcher-sushiswap-v3 + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +# See: https://stackoverflow.com/a/246128/1701505 +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +docker build -t cerc/watcher-sushiswap-v3:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/sushiswap-v3-watcher-ts diff --git a/app/data/container-build/cerc-watcher-sushiswap/Dockerfile b/stack_orchestrator/data/container-build/cerc-watcher-sushiswap/Dockerfile similarity index 100% rename from app/data/container-build/cerc-watcher-sushiswap/Dockerfile rename to stack_orchestrator/data/container-build/cerc-watcher-sushiswap/Dockerfile diff --git a/app/data/container-build/cerc-watcher-sushiswap/build.sh b/stack_orchestrator/data/container-build/cerc-watcher-sushiswap/build.sh similarity index 100% rename from app/data/container-build/cerc-watcher-sushiswap/build.sh rename to stack_orchestrator/data/container-build/cerc-watcher-sushiswap/build.sh diff --git a/app/data/container-build/cerc-watcher-ts/Dockerfile b/stack_orchestrator/data/container-build/cerc-watcher-ts/Dockerfile similarity index 100% rename from app/data/container-build/cerc-watcher-ts/Dockerfile rename to stack_orchestrator/data/container-build/cerc-watcher-ts/Dockerfile diff --git a/app/data/container-build/cerc-watcher-ts/build.sh b/stack_orchestrator/data/container-build/cerc-watcher-ts/build.sh similarity index 100% rename from app/data/container-build/cerc-watcher-ts/build.sh rename to 
stack_orchestrator/data/container-build/cerc-watcher-ts/build.sh diff --git a/app/data/container-build/cerc-watcher-uniswap-v3/Dockerfile b/stack_orchestrator/data/container-build/cerc-watcher-uniswap-v3/Dockerfile similarity index 100% rename from app/data/container-build/cerc-watcher-uniswap-v3/Dockerfile rename to stack_orchestrator/data/container-build/cerc-watcher-uniswap-v3/Dockerfile diff --git a/app/data/container-build/cerc-watcher-uniswap-v3/build.sh b/stack_orchestrator/data/container-build/cerc-watcher-uniswap-v3/build.sh similarity index 100% rename from app/data/container-build/cerc-watcher-uniswap-v3/build.sh rename to stack_orchestrator/data/container-build/cerc-watcher-uniswap-v3/build.sh diff --git a/app/data/container-build/cerc-webapp-base/Dockerfile b/stack_orchestrator/data/container-build/cerc-webapp-base/Dockerfile similarity index 100% rename from app/data/container-build/cerc-webapp-base/Dockerfile rename to stack_orchestrator/data/container-build/cerc-webapp-base/Dockerfile diff --git a/app/data/container-build/cerc-webapp-base/apply-webapp-config.sh b/stack_orchestrator/data/container-build/cerc-webapp-base/apply-webapp-config.sh similarity index 100% rename from app/data/container-build/cerc-webapp-base/apply-webapp-config.sh rename to stack_orchestrator/data/container-build/cerc-webapp-base/apply-webapp-config.sh diff --git a/app/data/container-build/cerc-webapp-base/build.sh b/stack_orchestrator/data/container-build/cerc-webapp-base/build.sh similarity index 100% rename from app/data/container-build/cerc-webapp-base/build.sh rename to stack_orchestrator/data/container-build/cerc-webapp-base/build.sh diff --git a/app/data/container-build/cerc-webapp-base/config.yml b/stack_orchestrator/data/container-build/cerc-webapp-base/config.yml similarity index 100% rename from app/data/container-build/cerc-webapp-base/config.yml rename to stack_orchestrator/data/container-build/cerc-webapp-base/config.yml diff --git 
a/app/data/container-build/cerc-webapp-base/start-serving-app.sh b/stack_orchestrator/data/container-build/cerc-webapp-base/start-serving-app.sh similarity index 100% rename from app/data/container-build/cerc-webapp-base/start-serving-app.sh rename to stack_orchestrator/data/container-build/cerc-webapp-base/start-serving-app.sh diff --git a/app/data/container-build/default-build.sh b/stack_orchestrator/data/container-build/default-build.sh similarity index 100% rename from app/data/container-build/default-build.sh rename to stack_orchestrator/data/container-build/default-build.sh diff --git a/app/data/container-image-list.txt b/stack_orchestrator/data/container-image-list.txt similarity index 93% rename from app/data/container-image-list.txt rename to stack_orchestrator/data/container-image-list.txt index 256f0a6f..fd295be5 100644 --- a/app/data/container-image-list.txt +++ b/stack_orchestrator/data/container-image-list.txt @@ -57,3 +57,6 @@ cerc/nitro-contracts cerc/mobymask-snap cerc/ponder cerc/nitro-rpc-client +cerc/watcher-merkl-sushiswap-v3 +cerc/watcher-sushiswap-v3 +cerc/uniswap-interface diff --git a/app/data/npm-package-list.txt b/stack_orchestrator/data/npm-package-list.txt similarity index 100% rename from app/data/npm-package-list.txt rename to stack_orchestrator/data/npm-package-list.txt diff --git a/app/data/pod-list.txt b/stack_orchestrator/data/pod-list.txt similarity index 95% rename from app/data/pod-list.txt rename to stack_orchestrator/data/pod-list.txt index 4ba1bac0..9ad000c7 100644 --- a/app/data/pod-list.txt +++ b/stack_orchestrator/data/pod-list.txt @@ -43,3 +43,5 @@ nitro-contracts mobymask-snap ponder ipld-eth-server-payments +merkl-sushiswap-v3 +sushiswap-v3 diff --git a/app/data/repository-list.txt b/stack_orchestrator/data/repository-list.txt similarity index 92% rename from app/data/repository-list.txt rename to stack_orchestrator/data/repository-list.txt index ceaa910c..cddaccce 100644 --- a/app/data/repository-list.txt +++ 
b/stack_orchestrator/data/repository-list.txt @@ -47,3 +47,6 @@ github.com/cerc-io/go-nitro github.com/cerc-io/ts-nitro github.com/cerc-io/mobymask-snap github.com/cerc-io/ponder +github.com/cerc-io/merkl-sushiswap-v3-watcher-ts +github.com/cerc-io/sushiswap-v3-watcher-ts +github.com/cerc-io/uniswap-interface diff --git a/stack_orchestrator/data/stacks/act-runner/README.md b/stack_orchestrator/data/stacks/act-runner/README.md new file mode 100644 index 00000000..3c6dd7b1 --- /dev/null +++ b/stack_orchestrator/data/stacks/act-runner/README.md @@ -0,0 +1,13 @@ +# act-runner stack + +## Example + +``` +$ laconic-so --stack act-runner deploy init --output act-runner-1.yml --config CERC_GITEA_RUNNER_REGISTRATION_TOKEN=FOO +$ laconic-so --stack act-runner deploy create --spec-file act-runner-1.yml --deployment-dir ~/opt/deployments/act-runner-1 +$ laconic-so deployment --dir ~/opt/deployments/act-runner-1 up + +$ laconic-so --stack act-runner deploy init --output act-runner-2.yml --config CERC_GITEA_RUNNER_REGISTRATION_TOKEN=BAR +$ laconic-so --stack act-runner deploy create --spec-file act-runner-2.yml --deployment-dir ~/opt/deployments/act-runner-2 +$ laconic-so deployment --dir ~/opt/deployments/act-runner-2 up +``` diff --git a/stack_orchestrator/data/stacks/act-runner/stack.yml b/stack_orchestrator/data/stacks/act-runner/stack.yml new file mode 100644 index 00000000..a236fccf --- /dev/null +++ b/stack_orchestrator/data/stacks/act-runner/stack.yml @@ -0,0 +1,15 @@ +version: "1.1" +name: act-runner +description: "Local act-runner" +repos: + - git.vdb.to/cerc-io/hosting + - gitea.com/gitea/act_runner +containers: + - cerc/act-runner + - cerc/act-runner-task-executor +pods: + - name: act-runner + repository: cerc-io/hosting + path: act-runner + pre_start_command: "pre_start.sh" + post_start_command: "post_start.sh" diff --git a/stack_orchestrator/data/stacks/azimuth/README.md b/stack_orchestrator/data/stacks/azimuth/README.md new file mode 100644 index 
00000000..f7d93f33 --- /dev/null +++ b/stack_orchestrator/data/stacks/azimuth/README.md @@ -0,0 +1,107 @@ +# Azimuth Watcher + +Instructions to setup and deploy Azimuth Watcher stack + +## Setup + +Prerequisite: `ipld-eth-server` RPC and GQL endpoints + +Clone required repositories: + +```bash +laconic-so --stack azimuth setup-repositories --pull +``` + +NOTE: If the repository already exists and checked out to a different version, `setup-repositories` command will throw an error. +For getting around this, the `azimuth-watcher-ts` repository can be removed and then run the command again. + +Build the container images: + +```bash +laconic-so --stack azimuth build-containers +``` + +This should create the required docker images in the local image registry. + +## Create a deployment + +First, create a spec file for the deployment, which will map the stack's ports and volumes to the host: +```bash +laconic-so --stack azimuth deploy init --output azimuth-spec.yml +``` + +### Ports + +Edit `network` in spec file to map container ports to same ports in host + +```yaml +... +network: + ports: + watcher-db: + - 0.0.0.0:15432:5432 + azimuth-watcher-server: + - 0.0.0.0:3001:3001 + censures-watcher-server: + - 0.0.0.0:3002:3002 + claims-watcher-server: + - 0.0.0.0:3003:3003 + conditional-star-release-watcher-server: + - 0.0.0.0:3004:3004 + delegated-sending-watcher-server: + - 0.0.0.0:3005:3005 + ecliptic-watcher-server: + - 0.0.0.0:3006:3006 + linear-star-release-watcher-server: + - 0.0.0.0:3007:3007 + polls-watcher-server: + - 0.0.0.0:3008:3008 + gateway-server: + - 0.0.0.0:4000:4000 +... +``` + +### Data volumes +Container data volumes are bind-mounted to specified paths in the host filesystem. +The default setup (generated by `laconic-so deploy init`) places the volumes in the `./data` subdirectory of the deployment directory. The default mappings can be customized by editing the "spec" file generated by `laconic-so deploy init`. 
+ +--- + +Once you've made any needed changes to the spec file, create a deployment from it: +```bash +laconic-so --stack azimuth deploy create --spec-file azimuth-spec.yml --deployment-dir azimuth-deployment +``` + +## Set env variables + +Inside the deployment directory, open the file `config.env` and add variable to update RPC endpoint : + + ```bash + # External RPC endpoints + CERC_IPLD_ETH_RPC= + ``` + +* NOTE: If RPC endpoint is on the host machine, use `host.docker.internal` as the hostname to access the host port, or use the `ip a` command to find the IP address of the `docker0` interface (this will usually be something like `172.17.0.1` or `172.18.0.1`) + +## Start the stack + +Start the deployment: +```bash +laconic-so deployment --dir azimuth-deployment start +``` + +* List and check the health status of all the containers using `docker ps` and wait for them to be `healthy` + +## Clean up + +To stop all azimuth services running in the background, while preserving chain data: + +```bash +laconic-so deployment --dir azimuth-deployment stop +``` + +To stop all azimuth services and also delete data: + +```bash +laconic-so deployment --dir azimuth-deployment stop --delete-volumes +``` diff --git a/app/data/stacks/azimuth/stack.yml b/stack_orchestrator/data/stacks/azimuth/stack.yml similarity index 66% rename from app/data/stacks/azimuth/stack.yml rename to stack_orchestrator/data/stacks/azimuth/stack.yml index 47e0d058..7adbe663 100644 --- a/app/data/stacks/azimuth/stack.yml +++ b/stack_orchestrator/data/stacks/azimuth/stack.yml @@ -1,7 +1,7 @@ version: "1.0" name: azimuth repos: - - github.com/cerc-io/azimuth-watcher-ts@v0.1.1 + - github.com/cerc-io/azimuth-watcher-ts@v0.1.2 containers: - cerc/watcher-azimuth pods: diff --git a/app/data/stacks/build-support/README.md b/stack_orchestrator/data/stacks/build-support/README.md similarity index 100% rename from app/data/stacks/build-support/README.md rename to 
stack_orchestrator/data/stacks/build-support/README.md diff --git a/app/data/stacks/build-support/stack.yml b/stack_orchestrator/data/stacks/build-support/stack.yml similarity index 100% rename from app/data/stacks/build-support/stack.yml rename to stack_orchestrator/data/stacks/build-support/stack.yml diff --git a/app/data/stacks/chain-chunker/README.md b/stack_orchestrator/data/stacks/chain-chunker/README.md similarity index 100% rename from app/data/stacks/chain-chunker/README.md rename to stack_orchestrator/data/stacks/chain-chunker/README.md diff --git a/app/data/stacks/chain-chunker/stack.yml b/stack_orchestrator/data/stacks/chain-chunker/stack.yml similarity index 87% rename from app/data/stacks/chain-chunker/stack.yml rename to stack_orchestrator/data/stacks/chain-chunker/stack.yml index d85aa057..2705f69a 100644 --- a/app/data/stacks/chain-chunker/stack.yml +++ b/stack_orchestrator/data/stacks/chain-chunker/stack.yml @@ -6,8 +6,10 @@ repos: - git.vdb.to/cerc-io/eth-statediff-service@v5 - git.vdb.to/cerc-io/ipld-eth-db@v5 - git.vdb.to/cerc-io/ipld-eth-server@v5 + - git.vdb.to/cerc-io/plugeth@statediff containers: - cerc/ipld-eth-state-snapshot - cerc/eth-statediff-service - cerc/ipld-eth-db - cerc/ipld-eth-server + - cerc/plugeth diff --git a/app/data/stacks/erc20/README.md b/stack_orchestrator/data/stacks/erc20/README.md similarity index 100% rename from app/data/stacks/erc20/README.md rename to stack_orchestrator/data/stacks/erc20/README.md diff --git a/app/data/stacks/erc20/stack.yml b/stack_orchestrator/data/stacks/erc20/stack.yml similarity index 100% rename from app/data/stacks/erc20/stack.yml rename to stack_orchestrator/data/stacks/erc20/stack.yml diff --git a/app/data/stacks/erc721/README.md b/stack_orchestrator/data/stacks/erc721/README.md similarity index 100% rename from app/data/stacks/erc721/README.md rename to stack_orchestrator/data/stacks/erc721/README.md diff --git a/app/data/stacks/erc721/stack.yml 
b/stack_orchestrator/data/stacks/erc721/stack.yml similarity index 100% rename from app/data/stacks/erc721/stack.yml rename to stack_orchestrator/data/stacks/erc721/stack.yml diff --git a/app/data/stacks/fixturenet-eth-loaded/README.md b/stack_orchestrator/data/stacks/fixturenet-eth-loaded/README.md similarity index 100% rename from app/data/stacks/fixturenet-eth-loaded/README.md rename to stack_orchestrator/data/stacks/fixturenet-eth-loaded/README.md diff --git a/app/data/stacks/fixturenet-eth-loaded/stack.yml b/stack_orchestrator/data/stacks/fixturenet-eth-loaded/stack.yml similarity index 100% rename from app/data/stacks/fixturenet-eth-loaded/stack.yml rename to stack_orchestrator/data/stacks/fixturenet-eth-loaded/stack.yml diff --git a/app/data/stacks/fixturenet-eth-tx/README.md b/stack_orchestrator/data/stacks/fixturenet-eth-tx/README.md similarity index 100% rename from app/data/stacks/fixturenet-eth-tx/README.md rename to stack_orchestrator/data/stacks/fixturenet-eth-tx/README.md diff --git a/app/data/stacks/fixturenet-eth-tx/stack.yml b/stack_orchestrator/data/stacks/fixturenet-eth-tx/stack.yml similarity index 100% rename from app/data/stacks/fixturenet-eth-tx/stack.yml rename to stack_orchestrator/data/stacks/fixturenet-eth-tx/stack.yml diff --git a/app/data/stacks/fixturenet-eth/README.md b/stack_orchestrator/data/stacks/fixturenet-eth/README.md similarity index 100% rename from app/data/stacks/fixturenet-eth/README.md rename to stack_orchestrator/data/stacks/fixturenet-eth/README.md diff --git a/app/data/stacks/fixturenet-eth/stack.yml b/stack_orchestrator/data/stacks/fixturenet-eth/stack.yml similarity index 100% rename from app/data/stacks/fixturenet-eth/stack.yml rename to stack_orchestrator/data/stacks/fixturenet-eth/stack.yml diff --git a/app/data/stacks/fixturenet-laconic-loaded/README.md b/stack_orchestrator/data/stacks/fixturenet-laconic-loaded/README.md similarity index 100% rename from app/data/stacks/fixturenet-laconic-loaded/README.md rename 
to stack_orchestrator/data/stacks/fixturenet-laconic-loaded/README.md diff --git a/app/data/stacks/fixturenet-laconic-loaded/stack.yml b/stack_orchestrator/data/stacks/fixturenet-laconic-loaded/stack.yml similarity index 100% rename from app/data/stacks/fixturenet-laconic-loaded/stack.yml rename to stack_orchestrator/data/stacks/fixturenet-laconic-loaded/stack.yml diff --git a/app/data/stacks/fixturenet-laconicd/README.md b/stack_orchestrator/data/stacks/fixturenet-laconicd/README.md similarity index 100% rename from app/data/stacks/fixturenet-laconicd/README.md rename to stack_orchestrator/data/stacks/fixturenet-laconicd/README.md diff --git a/app/data/stacks/fixturenet-laconicd/stack.yml b/stack_orchestrator/data/stacks/fixturenet-laconicd/stack.yml similarity index 100% rename from app/data/stacks/fixturenet-laconicd/stack.yml rename to stack_orchestrator/data/stacks/fixturenet-laconicd/stack.yml diff --git a/app/data/stacks/fixturenet-lotus/README.md b/stack_orchestrator/data/stacks/fixturenet-lotus/README.md similarity index 100% rename from app/data/stacks/fixturenet-lotus/README.md rename to stack_orchestrator/data/stacks/fixturenet-lotus/README.md diff --git a/app/data/stacks/fixturenet-lotus/stack.yml b/stack_orchestrator/data/stacks/fixturenet-lotus/stack.yml similarity index 100% rename from app/data/stacks/fixturenet-lotus/stack.yml rename to stack_orchestrator/data/stacks/fixturenet-lotus/stack.yml diff --git a/stack_orchestrator/data/stacks/fixturenet-optimism/README.md b/stack_orchestrator/data/stacks/fixturenet-optimism/README.md new file mode 100644 index 00000000..dd681aa5 --- /dev/null +++ b/stack_orchestrator/data/stacks/fixturenet-optimism/README.md @@ -0,0 +1,204 @@ +# fixturenet-optimism + +Instructions to setup and deploy an end-to-end L1+L2 stack with [fixturenet-eth](../fixturenet-eth/) (L1) and [Optimism](https://stack.optimism.io) (L2) + +We support running just the L2 part of stack, given an external L1 endpoint. 
Follow the [L2 only doc](./l2-only.md) for the same. + +## Setup + +Clone required repositories: + +```bash +laconic-so --stack fixturenet-optimism setup-repositories + +# If this throws an error as a result of being already checked out to a branch/tag in a repo, remove the repositories mentioned below and re-run the command +# The repositories are located in $HOME/cerc by default +``` + +Build the container images: + +```bash +laconic-so --stack fixturenet-optimism build-containers + +# If redeploying with changes in the stack containers +laconic-so --stack fixturenet-optimism build-containers --force-rebuild + +# If errors are thrown during build, old images used by this stack would have to be deleted +``` + +Note: this will take >10 mins depending on the specs of your machine, and **requires** 16GB of memory or greater. + +This should create the required docker images in the local image registry: +* `cerc/go-ethereum` +* `cerc/lighthouse` +* `cerc/fixturenet-eth-geth` +* `cerc/fixturenet-eth-lighthouse` +* `cerc/foundry` +* `cerc/optimism-contracts` +* `cerc/optimism-l2geth` +* `cerc/optimism-op-node` +* `cerc/optimism-op-batcher` +* `cerc/optimism-op-proposer` + + +## Create a deployment + +First, create a spec file for the deployment, which will map the stack's ports and volumes to the host: +```bash +laconic-so --stack fixturenet-optimism deploy init --map-ports-to-host any-fixed-random --output fixturenet-optimism-spec.yml +``` + +### Ports +It is usually necessary to expose certain container ports on one or more the host's addresses to allow incoming connections. +Any ports defined in the Docker compose file are exposed by default with random port assignments, bound to "any" interface (IP address 0.0.0.0), but the port mappings can be customized by editing the "spec" file generated by `laconic-so deploy init`. 
+ +In addition, a stack-wide port mapping "recipe" can be applied at the time the +`laconic-so deploy init` command is run, by supplying the desired recipe with the `--map-ports-to-host` option. The following recipes are supported: +| Recipe | Host Port Mapping | +|--------|-------------------| +| any-variable-random | Bind to 0.0.0.0 using a random port assigned at start time (default) | +| localhost-same | Bind to 127.0.0.1 using the same port number as exposed by the containers | +| any-same | Bind to 0.0.0.0 using the same port number as exposed by the containers | +| localhost-fixed-random | Bind to 127.0.0.1 using a random port number selected at the time the command is run (not checked for already in use)| +| any-fixed-random | Bind to 0.0.0.0 using a random port number selected at the time the command is run (not checked for already in use) | + +For example, you may wish to use `any-fixed-random` to generate the initial mappings and then edit the spec file to set the `fixturenet-eth-geth-1` RPC to port 8545 and the `op-geth` RPC to port 9545 on the host. + +Or, you may wish to use `any-same` for the initial mappings -- in which case you'll have to edit the spec to file to ensure the various geth instances aren't all trying to publish to host ports 8545/8546 at once. + +### Data volumes +Container data volumes are bind-mounted to specified paths in the host filesystem. +The default setup (generated by `laconic-so deploy init`) places the volumes in the `./data` subdirectory of the deployment directory. The default mappings can be customized by editing the "spec" file generated by `laconic-so deploy init`. 
+ +--- +Once you've made any needed changes to the spec file, create a deployment from it: +```bash +laconic-so --stack fixturenet-optimism deploy create --spec-file fixturenet-optimism-spec.yml --deployment-dir fixturenet-optimism-deployment +``` + +## Start the stack +Start the deployment: +```bash +laconic-so deployment --dir fixturenet-optimism-deployment start +``` +1. The `fixturenet-eth` L1 chain will start up first and begin producing blocks. +2. The `fixturenet-optimism-contracts` service will configure and deploy the Optimism contracts to L1, exiting when complete. This may take several minutes; you can follow the progress by following the container's logs (see below). +3. The `op-node` and `op-geth` services will initialize themselves (if not already initialized) and start +4. The remaining services, `op-batcher` and `op-proposer` will start + +### Logs +To list and monitor the running containers: + +```bash +laconic-so --stack fixturenet-optimism deploy ps + +# With status +docker ps + +# Check logs for a container +docker logs -f +``` + +## Example: bridge some ETH from L1 to L2 + +Send some ETH from the desired account to the `L1StandardBridgeProxy` contract on L1 to test bridging to L2. + +We can use the testing account `0xe6CE22afe802CAf5fF7d3845cec8c736ecc8d61F` which is pre-funded and unlocked, and the `cerc/foundry:local` container to make use of the `cast` cli. + +1. Note the docker network the stack is running on: +```bash +docker network ls +# The network name will be something like laconic-[some_hash]_default +``` +2. Set some variables: +```bash +L1_RPC=http://fixturenet-eth-geth-1:8545 +L2_RPC=http://op-geth:8545 +NETWORK= +DEPLOYMENT_CONTEXT= +ACCOUNT=0xe6CE22afe802CAf5fF7d3845cec8c736ecc8d61F +``` + +If you need to check the L1 chain-id, you can use: +```bash +docker run --rm --network $NETWORK cerc/foundry:local "cast chain-id --rpc-url $L1_RPC" +``` + +3. 
Check the account starting balance on L2 (it should be 0): +```bash +docker run --rm --network $NETWORK cerc/foundry:local "cast balance $ACCOUNT --rpc-url $L2_RPC" +# 0 +``` + +4. Read the bridge contract address from the L1 deployment records in the `op-node` container: +```bash +# get the container id for op-node +NODE_CONTAINER=$(docker ps --filter "name=op-node" -q) +BRIDGE=$(docker exec $NODE_CONTAINER cat /l1-deployment/$DEPLOYMENT_CONTEXT/L1StandardBridgeProxy.json | jq -r .address) +``` + +5. Use cast to send some ETH to the bridge contract: +```bash +docker run --rm --network $NETWORK cerc/foundry:local "cast send --from $ACCOUNT --value 1ether $BRIDGE --rpc-url $L1_RPC" +``` + +6. Allow a couple minutes for the bridge to complete + +7. Check the L2 balance again (it should show the bridged funds): +```bash +docker run --rm --network $NETWORK cerc/foundry:local "cast balance $ACCOUNT --rpc-url $L2_RPC" +# 1000000000000000000 +``` + +## Clean up + +To stop all services running in the background, while preserving chain data: + +```bash +laconic-so deployment --dir fixturenet-optimism-deployment stop +``` + +To stop all services and also delete chain data: + +```bash +laconic-so deployment --dir fixturenet-optimism-deployment stop --delete-volumes +``` + +## Troubleshooting + +* If `op-geth` service aborts or is restarted, the following error might occur in the `op-node` service: + + ```bash + WARN [02-16|21:22:02.868] Derivation process temporary error attempts=14 err="stage 0 failed resetting: temp: failed to find the L2 Heads to start from: failed to fetch L2 block by hash 0x0000000000000000000000000000000000000000000000000000000000000000: failed to determine block-hash of hash 0x0000000000000000000000000000000000000000000000000000000000000000, could not get payload: not found" + ``` + +* This means that the data directory that `op-geth` is using is corrupted and needs to be reinitialized; the containers `op-geth`, `op-node` and `op-batcher` need to be 
started afresh: + + WARNING: This will reset the L2 chain; consequently, all the data on it will be lost + + * Stop and remove the concerned containers: + + ```bash + # List the containers + docker ps -f "name=op-geth|op-node|op-batcher" + + # Force stop and remove the listed containers + docker rm -f $(docker ps -qf "name=op-geth|op-node|op-batcher") + ``` + + * Remove the concerned volume: + + ```bash + # List the volume + docker volume ls -q --filter name=l2_geth_data + + # Remove the listed volume + docker volume rm $(docker volume ls -q --filter name=l2_geth_data) + ``` + + * Re-run the deployment command used in [Deploy](#deploy) to restart the stopped containers + +## Known Issues + +* Resource requirements (memory + time) for building the `cerc/foundry` image are on the higher side + * `cerc/optimism-contracts` image is currently based on `cerc/foundry` (Optimism requires foundry installation) diff --git a/stack_orchestrator/data/stacks/fixturenet-optimism/deploy/commands.py b/stack_orchestrator/data/stacks/fixturenet-optimism/deploy/commands.py new file mode 100644 index 00000000..fa757cf5 --- /dev/null +++ b/stack_orchestrator/data/stacks/fixturenet-optimism/deploy/commands.py @@ -0,0 +1,39 @@ +# Copyright © 2023 Vulcanize + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +from stack_orchestrator.deploy.deployment_context import DeploymentContext +from ruamel.yaml import YAML + + +def create(context: DeploymentContext, extra_args): + # Slightly modify the base fixturenet-eth compose file to replace the startup script for fixturenet-eth-geth-1 + # We need to start geth with the flag to allow non eip-155 compliant transactions in order to publish the + # deterministic-deployment-proxy contract, which itself is a prereq for Optimism contract deployment + fixturenet_eth_compose_file = context.deployment_dir.joinpath('compose', 'docker-compose-fixturenet-eth.yml') + + with open(fixturenet_eth_compose_file, 'r') as yaml_file: + yaml = YAML() + yaml_data = yaml.load(yaml_file) + + new_script = '../config/fixturenet-optimism/run-geth.sh:/opt/testnet/run.sh' + + if new_script not in yaml_data['services']['fixturenet-eth-geth-1']['volumes']: + yaml_data['services']['fixturenet-eth-geth-1']['volumes'].append(new_script) + + with open(fixturenet_eth_compose_file, 'w') as yaml_file: + yaml = YAML() + yaml.dump(yaml_data, yaml_file) + + return None diff --git a/stack_orchestrator/data/stacks/fixturenet-optimism/l2-only.md b/stack_orchestrator/data/stacks/fixturenet-optimism/l2-only.md new file mode 100644 index 00000000..4299ca8d --- /dev/null +++ b/stack_orchestrator/data/stacks/fixturenet-optimism/l2-only.md @@ -0,0 +1,129 @@ +# fixturenet-optimism (L2-only) + +Instructions to setup and deploy L2 fixturenet using [Optimism](https://stack.optimism.io) + +## Setup + +Prerequisite: An L1 Ethereum RPC endpoint + +Clone required repositories: + +```bash +laconic-so --stack fixturenet-optimism setup-repositories --exclude git.vdb.to/cerc-io/go-ethereum + +# If this throws an error as a result of being already checked out to a branch/tag in a repo, remove the repositories mentioned below and re-run the command +``` + +Build the container images: + +```bash +laconic-so --stack fixturenet-optimism build-containers --include 
cerc/foundry,cerc/optimism-contracts,cerc/optimism-op-node,cerc/optimism-l2geth,cerc/optimism-op-batcher,cerc/optimism-op-proposer +``` + +This should create the required docker images in the local image registry: +* `cerc/foundry` +* `cerc/optimism-contracts` +* `cerc/optimism-l2geth` +* `cerc/optimism-op-node` +* `cerc/optimism-op-batcher` +* `cerc/optimism-op-proposer` + +## Create a deployment + +First, create a spec file for the deployment, which will map the stack's ports and volumes to the host: +```bash +laconic-so --stack fixturenet-optimism deploy init --map-ports-to-host any-fixed-random --output fixturenet-optimism-spec.yml +``` +### Ports +It is usually necessary to expose certain container ports on one or more the host's addresses to allow incoming connections. +Any ports defined in the Docker compose file are exposed by default with random port assignments, bound to "any" interface (IP address 0.0.0.0), but the port mappings can be customized by editing the "spec" file generated by `laconic-so deploy init`. + +In addition, a stack-wide port mapping "recipe" can be applied at the time the +`laconic-so deploy init` command is run, by supplying the desired recipe with the `--map-ports-to-host` option. 
The following recipes are supported: +| Recipe | Host Port Mapping | +|--------|-------------------| +| any-variable-random | Bind to 0.0.0.0 using a random port assigned at start time (default) | +| localhost-same | Bind to 127.0.0.1 using the same port number as exposed by the containers | +| any-same | Bind to 0.0.0.0 using the same port number as exposed by the containers | +| localhost-fixed-random | Bind to 127.0.0.1 using a random port number selected at the time the command is run (not checked for already in use)| +| any-fixed-random | Bind to 0.0.0.0 using a random port number selected at the time the command is run (not checked for already in use) | + +For example, you may wish to use `any-fixed-random` to generate the initial mappings and then edit the spec file to set the `op-geth` RPC to an easy to remember port like 8545 or 9545 on the host. + +### Data volumes +Container data volumes are bind-mounted to specified paths in the host filesystem. +The default setup (generated by `laconic-so deploy init`) places the volumes in the `./data` subdirectory of the deployment directory. The default mappings can be customized by editing the "spec" file generated by `laconic-so deploy init`. + +--- +Once you've made any needed changes to the spec file, create a deployment from it: +```bash +laconic-so --stack fixturenet-optimism deploy create --spec-file fixturenet-optimism-spec.yml --deployment-dir fixturenet-optimism-deployment +``` + +Finally, open the `stack.yml` file inside your deployment directory and, under the `pods:` section, remove (or comment out) the entry for `fixturenet-eth`. This will prevent the deployment from trying to spin up a new L1 chain when starting the stack. 
+ +## Set chain env variables + +Inside the deployment directory, open the file `config.env` and add the following variables to point the stack at your L1 rpc and provide account credentials ([defaults](../../config/fixturenet-optimism/l1-params.env)): + + ```bash + # External L1 endpoint + CERC_L1_CHAIN_ID= + CERC_L1_RPC= + CERC_L1_HOST= + CERC_L1_PORT= + + # URL to get CSV with credentials for accounts on L1 + # that are used to send balance to Optimism Proxy contract + # (enables them to do transactions on L2) + CERC_L1_ACCOUNTS_CSV_URL= + + # OR + # Specify the required account credentials for the Admin account + # Other generated accounts will be funded from this account, so it should contain ~20 Eth + CERC_L1_ADDRESS= + CERC_L1_PRIV_KEY= + ``` + +* NOTE: If L1 is running on the host machine, use `host.docker.internal` as the hostname to access the host port, or use the `ip a` command to find the IP address of the `docker0` interface (this will usually be something like `172.17.0.1` or `172.18.0.1`) + +## Start the stack +Start the deployment: +```bash +laconic-so deployment --dir fixturenet-optimism-deployment start +``` +1. The stack will check for a response from the L1 endpoint specified in your env file. +2. The `fixturenet-optimism-contracts` service will configure and deploy the Optimism contracts to L1, exiting when complete. This may take several minutes; you can follow the progress by following the container's logs (see below). +3. The `op-node` and `op-geth` services will initialize themselves (if not already initialized) and start +4. 
The remaining services, `op-batcher` and `op-proposer` will start + +### Logs +To list and monitor the running containers: + +```bash +laconic-so --stack fixturenet-optimism deploy ps + +# With status +docker ps + +# Check logs for a container +docker logs -f +``` + +## Clean up + +To stop all L2 services running in the background, while preserving chain data: + +```bash +laconic-so deployment --dir fixturenet-optimism-deployment stop +``` + +To stop all L2 services and also delete chain data: + +```bash +laconic-so deployment --dir fixturenet-optimism-deployment stop --delete-volumes +``` + +## Troubleshooting + +See [Troubleshooting](./README.md#troubleshooting) diff --git a/app/data/stacks/fixturenet-optimism/stack.yml b/stack_orchestrator/data/stacks/fixturenet-optimism/stack.yml similarity index 84% rename from app/data/stacks/fixturenet-optimism/stack.yml rename to stack_orchestrator/data/stacks/fixturenet-optimism/stack.yml index 75c7620b..bca34b16 100644 --- a/app/data/stacks/fixturenet-optimism/stack.yml +++ b/stack_orchestrator/data/stacks/fixturenet-optimism/stack.yml @@ -5,8 +5,8 @@ repos: - git.vdb.to/cerc-io/go-ethereum@v1.11.6-statediff-v5 - git.vdb.to/cerc-io/lighthouse - github.com/dboreham/foundry - - github.com/ethereum-optimism/optimism@v1.0.4 - - github.com/ethereum-optimism/op-geth@v1.101105.2 + - github.com/ethereum-optimism/optimism@op-node/v1.3.0 + - github.com/ethereum-optimism/op-geth@v1.101304.0 containers: - cerc/go-ethereum - cerc/lighthouse diff --git a/app/data/stacks/fixturenet-payments/README.md b/stack_orchestrator/data/stacks/fixturenet-payments/README.md similarity index 100% rename from app/data/stacks/fixturenet-payments/README.md rename to stack_orchestrator/data/stacks/fixturenet-payments/README.md diff --git a/app/data/stacks/fixturenet-payments/mobymask-demo.md b/stack_orchestrator/data/stacks/fixturenet-payments/mobymask-demo.md similarity index 100% rename from app/data/stacks/fixturenet-payments/mobymask-demo.md rename 
to stack_orchestrator/data/stacks/fixturenet-payments/mobymask-demo.md diff --git a/app/data/stacks/fixturenet-payments/ponder-demo.md b/stack_orchestrator/data/stacks/fixturenet-payments/ponder-demo.md similarity index 100% rename from app/data/stacks/fixturenet-payments/ponder-demo.md rename to stack_orchestrator/data/stacks/fixturenet-payments/ponder-demo.md diff --git a/app/data/stacks/fixturenet-payments/stack.yml b/stack_orchestrator/data/stacks/fixturenet-payments/stack.yml similarity index 100% rename from app/data/stacks/fixturenet-payments/stack.yml rename to stack_orchestrator/data/stacks/fixturenet-payments/stack.yml diff --git a/app/data/stacks/fixturenet-plugeth-tx/README.md b/stack_orchestrator/data/stacks/fixturenet-plugeth-tx/README.md similarity index 100% rename from app/data/stacks/fixturenet-plugeth-tx/README.md rename to stack_orchestrator/data/stacks/fixturenet-plugeth-tx/README.md diff --git a/app/data/stacks/fixturenet-plugeth-tx/stack.yml b/stack_orchestrator/data/stacks/fixturenet-plugeth-tx/stack.yml similarity index 100% rename from app/data/stacks/fixturenet-plugeth-tx/stack.yml rename to stack_orchestrator/data/stacks/fixturenet-plugeth-tx/stack.yml diff --git a/app/data/stacks/fixturenet-pocket/README.md b/stack_orchestrator/data/stacks/fixturenet-pocket/README.md similarity index 100% rename from app/data/stacks/fixturenet-pocket/README.md rename to stack_orchestrator/data/stacks/fixturenet-pocket/README.md diff --git a/app/data/stacks/fixturenet-pocket/stack.yml b/stack_orchestrator/data/stacks/fixturenet-pocket/stack.yml similarity index 100% rename from app/data/stacks/fixturenet-pocket/stack.yml rename to stack_orchestrator/data/stacks/fixturenet-pocket/stack.yml diff --git a/app/data/stacks/fixturenet-sushiswap-subgraph/README.md b/stack_orchestrator/data/stacks/fixturenet-sushiswap-subgraph/README.md similarity index 100% rename from app/data/stacks/fixturenet-sushiswap-subgraph/README.md rename to 
stack_orchestrator/data/stacks/fixturenet-sushiswap-subgraph/README.md diff --git a/app/data/stacks/fixturenet-sushiswap-subgraph/stack.yml b/stack_orchestrator/data/stacks/fixturenet-sushiswap-subgraph/stack.yml similarity index 100% rename from app/data/stacks/fixturenet-sushiswap-subgraph/stack.yml rename to stack_orchestrator/data/stacks/fixturenet-sushiswap-subgraph/stack.yml diff --git a/app/data/stacks/gelato/README.md b/stack_orchestrator/data/stacks/gelato/README.md similarity index 100% rename from app/data/stacks/gelato/README.md rename to stack_orchestrator/data/stacks/gelato/README.md diff --git a/app/data/stacks/gelato/stack.yml b/stack_orchestrator/data/stacks/gelato/stack.yml similarity index 100% rename from app/data/stacks/gelato/stack.yml rename to stack_orchestrator/data/stacks/gelato/stack.yml diff --git a/app/data/stacks/graph-node/README.md b/stack_orchestrator/data/stacks/graph-node/README.md similarity index 96% rename from app/data/stacks/graph-node/README.md rename to stack_orchestrator/data/stacks/graph-node/README.md index 0527efc0..df3ae1eb 100644 --- a/app/data/stacks/graph-node/README.md +++ b/stack_orchestrator/data/stacks/graph-node/README.md @@ -59,7 +59,7 @@ ports: Create deployment: ```bash -laconic-so deploy create --spec-file graph-node-spec.yml --deployment-dir graph-node-deployment +laconic-so --stack graph-node deploy create --spec-file graph-node-spec.yml --deployment-dir graph-node-deployment ``` ## Start the stack diff --git a/app/data/stacks/graph-node/deploy-subgraph.md b/stack_orchestrator/data/stacks/graph-node/deploy-subgraph.md similarity index 100% rename from app/data/stacks/graph-node/deploy-subgraph.md rename to stack_orchestrator/data/stacks/graph-node/deploy-subgraph.md diff --git a/app/data/stacks/graph-node/stack.yml b/stack_orchestrator/data/stacks/graph-node/stack.yml similarity index 100% rename from app/data/stacks/graph-node/stack.yml rename to stack_orchestrator/data/stacks/graph-node/stack.yml diff 
--git a/app/data/stacks/kubo/README.md b/stack_orchestrator/data/stacks/kubo/README.md similarity index 100% rename from app/data/stacks/kubo/README.md rename to stack_orchestrator/data/stacks/kubo/README.md diff --git a/app/data/stacks/kubo/stack.yml b/stack_orchestrator/data/stacks/kubo/stack.yml similarity index 100% rename from app/data/stacks/kubo/stack.yml rename to stack_orchestrator/data/stacks/kubo/stack.yml diff --git a/app/data/stacks/laconic-dot-com/README.md b/stack_orchestrator/data/stacks/laconic-dot-com/README.md similarity index 100% rename from app/data/stacks/laconic-dot-com/README.md rename to stack_orchestrator/data/stacks/laconic-dot-com/README.md diff --git a/app/data/stacks/laconic-dot-com/stack.yml b/stack_orchestrator/data/stacks/laconic-dot-com/stack.yml similarity index 100% rename from app/data/stacks/laconic-dot-com/stack.yml rename to stack_orchestrator/data/stacks/laconic-dot-com/stack.yml diff --git a/app/data/stacks/lasso/README.md b/stack_orchestrator/data/stacks/lasso/README.md similarity index 100% rename from app/data/stacks/lasso/README.md rename to stack_orchestrator/data/stacks/lasso/README.md diff --git a/app/data/stacks/lasso/stack.yml b/stack_orchestrator/data/stacks/lasso/stack.yml similarity index 100% rename from app/data/stacks/lasso/stack.yml rename to stack_orchestrator/data/stacks/lasso/stack.yml diff --git a/stack_orchestrator/data/stacks/mainnet-eth-plugeth/README.md b/stack_orchestrator/data/stacks/mainnet-eth-plugeth/README.md new file mode 100644 index 00000000..8ed6bebb --- /dev/null +++ b/stack_orchestrator/data/stacks/mainnet-eth-plugeth/README.md @@ -0,0 +1,141 @@ +# mainnet-eth-plugeth + +Deploys a "head-tracking" mainnet Ethereum stack comprising a [plugeth](https://git.vdb.to/cerc-io/plugeth) execution layer node and a [lighthouse](https://github.com/sigp/lighthouse) consensus layer node, with [plugeth-statediff](https://git.vdb.to/cerc-io/plugeth-statediff) for statediffing, 
[ipld-eth-db](https://git.vdb.to/cerc-io/ipld-eth-db) for storage, and [ipld-eth-server](https://git.vdb.to/cerc-io/ipld-eth-server) for indexed ETH IPLD objects. + +## Clone required repositories + +``` +$ laconic-so --stack mainnet-eth-plugeth setup-repositories +``` + +## Build containers + +``` +$ laconic-so --stack mainnet-eth-plugeth build-containers +``` + +## Create a deployment + +``` +$ laconic-so --stack mainnet-eth-plugeth deploy init --map-ports-to-host any-same --output mainnet-eth-plugeth-spec.yml +$ laconic-so --stack mainnet-eth-plugeth deploy create --spec-file mainnet-eth-plugeth-spec.yml --deployment-dir mainnet-eth-plugeth-deployment +``` +## Start the stack +``` +$ laconic-so deployment --dir mainnet-eth-plugeth-deployment start +``` +Display stack status: +``` +$ laconic-so deployment --dir mainnet-eth-plugeth-deployment ps +Running containers: +id: f39608eca04d72d6b0f1f3acefc5ebb52908da06e221d20c7138f7e3dff5e423, name: laconic-ef641b4d13eb61ed561b19be67063241-foundry-1, ports: +id: 4052b1eddd886ae0d6b41f9ff22e68a70f267b2bfde10f4b7b79b5bd1eeddcac, name: laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-plugeth-geth-1-1, ports: 30303/tcp, 30303/udp, 0.0.0.0:49184->40000/tcp, 0.0.0.0:49185->6060/tcp, 0.0.0.0:49186->8545/tcp, 8546/tcp +id: ac331232e597944b621b3b8942ace5dafb14524302cab338ff946c7f6e5a1d52, name: laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-plugeth-lighthouse-1-1, ports: 0.0.0.0:49187->8001/tcp +``` +See stack logs: +``` +$ laconic-so deployment --dir mainnet-eth-plugeth-deployment logs +time="2023-07-25T09:46:29-06:00" level=warning msg="The \"CERC_SCRIPT_DEBUG\" variable is not set. Defaulting to a blank string." 
+laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-plugeth-lighthouse-1-1 | Jul 25 15:45:13.362 INFO Logging to file path: "/var/lighthouse-data-dir/beacon/logs/beacon.log" +laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-plugeth-lighthouse-1-1 | Jul 25 15:45:13.365 INFO Lighthouse started version: Lighthouse/v4.1.0-693886b +laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-plugeth-lighthouse-1-1 | Jul 25 15:45:13.365 INFO Configured for network name: mainnet +laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-plugeth-lighthouse-1-1 | Jul 25 15:45:13.366 INFO Data directory initialised datadir: /var/lighthouse-data-dir +laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-plugeth-lighthouse-1-1 | Jul 25 15:45:13.366 INFO Deposit contract address: 0x00000000219ab540356cbb839cbe05303d7705fa, deploy_block: 11184524 +laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-plugeth-lighthouse-1-1 | Jul 25 15:45:13.424 INFO Starting checkpoint sync remote_url: https://beaconstate.ethstaker.cc/, service: beacon +``` +## Monitoring stack sync progress +Both go-ethereum and lighthouse will engage in an initial chain sync phase that will last up to several hours depending on hardware performance and network capacity. 
+Syncing can be monitored by looking for these log messages: +``` +Jul 24 12:34:17.001 INFO Downloading historical blocks est_time: 5 days 11 hrs, speed: 14.67 slots/sec, distance: 6932481 slots (137 weeks 3 days), service: slot_notifier +INFO [07-24|12:14:52.493] Syncing beacon headers downloaded=145,920 left=17,617,968 eta=1h23m32.815s +INFO [07-24|12:33:15.238] Syncing: chain download in progress synced=1.86% chain=148.94MiB headers=368,640@95.03MiB bodies=330,081@40.56MiB receipts=330,081@13.35MiB eta=37m54.505s +INFO [07-24|12:35:13.028] Syncing: state download in progress synced=1.32% state=4.64GiB accounts=2,850,314@677.57MiB slots=18,663,070@3.87GiB codes=26662@111.14MiB eta=3h18m0.699s +``` +Once synced up these log messages will be observed: +``` +INFO Synced slot: 6952515, block: 0x5bcb…f6d9, epoch: 217266, finalized_epoch: 217264, finalized_root: 0x6342…2c5c, exec_hash: 0x8d8c…2443 (verified), peers: 31, service: slot_notifier +INFO [07-25|03:04:48.941] Imported new potential chain segment number=17,767,316 hash=84f6e7..bc2cb0 blocks=1 txs=137 mgas=16.123 elapsed=57.087ms mgasps=282.434 dirty=461.46MiB +INFO [07-25|03:04:49.042] Chain head was updated number=17,767,316 hash=84f6e7..bc2cb0 root=ca58b2..8258c1 elapsed=2.480111ms +``` +## Clean up + +Stop the stack: +``` +$ laconic-so deployment --dir mainnet-eth-plugeth-deployment stop +``` +This leaves data volumes in place, allowing the stack to be subsequently re-started. +To permanently *delete* the stack's data volumes run: +``` +$ laconic-so deployment --dir mainnet-eth-plugeth-deployment stop --delete-data-volumes +``` +After deleting the volumes, any subsequent re-start will begin chain sync from cold. + +## Ports +It is usually necessary to expose certain container ports on one or more the host's addresses to allow incoming connections. 
+Any ports defined in the Docker compose file are exposed by default with random port assignments, bound to "any" interface (IP address 0.0.0.0), but the port mappings can be +customized by editing the "spec" file generated by `laconic-so deploy init`. + +In this example, ports `8545` and `5052` have been assigned to a specific addresses/port combination on the host, while +port `40000` has been left with random assignment: +``` +$ cat mainnet-eth-plugeth-spec.yml +stack: mainnet-eth-plugeth +ports: + mainnet-eth-plugeth-geth-1: + - '10.10.10.10:8545:8545' + - '40000' + mainnet-eth-plugeth-lighthouse-1: + - '10.10.10.10:5052:5052' +volumes: + mainnet_eth_plugeth_config_data: ./data/mainnet_eth_plugeth_config_data + mainnet_eth_plugeth_geth_1_data: ./data/mainnet_eth_plugeth_geth_1_data + mainnet_eth_plugeth_lighthouse_1_data: ./data/mainnet_eth_plugeth_lighthouse_1_data +``` +In addition, a stack-wide port mapping "recipe" can be applied at the time the +`laconic-so deploy init` command is run, by supplying the desired recipe with the `--map-ports-to-host` option. The following recipes are supported: +| Recipe | Host Port Mapping | +|--------|-------------------| +| any-variable-random | Bind to 0.0.0.0 using a random port assigned at start time (default) | +| localhost-same | Bind to 127.0.0.1 using the same port number as exposed by the containers | +| any-same | Bind to 0.0.0.0 using the same port number as exposed by the containers | +| localhost-fixed-random | Bind to 127.0.0.1 using a random port number selected at the time the command is run (not checked for already in use)| +| any-fixed-random | Bind to 0.0.0.0 using a random port number selected at the time the command is run (not checked for already in use) | +## Data volumes +Container data volumes are bind-mounted to specified paths in the host filesystem. 
+The default setup (generated by `laconic-so deploy init`) places the volumes in the `./data` subdirectory of the deployment directory: +``` +$ cat mainnet-eth-plugeth-spec.yml +stack: mainnet-eth-plugeth +ports: + mainnet-eth-plugeth-geth-1: + - '10.10.10.10:8545:8545' + - '40000' + mainnet-eth-plugeth-lighthouse-1: + - '10.10.10.10:5052:5052' +volumes: + mainnet_eth_plugeth_config_data: ./data/mainnet_eth_plugeth_config_data + mainnet_eth_plugeth_geth_1_data: ./data/mainnet_eth_plugeth_geth_1_data + mainnet_eth_plugeth_lighthouse_1_data: ./data/mainnet_eth_plugeth_lighthouse_1_data +``` +A synced-up stack will consume around 900GB of data volume space: +``` +$ sudo du -h mainnet-eth-plugeth-deployment/data/ +150M mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_lighthouse_1_data/beacon/freezer_db +25G mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_lighthouse_1_data/beacon/chain_db +16K mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_lighthouse_1_data/beacon/network +368M mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_lighthouse_1_data/beacon/logs +26G mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_lighthouse_1_data/beacon +26G mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_lighthouse_1_data +8.0K mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_config_data +4.0K mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_geth_1_data/keystore +527G mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_geth_1_data/geth/chaindata/ancient/chain +527G mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_geth_1_data/geth/chaindata/ancient +859G mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_geth_1_data/geth/chaindata +4.8M mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_geth_1_data/geth/nodes +242M mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_geth_1_data/geth/ethash +669M mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_geth_1_data/geth/triecache +860G 
mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_geth_1_data/geth +860G mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_geth_1_data +885G mainnet-eth-plugeth-deployment/data/ +``` diff --git a/app/deploy/deployer_factory.py b/stack_orchestrator/data/stacks/mainnet-eth-plugeth/deploy/commands.py similarity index 58% rename from app/deploy/deployer_factory.py rename to stack_orchestrator/data/stacks/mainnet-eth-plugeth/deploy/commands.py index de89b72c..5aba9547 100644 --- a/app/deploy/deployer_factory.py +++ b/stack_orchestrator/data/stacks/mainnet-eth-plugeth/deploy/commands.py @@ -13,14 +13,20 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . -from app.deploy.k8s.deploy_k8s import K8sDeployer -from app.deploy.compose.deploy_docker import DockerDeployer +from secrets import token_hex -def getDeployer(type, compose_files, compose_project_name, compose_env_file): - if type == "compose" or type is None: - return DockerDeployer(compose_files, compose_project_name, compose_env_file) - elif type == "k8s": - return K8sDeployer(compose_files, compose_project_name, compose_env_file) - else: - print(f"ERROR: deploy-to {type} is not valid") +def init(ctx): + return None + + +def setup(ctx): + return None + + +def create(ctx, extra_args): + # Generate the JWT secret and save to its config file + secret = token_hex(32) + jwt_file_path = ctx.deployment_dir.joinpath("data", "mainnet_eth_plugeth_config_data", "jwtsecret") + with open(jwt_file_path, 'w+') as jwt_file: + jwt_file.write(secret) diff --git a/stack_orchestrator/data/stacks/mainnet-eth-plugeth/stack.yml b/stack_orchestrator/data/stacks/mainnet-eth-plugeth/stack.yml new file mode 100644 index 00000000..7ade244c --- /dev/null +++ b/stack_orchestrator/data/stacks/mainnet-eth-plugeth/stack.yml @@ -0,0 +1,29 @@ +version: "1.2" +name: mainnet-eth +description: "Ethereum Mainnet" +repos: + - git.vdb.to/cerc-io/plugeth@statediff + - 
git.vdb.to/cerc-io/plugeth-statediff + - git.vdb.to/cerc-io/lighthouse + - git.vdb.to/cerc-io/ipld-eth-db@v5 + - git.vdb.to/cerc-io/ipld-eth-server@v5 + - git.vdb.to/cerc-io/keycloak-reg-api + - git.vdb.to/cerc-io/keycloak-reg-ui +containers: + - cerc/plugeth-statediff + - cerc/plugeth + - cerc/plugeth-with-plugins + - cerc/lighthouse + - cerc/lighthouse-cli + - cerc/ipld-eth-db + - cerc/ipld-eth-server + - cerc/keycloak + - cerc/webapp-base + - cerc/keycloak-reg-api + - cerc/keycloak-reg-ui +pods: + - mainnet-eth-plugeth + - mainnet-eth-ipld-eth-db + - mainnet-eth-ipld-eth-server + - mainnet-eth-keycloak + - mainnet-eth-metrics diff --git a/app/data/stacks/mainnet-eth/README.md b/stack_orchestrator/data/stacks/mainnet-eth/README.md similarity index 100% rename from app/data/stacks/mainnet-eth/README.md rename to stack_orchestrator/data/stacks/mainnet-eth/README.md diff --git a/app/data/stacks/mainnet-eth/deploy/commands.py b/stack_orchestrator/data/stacks/mainnet-eth/deploy/commands.py similarity index 100% rename from app/data/stacks/mainnet-eth/deploy/commands.py rename to stack_orchestrator/data/stacks/mainnet-eth/deploy/commands.py diff --git a/app/data/stacks/mainnet-eth/stack.yml b/stack_orchestrator/data/stacks/mainnet-eth/stack.yml similarity index 100% rename from app/data/stacks/mainnet-eth/stack.yml rename to stack_orchestrator/data/stacks/mainnet-eth/stack.yml diff --git a/app/data/stacks/mainnet-go-opera/README.md b/stack_orchestrator/data/stacks/mainnet-go-opera/README.md similarity index 100% rename from app/data/stacks/mainnet-go-opera/README.md rename to stack_orchestrator/data/stacks/mainnet-go-opera/README.md diff --git a/app/data/stacks/mainnet-go-opera/stack.yml b/stack_orchestrator/data/stacks/mainnet-go-opera/stack.yml similarity index 100% rename from app/data/stacks/mainnet-go-opera/stack.yml rename to stack_orchestrator/data/stacks/mainnet-go-opera/stack.yml diff --git a/app/data/stacks/mainnet-laconic/README.md 
b/stack_orchestrator/data/stacks/mainnet-laconic/README.md similarity index 100% rename from app/data/stacks/mainnet-laconic/README.md rename to stack_orchestrator/data/stacks/mainnet-laconic/README.md diff --git a/app/data/stacks/mainnet-laconic/deploy/commands.py b/stack_orchestrator/data/stacks/mainnet-laconic/deploy/commands.py similarity index 97% rename from app/data/stacks/mainnet-laconic/deploy/commands.py rename to stack_orchestrator/data/stacks/mainnet-laconic/deploy/commands.py index 030200f1..b611a0d6 100644 --- a/app/data/stacks/mainnet-laconic/deploy/commands.py +++ b/stack_orchestrator/data/stacks/mainnet-laconic/deploy/commands.py @@ -13,11 +13,11 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . -from app.util import get_yaml -from app.deploy.deploy_types import DeployCommandContext, LaconicStackSetupCommand, DeploymentContext -from app.deploy.stack_state import State -from app.deploy.deploy_util import VolumeMapping, run_container_command -from app.command_types import CommandOptions +from stack_orchestrator.util import get_yaml +from stack_orchestrator.deploy.deploy_types import DeployCommandContext, LaconicStackSetupCommand, DeploymentContext +from stack_orchestrator.deploy.stack_state import State +from stack_orchestrator.deploy.deploy_util import VolumeMapping, run_container_command +from stack_orchestrator.command_types import CommandOptions from enum import Enum from pathlib import Path from shutil import copyfile, copytree diff --git a/app/data/stacks/mainnet-laconic/stack.yml b/stack_orchestrator/data/stacks/mainnet-laconic/stack.yml similarity index 100% rename from app/data/stacks/mainnet-laconic/stack.yml rename to stack_orchestrator/data/stacks/mainnet-laconic/stack.yml diff --git a/app/data/stacks/mainnet-laconic/test/run-mainnet-laconic-test.sh b/stack_orchestrator/data/stacks/mainnet-laconic/test/run-mainnet-laconic-test.sh similarity index 100% rename from 
app/data/stacks/mainnet-laconic/test/run-mainnet-laconic-test.sh rename to stack_orchestrator/data/stacks/mainnet-laconic/test/run-mainnet-laconic-test.sh diff --git a/stack_orchestrator/data/stacks/merkl-sushiswap-v3/README.md b/stack_orchestrator/data/stacks/merkl-sushiswap-v3/README.md new file mode 100644 index 00000000..4284c2ad --- /dev/null +++ b/stack_orchestrator/data/stacks/merkl-sushiswap-v3/README.md @@ -0,0 +1,81 @@ +# Merkl SushiSwap v3 Watcher + +## Setup + +Clone required repositories: + +```bash +laconic-so --stack merkl-sushiswap-v3 setup-repositories --git-ssh --pull +``` + +Build the container images: + +```bash +laconic-so --stack merkl-sushiswap-v3 build-containers +``` + +## Deploy + +### Configuration + +Create and update an env file to be used in the next step: + + ```bash + # External Filecoin (ETH RPC) endpoint to point the watcher + CERC_ETH_RPC_ENDPOINT= + ``` + +### Deploy the stack + +```bash +laconic-so --stack merkl-sushiswap-v3 deploy --cluster merkl_sushiswap_v3 --env-file up +``` + +* To list down and monitor the running containers: + + ```bash + laconic-so --stack merkl-sushiswap-v3 deploy --cluster merkl_sushiswap_v3 ps + + # With status + docker ps -a + + # Check logs for a container + docker logs -f + ``` + +* Open the GQL playground at http://localhost:3007/graphql + + ```graphql + { + _meta { + block { + number + timestamp + } + hasIndexingErrors + } + + factories { + id + poolCount + } + } + ``` + +## Clean up + +Stop all the services running in background: + +```bash +laconic-so --stack merkl-sushiswap-v3 deploy --cluster merkl_sushiswap_v3 down +``` + +Clear volumes created by this stack: + +```bash +# List all relevant volumes +docker volume ls -q --filter "name=merkl_sushiswap_v3" + +# Remove all the listed volumes +docker volume rm $(docker volume ls -q --filter "name=merkl_sushiswap_v3") +``` diff --git a/stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml 
b/stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml new file mode 100644 index 00000000..3f9dd43e --- /dev/null +++ b/stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml @@ -0,0 +1,9 @@ +version: "1.0" +name: merkl-sushiswap-v3 +description: "SushiSwap v3 watcher stack" +repos: + - github.com/cerc-io/merkl-sushiswap-v3-watcher-ts@v0.1.4 +containers: + - cerc/watcher-merkl-sushiswap-v3 +pods: + - watcher-merkl-sushiswap-v3 diff --git a/app/data/stacks/mobymask-v2/README.md b/stack_orchestrator/data/stacks/mobymask-v2/README.md similarity index 100% rename from app/data/stacks/mobymask-v2/README.md rename to stack_orchestrator/data/stacks/mobymask-v2/README.md diff --git a/app/data/stacks/mobymask-v2/demo.md b/stack_orchestrator/data/stacks/mobymask-v2/demo.md similarity index 100% rename from app/data/stacks/mobymask-v2/demo.md rename to stack_orchestrator/data/stacks/mobymask-v2/demo.md diff --git a/app/data/stacks/mobymask-v2/mobymask-only.md b/stack_orchestrator/data/stacks/mobymask-v2/mobymask-only.md similarity index 100% rename from app/data/stacks/mobymask-v2/mobymask-only.md rename to stack_orchestrator/data/stacks/mobymask-v2/mobymask-only.md diff --git a/app/data/stacks/mobymask-v2/stack.yml b/stack_orchestrator/data/stacks/mobymask-v2/stack.yml similarity index 100% rename from app/data/stacks/mobymask-v2/stack.yml rename to stack_orchestrator/data/stacks/mobymask-v2/stack.yml diff --git a/app/data/stacks/mobymask-v2/watcher-p2p-network/watcher.md b/stack_orchestrator/data/stacks/mobymask-v2/watcher-p2p-network/watcher.md similarity index 100% rename from app/data/stacks/mobymask-v2/watcher-p2p-network/watcher.md rename to stack_orchestrator/data/stacks/mobymask-v2/watcher-p2p-network/watcher.md diff --git a/app/data/stacks/mobymask-v2/watcher-p2p-network/web-app.md b/stack_orchestrator/data/stacks/mobymask-v2/watcher-p2p-network/web-app.md similarity index 100% rename from app/data/stacks/mobymask-v2/watcher-p2p-network/web-app.md 
rename to stack_orchestrator/data/stacks/mobymask-v2/watcher-p2p-network/web-app.md diff --git a/app/data/stacks/mobymask-v2/web-apps.md b/stack_orchestrator/data/stacks/mobymask-v2/web-apps.md similarity index 100% rename from app/data/stacks/mobymask-v2/web-apps.md rename to stack_orchestrator/data/stacks/mobymask-v2/web-apps.md diff --git a/app/data/stacks/mobymask-v3/README.md b/stack_orchestrator/data/stacks/mobymask-v3/README.md similarity index 100% rename from app/data/stacks/mobymask-v3/README.md rename to stack_orchestrator/data/stacks/mobymask-v3/README.md diff --git a/app/data/stacks/mobymask-v3/stack.yml b/stack_orchestrator/data/stacks/mobymask-v3/stack.yml similarity index 100% rename from app/data/stacks/mobymask-v3/stack.yml rename to stack_orchestrator/data/stacks/mobymask-v3/stack.yml diff --git a/app/data/stacks/mobymask-v3/watcher.md b/stack_orchestrator/data/stacks/mobymask-v3/watcher.md similarity index 100% rename from app/data/stacks/mobymask-v3/watcher.md rename to stack_orchestrator/data/stacks/mobymask-v3/watcher.md diff --git a/app/data/stacks/mobymask-v3/web-app.md b/stack_orchestrator/data/stacks/mobymask-v3/web-app.md similarity index 100% rename from app/data/stacks/mobymask-v3/web-app.md rename to stack_orchestrator/data/stacks/mobymask-v3/web-app.md diff --git a/app/data/stacks/mobymask/README.md b/stack_orchestrator/data/stacks/mobymask/README.md similarity index 100% rename from app/data/stacks/mobymask/README.md rename to stack_orchestrator/data/stacks/mobymask/README.md diff --git a/app/data/stacks/mobymask/stack.yml b/stack_orchestrator/data/stacks/mobymask/stack.yml similarity index 100% rename from app/data/stacks/mobymask/stack.yml rename to stack_orchestrator/data/stacks/mobymask/stack.yml diff --git a/app/data/stacks/package-registry/README.md b/stack_orchestrator/data/stacks/package-registry/README.md similarity index 100% rename from app/data/stacks/package-registry/README.md rename to 
stack_orchestrator/data/stacks/package-registry/README.md diff --git a/app/data/stacks/package-registry/stack.yml b/stack_orchestrator/data/stacks/package-registry/stack.yml similarity index 70% rename from app/data/stacks/package-registry/stack.yml rename to stack_orchestrator/data/stacks/package-registry/stack.yml index 33c6c939..f6367ab1 100644 --- a/app/data/stacks/package-registry/stack.yml +++ b/stack_orchestrator/data/stacks/package-registry/stack.yml @@ -13,3 +13,8 @@ pods: path: gitea pre_start_command: "run-this-first.sh" post_start_command: "initialize-gitea.sh" + - name: act-runner + repository: cerc-io/hosting + path: act-runner + pre_start_command: "pre_start.sh" + post_start_command: "post_start.sh" diff --git a/stack_orchestrator/data/stacks/proxy-server/README.md b/stack_orchestrator/data/stacks/proxy-server/README.md new file mode 100644 index 00000000..f0ccdb0f --- /dev/null +++ b/stack_orchestrator/data/stacks/proxy-server/README.md @@ -0,0 +1,79 @@ +# Proxy Server + +Instructions to setup and deploy a HTTP proxy server + +## Setup + +Clone required repository: + +```bash +laconic-so --stack proxy-server setup-repositories --pull + +# If this throws an error as a result of being already checked out to a branch/tag in a repo, remove the repositories mentioned below and re-run the command +``` + +Build the container image: + +```bash +laconic-so --stack proxy-server build-containers +``` + +## Create a deployment + +* First, create a spec file for the deployment, which will allow mapping the stack's ports and volumes to the host: + + ```bash + laconic-so --stack proxy-server deploy init --output proxy-server-spec.yml + ``` + +* Edit `network` in spec file to map container ports to same ports in host: + + ```yml + ... + network: + ports: + proxy-server: + - '4000:4000' + ... 
+ ``` + +* Once you've made any needed changes to the spec file, create a deployment from it: + + ```bash + laconic-so --stack proxy-server deploy create --spec-file proxy-server-spec.yml --deployment-dir proxy-server-deployment + ``` + +* Inside the deployment directory, open the file `config.env` and set the following env variables: + + ```bash + # Whether to run the proxy server (Optional) (Default: true) + ENABLE_PROXY= + + # Upstream endpoint + # (Eg. https://api.example.org) + CERC_PROXY_UPSTREAM= + + # Origin header to be used (Optional) + # (Eg. https://app.example.org) + CERC_PROXY_ORIGIN_HEADER= + ``` + +## Start the stack + +Start the deployment: + +```bash +laconic-so deployment --dir proxy-server-deployment start +``` + +* List and check the health status of the container using `docker ps` + +* The proxy server will now be listening at http://localhost:4000 + +## Clean up + +To stop the service running in background: + +```bash +laconic-so deployment --dir proxy-server-deployment stop +``` diff --git a/stack_orchestrator/data/stacks/proxy-server/stack.yml b/stack_orchestrator/data/stacks/proxy-server/stack.yml new file mode 100644 index 00000000..313a7f91 --- /dev/null +++ b/stack_orchestrator/data/stacks/proxy-server/stack.yml @@ -0,0 +1,8 @@ +version: "0.1" +name: proxy-server +repos: + - github.com/cerc-io/watcher-ts@v0.2.78 +containers: + - cerc/watcher-ts +pods: + - proxy-server diff --git a/app/data/stacks/reth/README.md b/stack_orchestrator/data/stacks/reth/README.md similarity index 100% rename from app/data/stacks/reth/README.md rename to stack_orchestrator/data/stacks/reth/README.md diff --git a/app/data/stacks/reth/stack.yml b/stack_orchestrator/data/stacks/reth/stack.yml similarity index 100% rename from app/data/stacks/reth/stack.yml rename to stack_orchestrator/data/stacks/reth/stack.yml diff --git a/app/data/stacks/sushiswap-subgraph/README.md b/stack_orchestrator/data/stacks/sushiswap-subgraph/README.md similarity index 100% rename from 
app/data/stacks/sushiswap-subgraph/README.md rename to stack_orchestrator/data/stacks/sushiswap-subgraph/README.md diff --git a/app/data/stacks/sushiswap-subgraph/stack.yml b/stack_orchestrator/data/stacks/sushiswap-subgraph/stack.yml similarity index 100% rename from app/data/stacks/sushiswap-subgraph/stack.yml rename to stack_orchestrator/data/stacks/sushiswap-subgraph/stack.yml diff --git a/stack_orchestrator/data/stacks/sushiswap-v3/README.md b/stack_orchestrator/data/stacks/sushiswap-v3/README.md new file mode 100644 index 00000000..7116a6d9 --- /dev/null +++ b/stack_orchestrator/data/stacks/sushiswap-v3/README.md @@ -0,0 +1,62 @@ +# SushiSwap v3 Watcher + +## Setup + +Clone required repositories: + +```bash +laconic-so --stack sushiswap-v3 setup-repositories --git-ssh --pull +``` + +Build the container images: + +```bash +laconic-so --stack sushiswap-v3 build-containers +``` + +## Deploy + +### Configuration + +Create and update an env file to be used in the next step: + + ```bash + # External Filecoin (ETH RPC) endpoint to point the watcher + CERC_ETH_RPC_ENDPOINT= + ``` + +### Deploy the stack + +```bash +laconic-so --stack sushiswap-v3 deploy --cluster sushiswap_v3 --env-file up +``` + +* To list down and monitor the running containers: + + ```bash + laconic-so --stack sushiswap-v3 deploy --cluster sushiswap_v3 ps + + # With status + docker ps -a + + # Check logs for a container + docker logs -f + ``` + +## Clean up + +Stop all the services running in background: + +```bash +laconic-so --stack sushiswap-v3 deploy --cluster sushiswap_v3 down +``` + +Clear volumes created by this stack: + +```bash +# List all relevant volumes +docker volume ls -q --filter "name=sushiswap_v3" + +# Remove all the listed volumes +docker volume rm $(docker volume ls -q --filter "name=sushiswap_v3") +``` diff --git a/stack_orchestrator/data/stacks/sushiswap-v3/stack.yml b/stack_orchestrator/data/stacks/sushiswap-v3/stack.yml new file mode 100644 index 00000000..49c604bf --- 
/dev/null +++ b/stack_orchestrator/data/stacks/sushiswap-v3/stack.yml @@ -0,0 +1,9 @@ +version: "1.0" +name: sushiswap-v3 +description: "SushiSwap v3 watcher stack" +repos: + - github.com/cerc-io/sushiswap-v3-watcher-ts@v0.1.4 +containers: + - cerc/watcher-sushiswap-v3 +pods: + - watcher-sushiswap-v3 diff --git a/app/data/stacks/sushiswap/README.md b/stack_orchestrator/data/stacks/sushiswap/README.md similarity index 100% rename from app/data/stacks/sushiswap/README.md rename to stack_orchestrator/data/stacks/sushiswap/README.md diff --git a/app/data/stacks/sushiswap/smoke-tests.md b/stack_orchestrator/data/stacks/sushiswap/smoke-tests.md similarity index 100% rename from app/data/stacks/sushiswap/smoke-tests.md rename to stack_orchestrator/data/stacks/sushiswap/smoke-tests.md diff --git a/app/data/stacks/sushiswap/stack.yml b/stack_orchestrator/data/stacks/sushiswap/stack.yml similarity index 100% rename from app/data/stacks/sushiswap/stack.yml rename to stack_orchestrator/data/stacks/sushiswap/stack.yml diff --git a/app/data/stacks/test/README.md b/stack_orchestrator/data/stacks/test/README.md similarity index 100% rename from app/data/stacks/test/README.md rename to stack_orchestrator/data/stacks/test/README.md diff --git a/app/data/stacks/test/deploy/commands.py b/stack_orchestrator/data/stacks/test/deploy/commands.py similarity index 88% rename from app/data/stacks/test/deploy/commands.py rename to stack_orchestrator/data/stacks/test/deploy/commands.py index 2eebeea2..e6601eae 100644 --- a/app/data/stacks/test/deploy/commands.py +++ b/stack_orchestrator/data/stacks/test/deploy/commands.py @@ -13,10 +13,10 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . 
-from app.util import get_yaml -from app.deploy.deploy_types import DeployCommandContext -from app.deploy.stack_state import State -from app.deploy.deploy_util import VolumeMapping, run_container_command +from stack_orchestrator.util import get_yaml +from stack_orchestrator.deploy.deploy_types import DeployCommandContext +from stack_orchestrator.deploy.stack_state import State +from stack_orchestrator.deploy.deploy_util import VolumeMapping, run_container_command from pathlib import Path default_spec_file_content = """config: diff --git a/app/data/stacks/test/stack.yml b/stack_orchestrator/data/stacks/test/stack.yml similarity index 100% rename from app/data/stacks/test/stack.yml rename to stack_orchestrator/data/stacks/test/stack.yml diff --git a/stack_orchestrator/data/stacks/uniswap-urbit-app/README.md b/stack_orchestrator/data/stacks/uniswap-urbit-app/README.md new file mode 100644 index 00000000..7499f5fc --- /dev/null +++ b/stack_orchestrator/data/stacks/uniswap-urbit-app/README.md @@ -0,0 +1,151 @@ +# Self-hosted Uniswap Frontend + +Instructions to setup and deploy Uniswap app on Urbit + +Build and deploy: + +- Urbit +- Uniswap app + +## Setup + +Clone required repositories: + +```bash +laconic-so --stack uniswap-urbit-app setup-repositories --pull + +# If this throws an error as a result of being already checked out to a branch/tag in a repo, remove the repositories mentioned below and re-run the command +``` + +Build the container images: + +```bash +laconic-so --stack uniswap-urbit-app build-containers +``` + +## Create a deployment + +First, create a spec file for the deployment, which will map the stack's ports and volumes to the host: + +```bash +laconic-so --stack uniswap-urbit-app deploy init --output uniswap-urbit-app-spec.yml +``` + +### Ports + +Edit `network` in spec file to map container ports to same ports in host + +``` +... 
+network: + ports: + urbit-fake-ship: + - '8080:80' + proxy-server: + - '4000:4000' + ipfs-glob-host: + - '8081:8080' + - '5001:5001' +... +``` + +### Data volumes + +Container data volumes are bind-mounted to specified paths in the host filesystem. +The default setup (generated by `laconic-so deploy init`) places the volumes in the `./data` subdirectory of the deployment directory. The default mappings can be customized by editing the "spec" file generated by `laconic-so deploy init`. + +--- + +Once you've made any needed changes to the spec file, create a deployment from it: + +```bash +laconic-so --stack uniswap-urbit-app deploy create --spec-file uniswap-urbit-app-spec.yml --deployment-dir uniswap-urbit-app-deployment +``` + +## Set env variables + +Inside the deployment directory, open the file `config.env` and set the following env variables: + + ```bash + # External RPC endpoints + # https://docs.infura.io/getting-started#2-create-an-api-key + CERC_INFURA_KEY= + + # Uniswap API GQL Endpoint + # Set this to GQL proxy server endpoint for uniswap app + # (Eg. http://localhost:4000/v1/graphql) + # (Eg. https://abc.xyz.com/v1/graphql) + CERC_UNISWAP_GQL= + + # Optional + + # Whether to run the proxy GQL server + # (Disable only if proxy not required to be run) (Default: true) + ENABLE_PROXY= + + # Proxy server configuration + # Used only if proxy is enabled + + # Upstream API URL + # (Eg. https://api.example.org) + CERC_PROXY_UPSTREAM=https://api.uniswap.org + + # Origin header to be used in the proxy + # (Eg. 
https://app.example.org) + CERC_PROXY_ORIGIN_HEADER=https://app.uniswap.org + + # IPFS configuration + + # IPFS endpoint to host the glob file on + # (Default: http://ipfs-glob-host:5001 pointing to in-stack IPFS node) + CERC_IPFS_GLOB_HOST_ENDPOINT= + + # IPFS endpoint to fetch the glob file from + # (Default: http://ipfs-glob-host:8080 pointing to in-stack IPFS node) + CERC_IPFS_SERVER_ENDPOINT= + ``` + +## Start the stack + +Start the deployment: + +```bash +laconic-so deployment --dir uniswap-urbit-app-deployment start +``` + +* List and check the health status of all the containers using `docker ps` and wait for them to be `healthy` + +* Run the following to get login password for Urbit web interface: + + ```bash + laconic-so deployment --dir uniswap-urbit-app-deployment exec urbit-fake-ship "curl -s --data '{\"source\":{\"dojo\":\"+code\"},\"sink\":{\"stdout\":null}}' http://localhost:12321" + + # Expected output: "\n"% + ``` + +* Open the Urbit web UI at http://localhost:8080 and use the `PASSWORD` from previous step to login + +* The uniswap app is not available when starting the stack for the first time.
Check `urbit-fake-ship` logs to see that app has installed + ``` + laconic-so deployment --dir uniswap-urbit-app-deployment logs -f + + # Expected output: + # laconic-3ccf7ee79bdae874-urbit-fake-ship-1 | docket: fetching %http glob for %uniswap desk + # laconic-3ccf7ee79bdae874-urbit-fake-ship-1 | ">="">="Uniswap app installed + ``` + +* The uniswap app will be now visible at http://localhost:8080 + +## Clean up + +To stop all uniswap-urbit-app services running in the background, while preserving data: + +```bash +laconic-so deployment --dir uniswap-urbit-app-deployment stop +``` + +To stop all uniswap-urbit-app services and also delete data: + +```bash +laconic-so deployment --dir uniswap-urbit-app-deployment stop --delete-volumes +``` diff --git a/stack_orchestrator/data/stacks/uniswap-urbit-app/stack.yml b/stack_orchestrator/data/stacks/uniswap-urbit-app/stack.yml new file mode 100644 index 00000000..3f77098f --- /dev/null +++ b/stack_orchestrator/data/stacks/uniswap-urbit-app/stack.yml @@ -0,0 +1,13 @@ +version: "0.1" +name: uniswap-urbit-app +repos: + - github.com/cerc-io/uniswap-interface@laconic # TODO: Use release + - github.com/cerc-io/watcher-ts@v0.2.78 +containers: + - cerc/uniswap-interface + - cerc/watcher-ts + - cerc/urbit-globs-host +pods: + - uniswap-interface + - proxy-server + - uniswap-urbit diff --git a/app/data/stacks/uniswap-v3/README.md b/stack_orchestrator/data/stacks/uniswap-v3/README.md similarity index 100% rename from app/data/stacks/uniswap-v3/README.md rename to stack_orchestrator/data/stacks/uniswap-v3/README.md diff --git a/app/data/stacks/uniswap-v3/stack.yml b/stack_orchestrator/data/stacks/uniswap-v3/stack.yml similarity index 100% rename from app/data/stacks/uniswap-v3/stack.yml rename to stack_orchestrator/data/stacks/uniswap-v3/stack.yml diff --git a/stack_orchestrator/data/stacks/webapp-template/README.md b/stack_orchestrator/data/stacks/webapp-template/README.md new file mode 100644 index 00000000..4441e475 --- /dev/null +++ 
b/stack_orchestrator/data/stacks/webapp-template/README.md @@ -0,0 +1 @@ +# Template stack for webapp deployments diff --git a/stack_orchestrator/data/stacks/webapp-template/stack.yml b/stack_orchestrator/data/stacks/webapp-template/stack.yml new file mode 100644 index 00000000..d574e764 --- /dev/null +++ b/stack_orchestrator/data/stacks/webapp-template/stack.yml @@ -0,0 +1,7 @@ +version: "1.0" +name: test +description: "Webapp deployment stack" +containers: + - cerc/webapp-template-container +pods: + - webapp-template diff --git a/app/data/version.txt b/stack_orchestrator/data/version.txt similarity index 100% rename from app/data/version.txt rename to stack_orchestrator/data/version.txt diff --git a/app/deploy/__init__.py b/stack_orchestrator/deploy/__init__.py similarity index 100% rename from app/deploy/__init__.py rename to stack_orchestrator/deploy/__init__.py diff --git a/app/deploy/compose/__init__.py b/stack_orchestrator/deploy/compose/__init__.py similarity index 100% rename from app/deploy/compose/__init__.py rename to stack_orchestrator/deploy/compose/__init__.py diff --git a/app/deploy/compose/deploy_docker.py b/stack_orchestrator/deploy/compose/deploy_docker.py similarity index 65% rename from app/deploy/compose/deploy_docker.py rename to stack_orchestrator/deploy/compose/deploy_docker.py index 03306346..d34d1e6f 100644 --- a/app/deploy/compose/deploy_docker.py +++ b/stack_orchestrator/deploy/compose/deploy_docker.py @@ -13,16 +13,20 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . 
+from pathlib import Path from python_on_whales import DockerClient, DockerException -from app.deploy.deployer import Deployer, DeployerException +from stack_orchestrator.deploy.deployer import Deployer, DeployerException, DeployerConfigGenerator +from stack_orchestrator.deploy.deployment_context import DeploymentContext class DockerDeployer(Deployer): name: str = "compose" + type: str - def __init__(self, compose_files, compose_project_name, compose_env_file) -> None: + def __init__(self, type, deployment_context: DeploymentContext, compose_files, compose_project_name, compose_env_file) -> None: self.docker = DockerClient(compose_files=compose_files, compose_project_name=compose_project_name, compose_env_file=compose_env_file) + self.type = type def up(self, detach, services): try: @@ -36,6 +40,13 @@ class DockerDeployer(Deployer): except DockerException as e: raise DeployerException(e) + def status(self): + try: + for p in self.docker.compose.ps(): + print(f"{p.name}\t{p.state.status}") + except DockerException as e: + raise DeployerException(e) + def ps(self): try: return self.docker.compose.ps() @@ -48,9 +59,9 @@ class DockerDeployer(Deployer): except DockerException as e: raise DeployerException(e) - def execute(self, service, command, envs): + def execute(self, service, command, tty, envs): try: - return self.docker.compose.execute(service=service, command=command, envs=envs) + return self.docker.compose.execute(service=service, command=command, tty=tty, envs=envs) except DockerException as e: raise DeployerException(e) @@ -60,8 +71,19 @@ class DockerDeployer(Deployer): except DockerException as e: raise DeployerException(e) - def run(self, image, command, user, volumes, entrypoint=None): + def run(self, image: str, command=None, user=None, volumes=None, entrypoint=None, env={}, ports=[], detach=False): try: - return self.docker.run(image=image, command=command, user=user, volumes=volumes, entrypoint=entrypoint) + return self.docker.run(image=image, 
command=command, user=user, volumes=volumes, + entrypoint=entrypoint, envs=env, detach=detach, publish=ports, publish_all=len(ports) == 0) except DockerException as e: raise DeployerException(e) + + +class DockerDeployerConfigGenerator(DeployerConfigGenerator): + + def __init__(self, type: str) -> None: + super().__init__() + + # Nothing needed at present for the docker deployer + def generate(self, deployment_dir: Path): + pass diff --git a/app/deploy/deploy.py b/stack_orchestrator/deploy/deploy.py similarity index 86% rename from app/deploy/deploy.py rename to stack_orchestrator/deploy/deploy.py index 40cd0a8d..da96a500 100644 --- a/app/deploy/deploy.py +++ b/stack_orchestrator/deploy/deploy.py @@ -24,13 +24,16 @@ from importlib import resources import subprocess import click from pathlib import Path -from app.util import include_exclude_check, get_parsed_stack_config, global_options2, get_dev_root_path -from app.deploy.deployer import Deployer, DeployerException -from app.deploy.deployer_factory import getDeployer -from app.deploy.deploy_types import ClusterContext, DeployCommandContext -from app.deploy.deployment_create import create as deployment_create -from app.deploy.deployment_create import init as deployment_init -from app.deploy.deployment_create import setup as deployment_setup +from stack_orchestrator import constants +from stack_orchestrator.opts import opts +from stack_orchestrator.util import include_exclude_check, get_parsed_stack_config, global_options2, get_dev_root_path +from stack_orchestrator.deploy.deployer import Deployer, DeployerException +from stack_orchestrator.deploy.deployer_factory import getDeployer +from stack_orchestrator.deploy.deploy_types import ClusterContext, DeployCommandContext +from stack_orchestrator.deploy.deployment_context import DeploymentContext +from stack_orchestrator.deploy.deployment_create import create as deployment_create +from stack_orchestrator.deploy.deployment_create import init as deployment_init +from 
stack_orchestrator.deploy.deployment_create import setup as deployment_setup @click.group() @@ -38,7 +41,7 @@ from app.deploy.deployment_create import setup as deployment_setup @click.option("--exclude", help="don\'t start these components") @click.option("--env-file", help="env file to be used") @click.option("--cluster", help="specify a non-default cluster name") -@click.option("--deploy-to", help="cluster system to deploy to (compose or k8s)") +@click.option("--deploy-to", help="cluster system to deploy to (compose or k8s or k8s-kind)") @click.pass_context def command(ctx, include, exclude, env_file, cluster, deploy_to): '''deploy a stack''' @@ -56,14 +59,25 @@ def command(ctx, include, exclude, env_file, cluster, deploy_to): if deploy_to is None: deploy_to = "compose" - ctx.obj = create_deploy_context(global_options2(ctx), stack, include, exclude, cluster, env_file, deploy_to) + ctx.obj = create_deploy_context(global_options2(ctx), None, stack, include, exclude, cluster, env_file, deploy_to) # Subcommand is executed now, by the magic of click -def create_deploy_context(global_context, stack, include, exclude, cluster, env_file, deployer): +def create_deploy_context( + global_context, + deployment_context: DeploymentContext, + stack, + include, + exclude, + cluster, + env_file, + deploy_to) -> DeployCommandContext: + # Extract the cluster name from the deployment, if we have one + if deployment_context and cluster is None: + cluster = deployment_context.get_cluster_id() cluster_context = _make_cluster_context(global_context, stack, include, exclude, cluster, env_file) - # See: https://gabrieldemarmiesse.github.io/python-on-whales/sub-commands/compose/ - deployer = getDeployer(deployer, compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster, + deployer = getDeployer(deploy_to, deployment_context, compose_files=cluster_context.compose_files, + compose_project_name=cluster_context.cluster, 
compose_env_file=cluster_context.env_file) return DeployCommandContext(stack, cluster_context, deployer) @@ -98,6 +112,14 @@ def down_operation(ctx, delete_volumes, extra_args_list): ctx.obj.deployer.down(timeout=timeout_arg, volumes=delete_volumes) +def status_operation(ctx): + global_context = ctx.parent.parent.obj + if not global_context.dry_run: + if global_context.verbose: + print("Running compose status") + ctx.obj.deployer.status() + + def ps_operation(ctx): global_context = ctx.parent.parent.obj if not global_context.dry_run: @@ -151,7 +173,7 @@ def exec_operation(ctx, extra_args): if global_context.verbose: print(f"Running compose exec {service_name} {command_to_exec}") try: - ctx.obj.deployer.execute(service_name, command_to_exec, envs=container_exec_env) + ctx.obj.deployer.execute(service_name, command_to_exec, envs=container_exec_env, tty=True) except DeployerException: print("container command returned error exit status") @@ -244,6 +266,22 @@ def _make_runtime_env(ctx): return container_exec_env +def _make_default_cluster_name(deployment, compose_dir, stack, include, exclude): + # Create default unique, stable cluster name from confile file path and stack name if provided + if deployment: + path = os.path.realpath(os.path.abspath(compose_dir)) + else: + path = "internal" + unique_cluster_descriptor = f"{path},{stack},{include},{exclude}" + if opts.o.debug: + print(f"pre-hash descriptor: {unique_cluster_descriptor}") + hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()[:16] + cluster = f"{constants.cluster_name_prefix}{hash}" + if opts.o.debug: + print(f"Using cluster name: {cluster}") + return cluster + + # stack has to be either PathLike pointing to a stack yml file, or a string with the name of a known stack def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file): @@ -261,19 +299,12 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file): compose_dir = 
Path(__file__).absolute().parent.parent.joinpath("data", "compose") if cluster is None: - # Create default unique, stable cluster name from confile file path and stack name if provided - # TODO: change this to the config file path - path = os.path.realpath(sys.argv[0]) - unique_cluster_descriptor = f"{path},{stack},{include},{exclude}" - if ctx.debug: - print(f"pre-hash descriptor: {unique_cluster_descriptor}") - hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest() - cluster = f"laconic-{hash}" - if ctx.verbose: - print(f"Using cluster name: {cluster}") + cluster = _make_default_cluster_name(deployment, compose_dir, stack, include, exclude) + else: + _make_default_cluster_name(deployment, compose_dir, stack, include, exclude) # See: https://stackoverflow.com/a/20885799/1701505 - from app import data + from stack_orchestrator import data with resources.open_text(data, "pod-list.txt") as pod_list_file: all_pods = pod_list_file.read().splitlines() @@ -307,7 +338,7 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file): compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_path}.yml") else: if deployment: - compose_file_name = os.path.join(compose_dir, "docker-compose.yml") + compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_name}.yml") pod_pre_start_command = pod["pre_start_command"] pod_post_start_command = pod["post_start_command"] script_dir = compose_dir.parent.joinpath("pods", pod_name, "scripts") @@ -317,7 +348,7 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file): post_start_commands.append(os.path.join(script_dir, pod_post_start_command)) else: pod_root_dir = os.path.join(dev_root_path, pod_repository.split("/")[-1], pod["path"]) - compose_file_name = os.path.join(pod_root_dir, "docker-compose.yml") + compose_file_name = os.path.join(pod_root_dir, f"docker-compose-{pod_name}.yml") pod_pre_start_command = pod["pre_start_command"] pod_post_start_command = 
pod["post_start_command"] if pod_pre_start_command is not None: diff --git a/app/deploy/deploy_types.py b/stack_orchestrator/deploy/deploy_types.py similarity index 86% rename from app/deploy/deploy_types.py rename to stack_orchestrator/deploy/deploy_types.py index 16b5c313..f97b2649 100644 --- a/app/deploy/deploy_types.py +++ b/stack_orchestrator/deploy/deploy_types.py @@ -13,11 +13,10 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . -from typing import List +from typing import List, Mapping from dataclasses import dataclass -from pathlib import Path -from app.command_types import CommandOptions -from app.deploy.deployer import Deployer +from stack_orchestrator.command_types import CommandOptions +from stack_orchestrator.deploy.deployer import Deployer @dataclass @@ -38,12 +37,6 @@ class DeployCommandContext: deployer: Deployer -@dataclass -class DeploymentContext: - deployment_dir: Path - command_context: DeployCommandContext - - @dataclass class VolumeMapping: host_path: str @@ -66,3 +59,8 @@ class LaconicStackSetupCommand: @dataclass class LaconicStackCreateCommand: network_dir: str + + +@dataclass +class DeployEnvVars: + map: Mapping[str, str] diff --git a/app/deploy/deploy_util.py b/stack_orchestrator/deploy/deploy_util.py similarity index 66% rename from app/deploy/deploy_util.py rename to stack_orchestrator/deploy/deploy_util.py index 4a1ffbfe..8b812d3a 100644 --- a/app/deploy/deploy_util.py +++ b/stack_orchestrator/deploy/deploy_util.py @@ -14,9 +14,10 @@ # along with this program. If not, see . 
import os -from typing import List -from app.deploy.deploy_types import DeployCommandContext, VolumeMapping -from app.util import get_parsed_stack_config, get_yaml, get_compose_file_dir, get_pod_list +from typing import List, Any +from stack_orchestrator.deploy.deploy_types import DeployCommandContext, VolumeMapping +from stack_orchestrator.util import get_parsed_stack_config, get_yaml, get_compose_file_dir, get_pod_list +from stack_orchestrator.opts import opts def _container_image_from_service(stack: str, service: str): @@ -37,6 +38,33 @@ def _container_image_from_service(stack: str, service: str): return image_name +def parsed_pod_files_map_from_file_names(pod_files): + parsed_pod_yaml_map : Any = {} + for pod_file in pod_files: + with open(pod_file, "r") as pod_file_descriptor: + parsed_pod_file = get_yaml().load(pod_file_descriptor) + parsed_pod_yaml_map[pod_file] = parsed_pod_file + if opts.o.debug: + print(f"parsed_pod_yaml_map: {parsed_pod_yaml_map}") + return parsed_pod_yaml_map + + +def images_for_deployment(pod_files: List[str]): + image_set = set() + parsed_pod_yaml_map = parsed_pod_files_map_from_file_names(pod_files) + # Find the set of images in the pods + for pod_name in parsed_pod_yaml_map: + pod = parsed_pod_yaml_map[pod_name] + services = pod["services"] + for service_name in services: + service_info = services[service_name] + image = service_info["image"] + image_set.add(image) + if opts.o.debug: + print(f"image_set: {image_set}") + return image_set + + def _volumes_to_docker(mounts: List[VolumeMapping]): # Example from doc: [("/", "/host"), ("/etc/hosts", "/etc/hosts", "rw")] result = [] diff --git a/app/deploy/deployer.py b/stack_orchestrator/deploy/deployer.py similarity index 77% rename from app/deploy/deployer.py rename to stack_orchestrator/deploy/deployer.py index b46a2d23..2806044b 100644 --- a/app/deploy/deployer.py +++ b/stack_orchestrator/deploy/deployer.py @@ -14,6 +14,7 @@ # along with this program. If not, see . 
from abc import ABC, abstractmethod +from pathlib import Path class Deployer(ABC): @@ -30,12 +31,16 @@ class Deployer(ABC): def ps(self): pass + @abstractmethod + def status(self): + pass + @abstractmethod def port(self, service, private_port): pass @abstractmethod - def execute(self, service_name, command, envs): + def execute(self, service_name, command, tty, envs): pass @abstractmethod @@ -43,10 +48,17 @@ class Deployer(ABC): pass @abstractmethod - def run(self, image, command, user, volumes, entrypoint): + def run(self, image: str, command=None, user=None, volumes=None, entrypoint=None, env={}, ports=[], detach=False): pass class DeployerException(Exception): def __init__(self, *args: object) -> None: super().__init__(*args) + + +class DeployerConfigGenerator(ABC): + + @abstractmethod + def generate(self, deployment_dir: Path): + pass diff --git a/stack_orchestrator/deploy/deployer_factory.py b/stack_orchestrator/deploy/deployer_factory.py new file mode 100644 index 00000000..959c1b7a --- /dev/null +++ b/stack_orchestrator/deploy/deployer_factory.py @@ -0,0 +1,36 @@ +# Copyright © 2023 Vulcanize + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +from stack_orchestrator import constants +from stack_orchestrator.deploy.k8s.deploy_k8s import K8sDeployer, K8sDeployerConfigGenerator +from stack_orchestrator.deploy.compose.deploy_docker import DockerDeployer, DockerDeployerConfigGenerator + + +def getDeployerConfigGenerator(type: str): + if type == "compose" or type is None: + return DockerDeployerConfigGenerator(type) + elif type == constants.k8s_deploy_type or type == constants.k8s_kind_deploy_type: + return K8sDeployerConfigGenerator(type) + else: + print(f"ERROR: deploy-to {type} is not valid") + + +def getDeployer(type: str, deployment_context, compose_files, compose_project_name, compose_env_file): + if type == "compose" or type is None: + return DockerDeployer(type, deployment_context, compose_files, compose_project_name, compose_env_file) + elif type == constants.k8s_deploy_type or type == constants.k8s_kind_deploy_type: + return K8sDeployer(type, deployment_context, compose_files, compose_project_name, compose_env_file) + else: + print(f"ERROR: deploy-to {type} is not valid") diff --git a/app/deploy/deployment.py b/stack_orchestrator/deploy/deployment.py similarity index 78% rename from app/deploy/deployment.py rename to stack_orchestrator/deploy/deployment.py index b1b4a486..366a83f6 100644 --- a/app/deploy/deployment.py +++ b/stack_orchestrator/deploy/deployment.py @@ -16,36 +16,12 @@ import click from pathlib import Path import sys -from app.deploy.deploy import up_operation, down_operation, ps_operation, port_operation -from app.deploy.deploy import exec_operation, logs_operation, create_deploy_context -from app.deploy.stack import Stack -from app.deploy.spec import Spec - - -class DeploymentContext: - dir: Path - spec: Spec - stack: Stack - - def get_stack_file(self): - return self.dir.joinpath("stack.yml") - - def get_spec_file(self): - return self.dir.joinpath("spec.yml") - - def get_env_file(self): - return self.dir.joinpath("config.env") - - # TODO: implement me - def
get_cluster_name(self): - return None - - def init(self, dir): - self.dir = dir - self.stack = Stack() - self.stack.init_from_file(self.get_stack_file()) - self.spec = Spec() - self.spec.init_from_file(self.get_spec_file()) +from stack_orchestrator import constants +from stack_orchestrator.deploy.images import push_images_operation +from stack_orchestrator.deploy.deploy import up_operation, down_operation, ps_operation, port_operation, status_operation +from stack_orchestrator.deploy.deploy import exec_operation, logs_operation, create_deploy_context +from stack_orchestrator.deploy.deploy_types import DeployCommandContext +from stack_orchestrator.deploy.deployment_context import DeploymentContext @click.group() @@ -72,13 +48,17 @@ def command(ctx, dir): ctx.obj = deployment_context -def make_deploy_context(ctx): +def make_deploy_context(ctx) -> DeployCommandContext: context: DeploymentContext = ctx.obj stack_file_path = context.get_stack_file() env_file = context.get_env_file() - cluster_name = context.get_cluster_name() - return create_deploy_context(ctx.parent.parent.obj, stack_file_path, None, None, cluster_name, env_file, - context.spec.obj["deploy-to"]) + cluster_name = context.get_cluster_id() + if constants.deploy_to_key in context.spec.obj: + deployment_type = context.spec.obj[constants.deploy_to_key] + else: + deployment_type = constants.compose_deploy_type + return create_deploy_context(ctx.parent.parent.obj, context, stack_file_path, None, None, cluster_name, env_file, + deployment_type) @command.command() @@ -131,6 +111,14 @@ def ps(ctx): ps_operation(ctx) +@command.command() +@click.pass_context +def push_images(ctx): + deploy_command_context: DeployCommandContext = make_deploy_context(ctx) + deployment_context: DeploymentContext = ctx.obj + push_images_operation(deploy_command_context, deployment_context) + + @command.command() @click.argument('extra_args', nargs=-1) # help: command: port @click.pass_context @@ -159,4 +147,5 @@ def logs(ctx, tail, 
follow, extra_args): @command.command() @click.pass_context def status(ctx): - print(f"Context: {ctx.parent.obj}") + ctx.obj = make_deploy_context(ctx) + status_operation(ctx) diff --git a/stack_orchestrator/deploy/deployment_context.py b/stack_orchestrator/deploy/deployment_context.py new file mode 100644 index 00000000..27e32812 --- /dev/null +++ b/stack_orchestrator/deploy/deployment_context.py @@ -0,0 +1,69 @@ + +# Copyright © 2022, 2023 Vulcanize + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +import hashlib +import os +from pathlib import Path + +from stack_orchestrator import constants +from stack_orchestrator.util import get_yaml +from stack_orchestrator.deploy.stack import Stack +from stack_orchestrator.deploy.spec import Spec + + +class DeploymentContext: + deployment_dir: Path + id: str + spec: Spec + stack: Stack + + def get_stack_file(self): + return self.deployment_dir.joinpath(constants.stack_file_name) + + def get_spec_file(self): + return self.deployment_dir.joinpath(constants.spec_file_name) + + def get_env_file(self): + return self.deployment_dir.joinpath(constants.config_file_name) + + def get_deployment_file(self): + return self.deployment_dir.joinpath(constants.deployment_file_name) + + def get_compose_dir(self): + return self.deployment_dir.joinpath(constants.compose_dir_name) + + def get_cluster_id(self): + return self.id + + def init(self, dir): + self.deployment_dir = dir + self.spec = Spec() + self.spec.init_from_file(self.get_spec_file()) + self.stack = Stack(self.spec.obj["stack"]) + self.stack.init_from_file(self.get_stack_file()) + deployment_file_path = self.get_deployment_file() + if deployment_file_path.exists(): + with deployment_file_path: + obj = get_yaml().load(open(deployment_file_path, "r")) + self.id = obj[constants.cluster_id_key] + # Handle the case of a legacy deployment with no file + # Code below is intended to match the output from _make_default_cluster_name() + # TODO: remove when we no longer need to support legacy deployments + else: + path = os.path.realpath(os.path.abspath(self.get_compose_dir())) + unique_cluster_descriptor = f"{path},{self.get_stack_file()},None,None" + hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()[:16] + self.id = f"{constants.cluster_name_prefix}{hash}" diff --git a/app/deploy/deployment_create.py b/stack_orchestrator/deploy/deployment_create.py similarity index 63% rename from app/deploy/deployment_create.py rename to 
stack_orchestrator/deploy/deployment_create.py index dcaccb2b..9eaea30c 100644 --- a/app/deploy/deployment_create.py +++ b/stack_orchestrator/deploy/deployment_create.py @@ -20,14 +20,20 @@ from pathlib import Path from typing import List import random from shutil import copy, copyfile, copytree +from secrets import token_hex import sys -from app.util import (get_stack_file_path, get_parsed_deployment_spec, get_parsed_stack_config, global_options, get_yaml, - get_pod_list, get_pod_file_path, pod_has_scripts, get_pod_script_paths, get_plugin_code_path) -from app.deploy.deploy_types import DeploymentContext, DeployCommandContext, LaconicStackSetupCommand +from stack_orchestrator import constants +from stack_orchestrator.opts import opts +from stack_orchestrator.util import (get_stack_file_path, get_parsed_deployment_spec, get_parsed_stack_config, + global_options, get_yaml, get_pod_list, get_pod_file_path, pod_has_scripts, + get_pod_script_paths, get_plugin_code_paths, error_exit, env_var_map_from_file) +from stack_orchestrator.deploy.deploy_types import LaconicStackSetupCommand +from stack_orchestrator.deploy.deployer_factory import getDeployerConfigGenerator +from stack_orchestrator.deploy.deployment_context import DeploymentContext def _make_default_deployment_dir(): - return "deployment-001" + return Path("deployment-001") def _get_ports(stack): @@ -99,16 +105,17 @@ def _fixup_pod_file(pod, spec, compose_dir): } pod["volumes"][volume] = new_volume_spec # Fix up ports - if "ports" in spec: - spec_ports = spec["ports"] + if "network" in spec and "ports" in spec["network"]: + spec_ports = spec["network"]["ports"] for container_name, container_ports in spec_ports.items(): if container_name in pod["services"]: pod["services"][container_name]["ports"] = container_ports -def _commands_plugin_path(ctx: DeployCommandContext): - plugin_path = get_plugin_code_path(ctx.stack) - return plugin_path.joinpath("deploy", "commands.py") +def _commands_plugin_paths(stack_name: str): 
+ plugin_paths = get_plugin_code_paths(stack_name) + ret = [p.joinpath("deploy", "commands.py") for p in plugin_paths] + return ret # See: https://stackoverflow.com/a/54625079/1701505 @@ -120,15 +127,23 @@ def call_stack_deploy_init(deploy_command_context): # Link with the python file in the stack # Call a function in it # If no function found, return None - python_file_path = _commands_plugin_path(deploy_command_context) - if python_file_path.exists(): - spec = util.spec_from_file_location("commands", python_file_path) - imported_stack = util.module_from_spec(spec) - spec.loader.exec_module(imported_stack) - if _has_method(imported_stack, "init"): - return imported_stack.init(deploy_command_context) - else: - return None + python_file_paths = _commands_plugin_paths(deploy_command_context.stack) + + ret = None + init_done = False + for python_file_path in python_file_paths: + if python_file_path.exists(): + spec = util.spec_from_file_location("commands", python_file_path) + imported_stack = util.module_from_spec(spec) + spec.loader.exec_module(imported_stack) + if _has_method(imported_stack, "init"): + if not init_done: + ret = imported_stack.init(deploy_command_context) + init_done = True + else: + # TODO: remove this restriction + print(f"Skipping init() from plugin {python_file_path}. 
Only one init() is allowed.") + return ret # TODO: fold this with function above @@ -136,16 +151,14 @@ def call_stack_deploy_setup(deploy_command_context, parameters: LaconicStackSetu # Link with the python file in the stack # Call a function in it # If no function found, return None - python_file_path = _commands_plugin_path(deploy_command_context) - print(f"Path: {python_file_path}") - if python_file_path.exists(): - spec = util.spec_from_file_location("commands", python_file_path) - imported_stack = util.module_from_spec(spec) - spec.loader.exec_module(imported_stack) - if _has_method(imported_stack, "setup"): - return imported_stack.setup(deploy_command_context, parameters, extra_args) - else: - return None + python_file_paths = _commands_plugin_paths(deploy_command_context.stack) + for python_file_path in python_file_paths: + if python_file_path.exists(): + spec = util.spec_from_file_location("commands", python_file_path) + imported_stack = util.module_from_spec(spec) + spec.loader.exec_module(imported_stack) + if _has_method(imported_stack, "setup"): + imported_stack.setup(deploy_command_context, parameters, extra_args) # TODO: fold this with function above @@ -153,15 +166,14 @@ def call_stack_deploy_create(deployment_context, extra_args): # Link with the python file in the stack # Call a function in it # If no function found, return None - python_file_path = _commands_plugin_path(deployment_context.command_context) - if python_file_path.exists(): - spec = util.spec_from_file_location("commands", python_file_path) - imported_stack = util.module_from_spec(spec) - spec.loader.exec_module(imported_stack) - if _has_method(imported_stack, "create"): - return imported_stack.create(deployment_context, extra_args) - else: - return None + python_file_paths = _commands_plugin_paths(deployment_context.stack.name) + for python_file_path in python_file_paths: + if python_file_path.exists(): + spec = util.spec_from_file_location("commands", python_file_path) + 
imported_stack = util.module_from_spec(spec) + spec.loader.exec_module(imported_stack) + if _has_method(imported_stack, "create"): + imported_stack.create(deployment_context, extra_args) # Inspect the pod yaml to find config files referenced in subdirectories @@ -233,37 +245,76 @@ def _parse_config_variables(variable_values: str): variable_name = variable_value_pair[0] variable_value = variable_value_pair[1] result_values[variable_name] = variable_value - result = {"config": result_values} + result = result_values return result @click.command() @click.option("--config", help="Provide config variables for the deployment") +@click.option("--config-file", help="Provide config variables in a file for the deployment") +@click.option("--kube-config", help="Provide a config file for a k8s deployment") +@click.option("--image-registry", help="Provide a container image registry url for this k8s cluster") @click.option("--output", required=True, help="Write yaml spec file here") @click.option("--map-ports-to-host", required=False, help="Map ports to the host as one of: any-variable-random (default), " "localhost-same, any-same, localhost-fixed-random, any-fixed-random") @click.pass_context -def init(ctx, config, output, map_ports_to_host): - yaml = get_yaml() +def init(ctx, config, config_file, kube_config, image_registry, output, map_ports_to_host): stack = global_options(ctx).stack - debug = global_options(ctx).debug - default_spec_file_content = call_stack_deploy_init(ctx.obj) - spec_file_content = {"stack": stack, "deploy-to": ctx.obj.deployer.name} + deployer_type = ctx.obj.deployer.type + deploy_command_context = ctx.obj + return init_operation( + deploy_command_context, + stack, deployer_type, + config, config_file, + kube_config, + image_registry, + output, + map_ports_to_host) + + +# The init command's implementation is in a separate function so that we can +# call it from other commands, bypassing the click decoration stuff +def 
init_operation(deploy_command_context, stack, deployer_type, config, + config_file, kube_config, image_registry, output, map_ports_to_host): + + default_spec_file_content = call_stack_deploy_init(deploy_command_context) + spec_file_content = {"stack": stack, constants.deploy_to_key: deployer_type} + if deployer_type == "k8s": + if kube_config is None: + error_exit("--kube-config must be supplied with --deploy-to k8s") + if image_registry is None: + error_exit("--image-registry must be supplied with --deploy-to k8s") + spec_file_content.update({constants.kube_config_key: kube_config}) + spec_file_content.update({constants.image_resigtry_key: image_registry}) + else: + # Check for --kube-config supplied for non-relevant deployer types + if kube_config is not None: + error_exit(f"--kube-config is not allowed with a {deployer_type} deployment") + if image_registry is not None: + error_exit(f"--image-registry is not allowed with a {deployer_type} deployment") if default_spec_file_content: spec_file_content.update(default_spec_file_content) config_variables = _parse_config_variables(config) + # Implement merge, since update() overwrites if config_variables: - # Implement merge, since update() overwrites - orig_config = spec_file_content["config"] - new_config = config_variables["config"] + orig_config = spec_file_content.get("config", {}) + new_config = config_variables merged_config = {**new_config, **orig_config} spec_file_content.update({"config": merged_config}) - if debug: - print(f"Creating spec file for stack: {stack} with content: {spec_file_content}") + if config_file: + config_file_path = Path(config_file) + if not config_file_path.exists(): + error_exit(f"config file: {config_file} does not exist") + config_file_variables = env_var_map_from_file(config_file_path) + if config_file_variables: + orig_config = spec_file_content.get("config", {}) + new_config = config_file_variables + merged_config = {**new_config, **orig_config} + 
spec_file_content.update({"config": merged_config}) ports = _get_mapped_ports(stack, map_ports_to_host) - spec_file_content["ports"] = ports + spec_file_content.update({"network": {"ports": ports}}) named_volumes = _get_named_volumes(stack) if named_volumes: @@ -272,8 +323,11 @@ def init(ctx, config, output, map_ports_to_host): volume_descriptors[named_volume] = f"./data/{named_volume}" spec_file_content["volumes"] = volume_descriptors + if opts.o.debug: + print(f"Creating spec file for stack: {stack} with content: {spec_file_content}") + with open(output, "w") as output_file: - yaml.dump(spec_file_content, output_file) + get_yaml().dump(spec_file_content, output_file) def _write_config_file(spec_file: Path, config_env_file: Path): @@ -287,12 +341,25 @@ def _write_config_file(spec_file: Path, config_env_file: Path): output_file.write(f"{variable_name}={variable_value}\n") +def _write_kube_config_file(external_path: Path, internal_path: Path): + if not external_path.exists(): + error_exit(f"Kube config file {external_path} does not exist") + copyfile(external_path, internal_path) + + def _copy_files_to_directory(file_paths: List[Path], directory: Path): for path in file_paths: # Using copy to preserve the execute bit copy(path, os.path.join(directory, os.path.basename(path))) +def _create_deployment_file(deployment_dir: Path): + deployment_file_path = deployment_dir.joinpath(constants.deployment_file_name) + cluster = f"{constants.cluster_name_prefix}{token_hex(8)}" + with open(deployment_file_path, "w") as output_file: + output_file.write(f"{constants.cluster_id_key}: {cluster}\n") + + @click.command() @click.option("--spec-file", required=True, help="Spec file to use to create this deployment") @click.option("--deployment-dir", help="Create deployment files in this directory") @@ -301,29 +368,42 @@ def _copy_files_to_directory(file_paths: List[Path], directory: Path): @click.option("--initial-peers", help="Initial set of persistent peers") @click.pass_context def 
create(ctx, spec_file, deployment_dir, network_dir, initial_peers): - # This function fails with a useful error message if the file doens't exist + deployment_command_context = ctx.obj + return create_operation(deployment_command_context, spec_file, deployment_dir, network_dir, initial_peers) + + +# The init command's implementation is in a separate function so that we can +# call it from other commands, bypassing the click decoration stuff +def create_operation(deployment_command_context, spec_file, deployment_dir, network_dir, initial_peers): parsed_spec = get_parsed_deployment_spec(spec_file) - stack_name = parsed_spec['stack'] + stack_name = parsed_spec["stack"] + deployment_type = parsed_spec[constants.deploy_to_key] stack_file = get_stack_file_path(stack_name) parsed_stack = get_parsed_stack_config(stack_name) - if global_options(ctx).debug: + if opts.o.debug: print(f"parsed spec: {parsed_spec}") if deployment_dir is None: - deployment_dir = _make_default_deployment_dir() - if os.path.exists(deployment_dir): - print(f"Error: {deployment_dir} already exists") - sys.exit(1) - os.mkdir(deployment_dir) + deployment_dir_path = _make_default_deployment_dir() + else: + deployment_dir_path = Path(deployment_dir) + if deployment_dir_path.exists(): + error_exit(f"{deployment_dir_path} already exists") + os.mkdir(deployment_dir_path) # Copy spec file and the stack file into the deployment dir - copyfile(spec_file, os.path.join(deployment_dir, "spec.yml")) - copyfile(stack_file, os.path.join(deployment_dir, os.path.basename(stack_file))) + copyfile(spec_file, deployment_dir_path.joinpath(constants.spec_file_name)) + copyfile(stack_file, deployment_dir_path.joinpath(os.path.basename(stack_file))) + _create_deployment_file(deployment_dir_path) # Copy any config varibles from the spec file into an env file suitable for compose - _write_config_file(spec_file, os.path.join(deployment_dir, "config.env")) + _write_config_file(spec_file, 
deployment_dir_path.joinpath(constants.config_file_name)) + # Copy any k8s config file into the deployment dir + if deployment_type == "k8s": + _write_kube_config_file(Path(parsed_spec[constants.kube_config_key]), + deployment_dir_path.joinpath(constants.kube_config_filename)) # Copy the pod files into the deployment dir, fixing up content pods = get_pod_list(parsed_stack) - destination_compose_dir = os.path.join(deployment_dir, "compose") + destination_compose_dir = deployment_dir_path.joinpath("compose") os.mkdir(destination_compose_dir) - destination_pods_dir = os.path.join(deployment_dir, "pods") + destination_pods_dir = deployment_dir_path.joinpath("pods") os.mkdir(destination_pods_dir) data_dir = Path(__file__).absolute().parent.parent.joinpath("data") yaml = get_yaml() @@ -331,12 +411,12 @@ def create(ctx, spec_file, deployment_dir, network_dir, initial_peers): pod_file_path = get_pod_file_path(parsed_stack, pod) parsed_pod_file = yaml.load(open(pod_file_path, "r")) extra_config_dirs = _find_extra_config_dirs(parsed_pod_file, pod) - destination_pod_dir = os.path.join(destination_pods_dir, pod) + destination_pod_dir = destination_pods_dir.joinpath(pod) os.mkdir(destination_pod_dir) - if global_options(ctx).debug: + if opts.o.debug: print(f"extra config dirs: {extra_config_dirs}") _fixup_pod_file(parsed_pod_file, parsed_spec, destination_compose_dir) - with open(os.path.join(destination_compose_dir, os.path.basename(pod_file_path)), "w") as output_file: + with open(destination_compose_dir.joinpath("docker-compose-%s.yml" % pod), "w") as output_file: yaml.dump(parsed_pod_file, output_file) # Copy the config files for the pod, if any config_dirs = {pod} @@ -344,22 +424,26 @@ def create(ctx, spec_file, deployment_dir, network_dir, initial_peers): for config_dir in config_dirs: source_config_dir = data_dir.joinpath("config", config_dir) if os.path.exists(source_config_dir): - destination_config_dir = os.path.join(deployment_dir, "config", config_dir) + 
destination_config_dir = deployment_dir_path.joinpath("config", config_dir) # If the same config dir appears in multiple pods, it may already have been copied if not os.path.exists(destination_config_dir): copytree(source_config_dir, destination_config_dir) # Copy the script files for the pod, if any if pod_has_scripts(parsed_stack, pod): - destination_script_dir = os.path.join(destination_pod_dir, "scripts") + destination_script_dir = destination_pod_dir.joinpath("scripts") os.mkdir(destination_script_dir) script_paths = get_pod_script_paths(parsed_stack, pod) _copy_files_to_directory(script_paths, destination_script_dir) # Delegate to the stack's Python code # The deploy create command doesn't require a --stack argument so we need to insert the # stack member here. - deployment_command_context = ctx.obj deployment_command_context.stack = stack_name - deployment_context = DeploymentContext(Path(deployment_dir), deployment_command_context) + deployment_context = DeploymentContext() + deployment_context.init(deployment_dir_path) + # Call the deployer to generate any deployer-specific files (e.g. for kind) + deployer_config_generator = getDeployerConfigGenerator(deployment_type) + # TODO: make deployment_dir_path a Path above + deployer_config_generator.generate(deployment_dir_path) call_stack_deploy_create(deployment_context, [network_dir, initial_peers]) diff --git a/stack_orchestrator/deploy/images.py b/stack_orchestrator/deploy/images.py new file mode 100644 index 00000000..ddbb33f7 --- /dev/null +++ b/stack_orchestrator/deploy/images.py @@ -0,0 +1,62 @@ +# Copyright © 2023 Vulcanize + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+ +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +from typing import Set + +from python_on_whales import DockerClient + +from stack_orchestrator import constants +from stack_orchestrator.opts import opts +from stack_orchestrator.deploy.deployment_context import DeploymentContext +from stack_orchestrator.deploy.deploy_types import DeployCommandContext +from stack_orchestrator.deploy.deploy_util import images_for_deployment + + +def _image_needs_pushed(image: str): + # TODO: this needs to be more intelligent + return image.endswith(":local") + + +def remote_tag_for_image(image: str, remote_repo_url: str): + # Turns image tags of the form: foo/bar:local into remote.repo/org/bar:deploy + (org, image_name_with_version) = image.split("/") + (image_name, image_version) = image_name_with_version.split(":") + if image_version == "local": + return f"{remote_repo_url}/{image_name}:deploy" + else: + return image + + +# TODO: needs lots of error handling +def push_images_operation(command_context: DeployCommandContext, deployment_context: DeploymentContext): + # Get the list of images for the stack + cluster_context = command_context.cluster_context + images: Set[str] = images_for_deployment(cluster_context.compose_files) + # Tag the images for the remote repo + remote_repo_url = deployment_context.spec.obj[constants.image_resigtry_key] + docker = DockerClient() + for image in images: + if _image_needs_pushed(image): + remote_tag = remote_tag_for_image(image, remote_repo_url) + if opts.o.verbose: + print(f"Tagging {image} to {remote_tag}") + docker.image.tag(image, remote_tag) + # Run docker push commands to upload + for image in images: + if 
_image_needs_pushed(image): + remote_tag = remote_tag_for_image(image, remote_repo_url) + if opts.o.verbose: + print(f"Pushing image {remote_tag}") + docker.image.push(remote_tag) diff --git a/app/deploy/k8s/__init__.py b/stack_orchestrator/deploy/k8s/__init__.py similarity index 100% rename from app/deploy/k8s/__init__.py rename to stack_orchestrator/deploy/k8s/__init__.py diff --git a/stack_orchestrator/deploy/k8s/cluster_info.py b/stack_orchestrator/deploy/k8s/cluster_info.py new file mode 100644 index 00000000..0aa74189 --- /dev/null +++ b/stack_orchestrator/deploy/k8s/cluster_info.py @@ -0,0 +1,217 @@ +# Copyright © 2023 Vulcanize + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +from kubernetes import client +from typing import Any, List, Set + +from stack_orchestrator.opts import opts +from stack_orchestrator.util import env_var_map_from_file +from stack_orchestrator.deploy.k8s.helpers import named_volumes_from_pod_files, volume_mounts_for_service, volumes_for_pod_files +from stack_orchestrator.deploy.k8s.helpers import get_node_pv_mount_path +from stack_orchestrator.deploy.k8s.helpers import envs_from_environment_variables_map +from stack_orchestrator.deploy.deploy_util import parsed_pod_files_map_from_file_names, images_for_deployment +from stack_orchestrator.deploy.deploy_types import DeployEnvVars +from stack_orchestrator.deploy.spec import Spec +from stack_orchestrator.deploy.images import remote_tag_for_image + + +class ClusterInfo: + parsed_pod_yaml_map: Any + image_set: Set[str] = set() + app_name: str + environment_variables: DeployEnvVars + spec: Spec + + def __init__(self) -> None: + pass + + def int(self, pod_files: List[str], compose_env_file, deployment_name, spec: Spec): + self.parsed_pod_yaml_map = parsed_pod_files_map_from_file_names(pod_files) + # Find the set of images in the pods + self.image_set = images_for_deployment(pod_files) + self.environment_variables = DeployEnvVars(env_var_map_from_file(compose_env_file)) + self.app_name = deployment_name + self.spec = spec + if (opts.o.debug): + print(f"Env vars: {self.environment_variables.map}") + + def get_ingress(self): + # No ingress for a deployment that has no http-proxy defined, for now + http_proxy_info_list = self.spec.get_http_proxy() + ingress = None + if http_proxy_info_list: + # TODO: handle multiple definitions + http_proxy_info = http_proxy_info_list[0] + if opts.o.debug: + print(f"http-proxy: {http_proxy_info}") + # TODO: good enough parsing for webapp deployment for now + host_name = http_proxy_info["host-name"] + rules = [] + tls = [client.V1IngressTLS( + hosts=[host_name], + secret_name=f"{self.app_name}-tls" + )] + paths = [] + for route in 
http_proxy_info["routes"]: + path = route["path"] + proxy_to = route["proxy-to"] + if opts.o.debug: + print(f"proxy config: {path} -> {proxy_to}") + # proxy_to has the form : + proxy_to_port = int(proxy_to.split(":")[1]) + paths.append(client.V1HTTPIngressPath( + path_type="Prefix", + path=path, + backend=client.V1IngressBackend( + service=client.V1IngressServiceBackend( + # TODO: this looks wrong + name=f"{self.app_name}-service", + # TODO: pull port number from the service + port=client.V1ServiceBackendPort(number=proxy_to_port) + ) + ) + )) + rules.append(client.V1IngressRule( + host=host_name, + http=client.V1HTTPIngressRuleValue( + paths=paths + ) + )) + spec = client.V1IngressSpec( + tls=tls, + rules=rules + ) + ingress = client.V1Ingress( + metadata=client.V1ObjectMeta( + name=f"{self.app_name}-ingress", + annotations={ + "kubernetes.io/ingress.class": "nginx", + "cert-manager.io/cluster-issuer": "letsencrypt-prod" + } + ), + spec=spec + ) + return ingress + + # TODO: support multiple services + def get_service(self): + for pod_name in self.parsed_pod_yaml_map: + pod = self.parsed_pod_yaml_map[pod_name] + services = pod["services"] + for service_name in services: + service_info = services[service_name] + port = int(service_info["ports"][0]) + if opts.o.debug: + print(f"service port: {port}") + service = client.V1Service( + metadata=client.V1ObjectMeta(name=f"{self.app_name}-service"), + spec=client.V1ServiceSpec( + type="ClusterIP", + ports=[client.V1ServicePort( + port=port, + target_port=port + )], + selector={"app": self.app_name} + ) + ) + return service + + def get_pvcs(self): + result = [] + volumes = named_volumes_from_pod_files(self.parsed_pod_yaml_map) + if opts.o.debug: + print(f"Volumes: {volumes}") + for volume_name in volumes: + spec = client.V1PersistentVolumeClaimSpec( + access_modes=["ReadWriteOnce"], + storage_class_name="manual", + resources=client.V1ResourceRequirements( + requests={"storage": "2Gi"} + ), + volume_name=volume_name + ) + 
pvc = client.V1PersistentVolumeClaim( + metadata=client.V1ObjectMeta(name=volume_name, + labels={"volume-label": volume_name}), + spec=spec, + ) + result.append(pvc) + return result + + def get_pvs(self): + result = [] + volumes = named_volumes_from_pod_files(self.parsed_pod_yaml_map) + for volume_name in volumes: + spec = client.V1PersistentVolumeSpec( + storage_class_name="manual", + access_modes=["ReadWriteOnce"], + capacity={"storage": "2Gi"}, + host_path=client.V1HostPathVolumeSource(path=get_node_pv_mount_path(volume_name)) + ) + pv = client.V1PersistentVolume( + metadata=client.V1ObjectMeta(name=volume_name, + labels={"volume-label": volume_name}), + spec=spec, + ) + result.append(pv) + return result + + # to suit the deployment, and also annotate the container specs to point at said volumes + def get_deployment(self): + containers = [] + for pod_name in self.parsed_pod_yaml_map: + pod = self.parsed_pod_yaml_map[pod_name] + services = pod["services"] + for service_name in services: + container_name = service_name + service_info = services[service_name] + image = service_info["image"] + port = int(service_info["ports"][0]) + if opts.o.debug: + print(f"image: {image}") + print(f"service port: {port}") + # Re-write the image tag for remote deployment + image_to_use = remote_tag_for_image( + image, self.spec.get_image_registry()) if self.spec.get_image_registry() is not None else image + volume_mounts = volume_mounts_for_service(self.parsed_pod_yaml_map, service_name) + container = client.V1Container( + name=container_name, + image=image_to_use, + env=envs_from_environment_variables_map(self.environment_variables.map), + ports=[client.V1ContainerPort(container_port=port)], + volume_mounts=volume_mounts, + resources=client.V1ResourceRequirements( + requests={"cpu": "100m", "memory": "200Mi"}, + limits={"cpu": "500m", "memory": "500Mi"}, + ), + ) + containers.append(container) + volumes = volumes_for_pod_files(self.parsed_pod_yaml_map) + template = 
client.V1PodTemplateSpec( + metadata=client.V1ObjectMeta(labels={"app": self.app_name}), + spec=client.V1PodSpec(containers=containers, volumes=volumes), + ) + spec = client.V1DeploymentSpec( + replicas=1, template=template, selector={ + "matchLabels": + {"app": self.app_name}}) + + deployment = client.V1Deployment( + api_version="apps/v1", + kind="Deployment", + metadata=client.V1ObjectMeta(name=f"{self.app_name}-deployment"), + spec=spec, + ) + return deployment diff --git a/stack_orchestrator/deploy/k8s/deploy_k8s.py b/stack_orchestrator/deploy/k8s/deploy_k8s.py new file mode 100644 index 00000000..95131966 --- /dev/null +++ b/stack_orchestrator/deploy/k8s/deploy_k8s.py @@ -0,0 +1,333 @@ +# Copyright © 2023 Vulcanize + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +from pathlib import Path +from kubernetes import client, config + +from stack_orchestrator import constants +from stack_orchestrator.deploy.deployer import Deployer, DeployerConfigGenerator +from stack_orchestrator.deploy.k8s.helpers import create_cluster, destroy_cluster, load_images_into_kind +from stack_orchestrator.deploy.k8s.helpers import pods_in_deployment, log_stream_from_string, generate_kind_config +from stack_orchestrator.deploy.k8s.cluster_info import ClusterInfo +from stack_orchestrator.opts import opts +from stack_orchestrator.deploy.deployment_context import DeploymentContext +from stack_orchestrator.util import error_exit + + +class AttrDict(dict): + def __init__(self, *args, **kwargs): + super(AttrDict, self).__init__(*args, **kwargs) + self.__dict__ = self + + +def _check_delete_exception(e: client.exceptions.ApiException): + if e.status == 404: + if opts.o.debug: + print("Failed to delete object, continuing") + else: + error_exit(f"k8s api error: {e}") + + +class K8sDeployer(Deployer): + name: str = "k8s" + type: str + core_api: client.CoreV1Api + apps_api: client.AppsV1Api + networking_api: client.NetworkingV1Api + k8s_namespace: str = "default" + kind_cluster_name: str + cluster_info: ClusterInfo + deployment_dir: Path + deployment_context: DeploymentContext + + def __init__(self, type, deployment_context: DeploymentContext, compose_files, compose_project_name, compose_env_file) -> None: + self.type = type + # TODO: workaround pending refactoring above to cope with being created with a null deployment_context + if deployment_context is None: + return + self.deployment_dir = deployment_context.deployment_dir + self.deployment_context = deployment_context + self.kind_cluster_name = compose_project_name + self.cluster_info = ClusterInfo() + self.cluster_info.int(compose_files, compose_env_file, compose_project_name, deployment_context.spec) + if (opts.o.debug): + print(f"Deployment dir: {deployment_context.deployment_dir}") + print(f"Compose 
files: {compose_files}") + print(f"Project name: {compose_project_name}") + print(f"Env file: {compose_env_file}") + print(f"Type: {type}") + + def connect_api(self): + if self.is_kind(): + config.load_kube_config(context=f"kind-{self.kind_cluster_name}") + else: + # Get the config file and pass to load_kube_config() + config.load_kube_config(config_file=self.deployment_dir.joinpath(constants.kube_config_filename).as_posix()) + self.core_api = client.CoreV1Api() + self.networking_api = client.NetworkingV1Api() + self.apps_api = client.AppsV1Api() + self.custom_obj_api = client.CustomObjectsApi() + + def up(self, detach, services): + + if self.is_kind(): + # Create the kind cluster + create_cluster(self.kind_cluster_name, self.deployment_dir.joinpath(constants.kind_config_filename)) + # Ensure the referenced containers are copied into kind + load_images_into_kind(self.kind_cluster_name, self.cluster_info.image_set) + self.connect_api() + + # Create the host-path-mounted PVs for this deployment + pvs = self.cluster_info.get_pvs() + for pv in pvs: + if opts.o.debug: + print(f"Sending this pv: {pv}") + pv_resp = self.core_api.create_persistent_volume(body=pv) + if opts.o.debug: + print("PVs created:") + print(f"{pv_resp}") + + # Figure out the PVCs for this deployment + pvcs = self.cluster_info.get_pvcs() + for pvc in pvcs: + if opts.o.debug: + print(f"Sending this pvc: {pvc}") + pvc_resp = self.core_api.create_namespaced_persistent_volume_claim(body=pvc, namespace=self.k8s_namespace) + if opts.o.debug: + print("PVCs created:") + print(f"{pvc_resp}") + # Process compose files into a Deployment + deployment = self.cluster_info.get_deployment() + # Create the k8s objects + if opts.o.debug: + print(f"Sending this deployment: {deployment}") + deployment_resp = self.apps_api.create_namespaced_deployment( + body=deployment, namespace=self.k8s_namespace + ) + if opts.o.debug: + print("Deployment created:") + print(f"{deployment_resp.metadata.namespace} 
{deployment_resp.metadata.name} \ + {deployment_resp.metadata.generation} {deployment_resp.spec.template.spec.containers[0].image}") + + service: client.V1Service = self.cluster_info.get_service() + service_resp = self.core_api.create_namespaced_service( + namespace=self.k8s_namespace, + body=service + ) + if opts.o.debug: + print("Service created:") + print(f"{service_resp}") + + # TODO: disable ingress for kind + ingress: client.V1Ingress = self.cluster_info.get_ingress() + + if opts.o.debug: + print(f"Sending this ingress: {ingress}") + ingress_resp = self.networking_api.create_namespaced_ingress( + namespace=self.k8s_namespace, + body=ingress + ) + if opts.o.debug: + print("Ingress created:") + print(f"{ingress_resp}") + + def down(self, timeout, volumes): + self.connect_api() + # Delete the k8s objects + # Create the host-path-mounted PVs for this deployment + pvs = self.cluster_info.get_pvs() + for pv in pvs: + if opts.o.debug: + print(f"Deleting this pv: {pv}") + try: + pv_resp = self.core_api.delete_persistent_volume(name=pv.metadata.name) + if opts.o.debug: + print("PV deleted:") + print(f"{pv_resp}") + except client.exceptions.ApiException as e: + _check_delete_exception(e) + + # Figure out the PVCs for this deployment + pvcs = self.cluster_info.get_pvcs() + for pvc in pvcs: + if opts.o.debug: + print(f"Deleting this pvc: {pvc}") + try: + pvc_resp = self.core_api.delete_namespaced_persistent_volume_claim( + name=pvc.metadata.name, namespace=self.k8s_namespace + ) + if opts.o.debug: + print("PVCs deleted:") + print(f"{pvc_resp}") + except client.exceptions.ApiException as e: + _check_delete_exception(e) + deployment = self.cluster_info.get_deployment() + if opts.o.debug: + print(f"Deleting this deployment: {deployment}") + try: + self.apps_api.delete_namespaced_deployment( + name=deployment.metadata.name, namespace=self.k8s_namespace + ) + except client.exceptions.ApiException as e: + _check_delete_exception(e) + + service: client.V1Service = 
self.cluster_info.get_service() + if opts.o.debug: + print(f"Deleting service: {service}") + try: + self.core_api.delete_namespaced_service( + namespace=self.k8s_namespace, + name=service.metadata.name + ) + except client.exceptions.ApiException as e: + _check_delete_exception(e) + + # TODO: disable ingress for kind + ingress: client.V1Ingress = self.cluster_info.get_ingress() + if opts.o.debug: + print(f"Deleting this ingress: {ingress}") + try: + self.networking_api.delete_namespaced_ingress( + name=ingress.metadata.name, namespace=self.k8s_namespace + ) + except client.exceptions.ApiException as e: + _check_delete_exception(e) + + if self.is_kind(): + # Destroy the kind cluster + destroy_cluster(self.kind_cluster_name) + + def status(self): + self.connect_api() + # Call whatever API we need to get the running container list + all_pods = self.core_api.list_pod_for_all_namespaces(watch=False) + pods = [] + + if all_pods.items: + for p in all_pods.items: + if self.cluster_info.app_name in p.metadata.name: + pods.append(p) + + if not pods: + return + + hostname = "?" + ip = "?" + tls = "?" 
+ try: + ingress = self.networking_api.read_namespaced_ingress(namespace=self.k8s_namespace, + name=self.cluster_info.get_ingress().metadata.name) + + cert = self.custom_obj_api.get_namespaced_custom_object( + group="cert-manager.io", + version="v1", + namespace=self.k8s_namespace, + plural="certificates", + name=ingress.spec.tls[0].secret_name + ) + + hostname = ingress.spec.tls[0].hosts[0] + ip = ingress.status.load_balancer.ingress[0].ip + tls = "notBefore: %s, notAfter: %s" % (cert["status"]["notBefore"], cert["status"]["notAfter"]) + except: # noqa: E722 + pass + + print("Ingress:") + print("\tHostname:", hostname) + print("\tIP:", ip) + print("\tTLS:", tls) + print("") + print("Pods:") + + for p in pods: + if p.metadata.deletion_timestamp: + print(f"\t{p.metadata.namespace}/{p.metadata.name}: Terminating ({p.metadata.deletion_timestamp})") + else: + print(f"\t{p.metadata.namespace}/{p.metadata.name}: Running ({p.metadata.creation_timestamp})") + + def ps(self): + self.connect_api() + pods = self.core_api.list_pod_for_all_namespaces(watch=False) + + ret = [] + + for p in pods.items: + if self.cluster_info.app_name in p.metadata.name: + pod_ip = p.status.pod_ip + ports = AttrDict() + for c in p.spec.containers: + if c.ports: + for prt in c.ports: + ports[str(prt.container_port)] = [AttrDict({ + "HostIp": pod_ip, + "HostPort": prt.container_port + })] + + ret.append(AttrDict({ + "id": f"{p.metadata.namespace}/{p.metadata.name}", + "name": p.metadata.name, + "namespace": p.metadata.namespace, + "network_settings": AttrDict({ + "ports": ports + }) + })) + + return ret + + def port(self, service, private_port): + # Since we handle the port mapping, need to figure out where this comes from + # Also look into whether it makes sense to get ports for k8s + pass + + def execute(self, service_name, command, tty, envs): + # Call the API to execute a command in a running container + pass + + def logs(self, services, tail, follow, stream): + self.connect_api() + pods = 
pods_in_deployment(self.core_api, "test-deployment") + if len(pods) > 1: + print("Warning: more than one pod in the deployment") + k8s_pod_name = pods[0] + log_data = self.core_api.read_namespaced_pod_log(k8s_pod_name, namespace="default", container="test") + return log_stream_from_string(log_data) + + def run(self, image: str, command=None, user=None, volumes=None, entrypoint=None, env={}, ports=[], detach=False): + # We need to figure out how to do this -- check why we're being called first + pass + + def is_kind(self): + return self.type == "k8s-kind" + + +class K8sDeployerConfigGenerator(DeployerConfigGenerator): + type: str + + def __init__(self, type: str) -> None: + self.type = type + super().__init__() + + def generate(self, deployment_dir: Path): + # No need to do this for the remote k8s case + if self.type == "k8s-kind": + # Check the file isn't already there + # Get the config file contents + content = generate_kind_config(deployment_dir) + if opts.o.debug: + print(f"kind config is: {content}") + config_file = deployment_dir.joinpath(constants.kind_config_filename) + # Write the file + with open(config_file, "w") as output_file: + output_file.write(content) diff --git a/stack_orchestrator/deploy/k8s/helpers.py b/stack_orchestrator/deploy/k8s/helpers.py new file mode 100644 index 00000000..9f968dbf --- /dev/null +++ b/stack_orchestrator/deploy/k8s/helpers.py @@ -0,0 +1,225 @@ +# Copyright © 2023 Vulcanize + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. 
+ +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +from kubernetes import client +import os +from pathlib import Path +import subprocess +from typing import Set, Mapping, List + +from stack_orchestrator.opts import opts +from stack_orchestrator.deploy.deploy_util import parsed_pod_files_map_from_file_names + + +def _run_command(command: str): + if opts.o.debug: + print(f"Running: {command}") + result = subprocess.run(command, shell=True) + if opts.o.debug: + print(f"Result: {result}") + + +def create_cluster(name: str, config_file: str): + _run_command(f"kind create cluster --name {name} --config {config_file}") + + +def destroy_cluster(name: str): + _run_command(f"kind delete cluster --name {name}") + + +def load_images_into_kind(kind_cluster_name: str, image_set: Set[str]): + for image in image_set: + _run_command(f"kind load docker-image {image} --name {kind_cluster_name}") + + +def pods_in_deployment(core_api: client.CoreV1Api, deployment_name: str): + pods = [] + pod_response = core_api.list_namespaced_pod(namespace="default", label_selector="app=test-app") + if opts.o.debug: + print(f"pod_response: {pod_response}") + for pod_info in pod_response.items: + pod_name = pod_info.metadata.name + pods.append(pod_name) + return pods + + +def log_stream_from_string(s: str): + # Note response has to be UTF-8 encoded because the caller expects to decode it + yield ("ignore", s.encode()) + + +def named_volumes_from_pod_files(parsed_pod_files): + # Parse the compose files looking for named volumes + named_volumes = [] + for pod in parsed_pod_files: + parsed_pod_file = parsed_pod_files[pod] + if "volumes" in parsed_pod_file: + volumes = parsed_pod_file["volumes"] + for volume in volumes.keys(): + # Volume definition looks like: + # 'laconicd-data': None + named_volumes.append(volume) + return named_volumes + + +def get_node_pv_mount_path(volume_name: str): + return f"/mnt/{volume_name}" + + +def 
volume_mounts_for_service(parsed_pod_files, service): + result = [] + # Find the service + for pod in parsed_pod_files: + parsed_pod_file = parsed_pod_files[pod] + if "services" in parsed_pod_file: + services = parsed_pod_file["services"] + for service_name in services: + if service_name == service: + service_obj = services[service_name] + if "volumes" in service_obj: + volumes = service_obj["volumes"] + for mount_string in volumes: + # Looks like: test-data:/data + (volume_name, mount_path) = mount_string.split(":") + volume_device = client.V1VolumeMount(mount_path=mount_path, name=volume_name) + result.append(volume_device) + return result + + +def volumes_for_pod_files(parsed_pod_files): + result = [] + for pod in parsed_pod_files: + parsed_pod_file = parsed_pod_files[pod] + if "volumes" in parsed_pod_file: + volumes = parsed_pod_file["volumes"] + for volume_name in volumes.keys(): + claim = client.V1PersistentVolumeClaimVolumeSource(claim_name=volume_name) + volume = client.V1Volume(name=volume_name, persistent_volume_claim=claim) + result.append(volume) + return result + + +def _get_host_paths_for_volumes(parsed_pod_files): + result = {} + for pod in parsed_pod_files: + parsed_pod_file = parsed_pod_files[pod] + if "volumes" in parsed_pod_file: + volumes = parsed_pod_file["volumes"] + for volume_name in volumes.keys(): + volume_definition = volumes[volume_name] + host_path = volume_definition["driver_opts"]["device"] + result[volume_name] = host_path + return result + + +def _make_absolute_host_path(data_mount_path: Path, deployment_dir: Path) -> Path: + if os.path.isabs(data_mount_path): + return data_mount_path + else: + # Python Path voodo that looks pretty odd: + return Path.cwd().joinpath(deployment_dir.joinpath("compose").joinpath(data_mount_path)).resolve() + + +def _generate_kind_mounts(parsed_pod_files, deployment_dir): + volume_definitions = [] + volume_host_path_map = _get_host_paths_for_volumes(parsed_pod_files) + # Note these paths are relative to 
the location of the pod files (at present) + # So we need to fix up to make them correct and absolute because kind assumes + # relative to the cwd. + for pod in parsed_pod_files: + parsed_pod_file = parsed_pod_files[pod] + if "services" in parsed_pod_file: + services = parsed_pod_file["services"] + for service_name in services: + service_obj = services[service_name] + if "volumes" in service_obj: + volumes = service_obj["volumes"] + for mount_string in volumes: + # Looks like: test-data:/data + (volume_name, mount_path) = mount_string.split(":") + volume_definitions.append( + f" - hostPath: {_make_absolute_host_path(volume_host_path_map[volume_name], deployment_dir)}\n" + f" containerPath: {get_node_pv_mount_path(volume_name)}" + ) + return ( + "" if len(volume_definitions) == 0 else ( + " extraMounts:\n" + f"{''.join(volume_definitions)}" + ) + ) + + +def _generate_kind_port_mappings(parsed_pod_files): + port_definitions = [] + for pod in parsed_pod_files: + parsed_pod_file = parsed_pod_files[pod] + if "services" in parsed_pod_file: + services = parsed_pod_file["services"] + for service_name in services: + service_obj = services[service_name] + if "ports" in service_obj: + ports = service_obj["ports"] + for port_string in ports: + # TODO handle the complex cases + # Looks like: 80 or something more complicated + port_definitions.append(f" - containerPort: {port_string}\n hostPort: {port_string}") + return ( + "" if len(port_definitions) == 0 else ( + " extraPortMappings:\n" + f"{''.join(port_definitions)}" + ) + ) + + +def envs_from_environment_variables_map(map: Mapping[str, str]) -> List[client.V1EnvVar]: + result = [] + for env_var, env_val in map.items(): + result.append(client.V1EnvVar(env_var, env_val)) + return result + + +# This needs to know: +# The service ports for the cluster +# The bind mounted volumes for the cluster +# +# Make ports like this: +# extraPortMappings: +# - containerPort: 80 +# hostPort: 80 +# # optional: set the bind address on the 
host +# # 0.0.0.0 is the current default +# listenAddress: "127.0.0.1" +# # optional: set the protocol to one of TCP, UDP, SCTP. +# # TCP is the default +# protocol: TCP +# Make bind mounts like this: +# extraMounts: +# - hostPath: /path/to/my/files +# containerPath: /files +def generate_kind_config(deployment_dir: Path): + compose_file_dir = deployment_dir.joinpath("compose") + # TODO: this should come from the stack file, not this way + pod_files = [p for p in compose_file_dir.iterdir() if p.is_file()] + parsed_pod_files_map = parsed_pod_files_map_from_file_names(pod_files) + port_mappings_yml = _generate_kind_port_mappings(parsed_pod_files_map) + mounts_yml = _generate_kind_mounts(parsed_pod_files_map, deployment_dir) + return ( + "kind: Cluster\n" + "apiVersion: kind.x-k8s.io/v1alpha4\n" + "nodes:\n" + "- role: control-plane\n" + f"{port_mappings_yml}\n" + f"{mounts_yml}\n" + ) diff --git a/stack_orchestrator/deploy/spec.py b/stack_orchestrator/deploy/spec.py new file mode 100644 index 00000000..c4f791bf --- /dev/null +++ b/stack_orchestrator/deploy/spec.py @@ -0,0 +1,42 @@ +# Copyright © 2022, 2023 Vulcanize + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +from pathlib import Path +import typing +from stack_orchestrator.util import get_yaml +from stack_orchestrator import constants + + +class Spec: + + obj: typing.Any + + def __init__(self) -> None: + pass + + def init_from_file(self, file_path: Path): + with file_path: + self.obj = get_yaml().load(open(file_path, "r")) + + def get_image_registry(self): + return (self.obj[constants.image_resigtry_key] + if self.obj and constants.image_resigtry_key in self.obj + else None) + + def get_http_proxy(self): + return (self.obj[constants.network_key][constants.http_proxy_key] + if self.obj and constants.network_key in self.obj + and constants.http_proxy_key in self.obj[constants.network_key] + else None) diff --git a/app/deploy/stack.py b/stack_orchestrator/deploy/stack.py similarity index 87% rename from app/deploy/stack.py rename to stack_orchestrator/deploy/stack.py index 1f94acdf..1a493534 100644 --- a/app/deploy/stack.py +++ b/stack_orchestrator/deploy/stack.py @@ -15,15 +15,16 @@ from pathlib import Path import typing -from app.util import get_yaml +from stack_orchestrator.util import get_yaml class Stack: + name: str obj: typing.Any - def __init__(self) -> None: - pass + def __init__(self, name: str) -> None: + self.name = name def init_from_file(self, file_path: Path): with file_path: diff --git a/app/deploy/stack_state.py b/stack_orchestrator/deploy/stack_state.py similarity index 100% rename from app/deploy/stack_state.py rename to stack_orchestrator/deploy/stack_state.py diff --git a/app/repos/__init__.py b/stack_orchestrator/deploy/webapp/__init__.py similarity index 100% rename from app/repos/__init__.py rename to stack_orchestrator/deploy/webapp/__init__.py diff --git a/stack_orchestrator/deploy/webapp/deploy_webapp.py b/stack_orchestrator/deploy/webapp/deploy_webapp.py new file mode 100644 index 00000000..391162c9 --- /dev/null +++ b/stack_orchestrator/deploy/webapp/deploy_webapp.py @@ -0,0 +1,118 @@ +# Copyright ©2023 Vulcanize + +# This program is free 
software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +import click +import os +from pathlib import Path +from urllib.parse import urlparse +from tempfile import NamedTemporaryFile + +from stack_orchestrator.util import error_exit, global_options2 +from stack_orchestrator.deploy.deployment_create import init_operation, create_operation +from stack_orchestrator.deploy.deploy import create_deploy_context +from stack_orchestrator.deploy.deploy_types import DeployCommandContext + + +def _fixup_container_tag(deployment_dir: str, image: str): + deployment_dir_path = Path(deployment_dir) + compose_file = deployment_dir_path.joinpath("compose", "docker-compose-webapp-template.yml") + # replace "cerc/webapp-container:local" in the file with our image tag + with open(compose_file) as rfile: + contents = rfile.read() + contents = contents.replace("cerc/webapp-container:local", image) + with open(compose_file, "w") as wfile: + wfile.write(contents) + + +def _fixup_url_spec(spec_file_name: str, url: str): + # url is like: https://example.com/path + parsed_url = urlparse(url) + http_proxy_spec = f''' + http-proxy: + - host-name: {parsed_url.hostname} + routes: + - path: '{parsed_url.path if parsed_url.path else "/"}' + proxy-to: webapp:3000 + ''' + spec_file_path = Path(spec_file_name) + with open(spec_file_path) as rfile: + contents = rfile.read() + contents = contents + http_proxy_spec + with open(spec_file_path, "w") as wfile: + 
wfile.write(contents) + + +@click.group() +@click.pass_context +def command(ctx): + '''manage a webapp deployment''' + + # Check that --stack wasn't supplied + if ctx.parent.obj.stack: + error_exit("--stack can't be supplied with the deploy-webapp command") + + +@command.command() +@click.option("--kube-config", help="Provide a config file for a k8s deployment") +@click.option("--image-registry", help="Provide a container image registry url for this k8s cluster") +@click.option("--deployment-dir", help="Create deployment files in this directory", required=True) +@click.option("--image", help="image to deploy", required=True) +@click.option("--url", help="url to serve", required=True) +@click.option("--env-file", help="environment file for webapp") +@click.pass_context +def create(ctx, deployment_dir, image, url, kube_config, image_registry, env_file): + '''create a deployment for the specified webapp container''' + # Do the equivalent of: + # 1. laconic-so --stack webapp-template deploy --deploy-to k8s init --output webapp-spec.yml + # --config (equivalent of the contents of my-config.env) + # 2. laconic-so --stack webapp-template deploy --deploy-to k8s create --deployment-dir test-deployment + # --spec-file webapp-spec.yml + # 3. 
Replace the container image tag with the specified image + deployment_dir_path = Path(deployment_dir) + # Check the deployment dir does not exist + if deployment_dir_path.exists(): + error_exit(f"Deployment dir {deployment_dir} already exists") + # Generate a temporary file name for the spec file + tf = NamedTemporaryFile(prefix="webapp-", suffix=".yml", delete=False) + spec_file_name = tf.name + # Specify the webapp template stack + stack = "webapp-template" + # TODO: support env file + deploy_command_context: DeployCommandContext = create_deploy_context( + global_options2(ctx), None, stack, None, None, None, env_file, "k8s" + ) + init_operation( + deploy_command_context, + stack, + "k8s", + None, + env_file, + kube_config, + image_registry, + spec_file_name, + None + ) + # Add the TLS and DNS spec + _fixup_url_spec(spec_file_name, url) + create_operation( + deploy_command_context, + spec_file_name, + deployment_dir, + None, + None + ) + # Fix up the container tag inside the deployment compose file + _fixup_container_tag(deployment_dir, image) + os.remove(spec_file_name) diff --git a/stack_orchestrator/deploy/webapp/run_webapp.py b/stack_orchestrator/deploy/webapp/run_webapp.py new file mode 100644 index 00000000..4dbf234a --- /dev/null +++ b/stack_orchestrator/deploy/webapp/run_webapp.py @@ -0,0 +1,65 @@ +# Copyright © 2022, 2023 Vulcanize + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +# Builds webapp containers + +# env vars: +# CERC_REPO_BASE_DIR defaults to ~/cerc + +# TODO: display the available list of containers; allow re-build of either all or specific containers + +import hashlib +import click +from dotenv import dotenv_values + +from stack_orchestrator import constants +from stack_orchestrator.deploy.deployer_factory import getDeployer + +WEBAPP_PORT = 3000 + + +@click.command() +@click.option("--image", help="image to deploy", required=True) +@click.option("--env-file", help="environment file for webapp") +@click.option("--port", help="port to use (default random)") +@click.pass_context +def command(ctx, image, env_file, port): + '''run the specified webapp container''' + + env = {} + if env_file: + env = dotenv_values(env_file) + + unique_cluster_descriptor = f"{image},{env}" + hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest() + cluster = f"laconic-webapp-{hash}" + + deployer = getDeployer(type=constants.compose_deploy_type, + deployment_context=None, + compose_files=None, + compose_project_name=cluster, + compose_env_file=None) + + ports = [] + if port: + ports = [(port, WEBAPP_PORT)] + container = deployer.run(image, command=[], user=None, volumes=[], entrypoint=None, env=env, ports=ports, detach=True) + + # Make configurable? + webappPort = f"{WEBAPP_PORT}/tcp" + # TODO: This assumes a Docker container object... 
+ if webappPort in container.network_settings.ports: + mapping = container.network_settings.ports[webappPort][0] + print(f"""Image: {image}\nID: {container.id}\nURL: http://localhost:{mapping['HostPort']}""") diff --git a/cli.py b/stack_orchestrator/main.py similarity index 68% rename from cli.py rename to stack_orchestrator/main.py index 5dea43ca..26a011b0 100644 --- a/cli.py +++ b/stack_orchestrator/main.py @@ -15,14 +15,17 @@ import click -from app.command_types import CommandOptions -from app.repos import setup_repositories -from app.build import build_containers -from app.build import build_npms -from app.deploy import deploy -from app import version -from app.deploy import deployment -from app import update +from stack_orchestrator.command_types import CommandOptions +from stack_orchestrator.repos import setup_repositories +from stack_orchestrator.build import build_containers +from stack_orchestrator.build import build_npms +from stack_orchestrator.build import build_webapp +from stack_orchestrator.deploy.webapp import run_webapp, deploy_webapp +from stack_orchestrator.deploy import deploy +from stack_orchestrator import version +from stack_orchestrator.deploy import deployment +from stack_orchestrator import opts +from stack_orchestrator import update CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) @@ -39,12 +42,17 @@ CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) @click.pass_context def cli(ctx, stack, quiet, verbose, dry_run, local_stack, debug, continue_on_error): """Laconic Stack Orchestrator""" - ctx.obj = CommandOptions(stack, quiet, verbose, dry_run, local_stack, debug, continue_on_error) + command_options = CommandOptions(stack, quiet, verbose, dry_run, local_stack, debug, continue_on_error) + opts.opts.o = command_options + ctx.obj = command_options cli.add_command(setup_repositories.command, "setup-repositories") cli.add_command(build_containers.command, "build-containers") cli.add_command(build_npms.command, "build-npms") 
+cli.add_command(build_webapp.command, "build-webapp") +cli.add_command(run_webapp.command, "run-webapp") +cli.add_command(deploy_webapp.command, "deploy-webapp") cli.add_command(deploy.command, "deploy") # deploy is an alias for deploy-system cli.add_command(deploy.command, "deploy-system") cli.add_command(deployment.command, "deployment") diff --git a/app/deploy/spec.py b/stack_orchestrator/opts.py similarity index 67% rename from app/deploy/spec.py rename to stack_orchestrator/opts.py index a23bc167..665da535 100644 --- a/app/deploy/spec.py +++ b/stack_orchestrator/opts.py @@ -1,4 +1,4 @@ -# Copyright © 2022, 2023 Vulcanize +# Copyright © 2023 Vulcanize # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by @@ -13,18 +13,8 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . -from pathlib import Path -import typing -from app.util import get_yaml +from stack_orchestrator.command_types import CommandOptions -class Spec: - - obj: typing.Any - - def __init__(self) -> None: - pass - - def init_from_file(self, file_path: Path): - with file_path: - self.obj = get_yaml().load(open(file_path, "r")) +class opts: + o: CommandOptions = None diff --git a/stack_orchestrator/repos/__init__.py b/stack_orchestrator/repos/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/app/repos/setup_repositories.py b/stack_orchestrator/repos/setup_repositories.py similarity index 91% rename from app/repos/setup_repositories.py rename to stack_orchestrator/repos/setup_repositories.py index 0ce11670..3612aed0 100644 --- a/app/repos/setup_repositories.py +++ b/stack_orchestrator/repos/setup_repositories.py @@ -25,7 +25,8 @@ import click import importlib.resources from pathlib import Path import yaml -from app.util import include_exclude_check +from stack_orchestrator.constants import stack_file_name +from 
stack_orchestrator.util import include_exclude_check, stack_is_external, error_exit class GitProgress(git.RemoteProgress): @@ -232,19 +233,26 @@ def command(ctx, include, exclude, git_ssh, check_only, pull, branches, branches os.makedirs(dev_root_path) # See: https://stackoverflow.com/a/20885799/1701505 - from app import data + from stack_orchestrator import data with importlib.resources.open_text(data, "repository-list.txt") as repository_list_file: all_repos = repository_list_file.read().splitlines() repos_in_scope = [] if stack: - # In order to be compatible with Python 3.8 we need to use this hack to get the path: - # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure - stack_file_path = Path(__file__).absolute().parent.parent.joinpath("data", "stacks", stack, "stack.yml") + if stack_is_external(stack): + stack_file_path = Path(stack).joinpath(stack_file_name) + else: + # In order to be compatible with Python 3.8 we need to use this hack to get the path: + # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure + stack_file_path = Path(__file__).absolute().parent.parent.joinpath("data", "stacks", stack, stack_file_name) + if not stack_file_path.exists(): + error_exit(f"stack {stack} does not exist") with stack_file_path: stack_config = yaml.safe_load(open(stack_file_path, "r")) - # TODO: syntax check the input here - repos_in_scope = stack_config['repos'] + if "repos" not in stack_config: + error_exit(f"stack {stack} does not define any repositories") + else: + repos_in_scope = stack_config["repos"] else: repos_in_scope = all_repos diff --git a/app/update.py b/stack_orchestrator/update.py similarity index 98% rename from app/update.py rename to stack_orchestrator/update.py index 9f70b06e..a41eabae 100644 --- a/app/update.py +++ b/stack_orchestrator/update.py @@ -23,7 +23,7 @@ import sys import stat import shutil import validators -from app.util import get_yaml +from 
stack_orchestrator.util import get_yaml def _download_url(url: str, file_path: Path): diff --git a/app/util.py b/stack_orchestrator/util.py similarity index 87% rename from app/util.py rename to stack_orchestrator/util.py index a25aacdb..0bd1a609 100644 --- a/app/util.py +++ b/stack_orchestrator/util.py @@ -18,6 +18,8 @@ import os.path import sys import ruamel.yaml from pathlib import Path +from dotenv import dotenv_values +from typing import Mapping def include_exclude_check(s, include, exclude): @@ -79,16 +81,16 @@ def get_pod_list(parsed_stack): return result -def get_plugin_code_path(stack): +def get_plugin_code_paths(stack): parsed_stack = get_parsed_stack_config(stack) pods = parsed_stack["pods"] - # TODO: Hack - pod = pods[0] - if type(pod) is str: - result = get_stack_file_path(stack).parent - else: - pod_root_dir = os.path.join(get_dev_root_path(None), pod["repository"].split("/")[-1], pod["path"]) - result = Path(os.path.join(pod_root_dir, "stack")) + result = [] + for pod in pods: + if type(pod) is str: + result.append(get_stack_file_path(stack).parent) + else: + pod_root_dir = os.path.join(get_dev_root_path(None), pod["repository"].split("/")[-1], pod["path"]) + result.append(Path(os.path.join(pod_root_dir, "stack"))) return result @@ -150,6 +152,12 @@ def get_parsed_deployment_spec(spec_file): sys.exit(1) +def stack_is_external(stack: str): + # Bit of a hack: if the supplied stack string represents + # a path that exists then we assume it must be external + return Path(stack).exists() if stack is not None else False + + def get_yaml(): # See: https://stackoverflow.com/a/45701840/1701505 yaml = ruamel.yaml.YAML() @@ -167,3 +175,12 @@ def global_options(ctx): # TODO: hack def global_options2(ctx): return ctx.parent.obj + + +def error_exit(s): + print(f"ERROR: {s}") + sys.exit(1) + + +def env_var_map_from_file(file: Path) -> Mapping[str, str]: + return dotenv_values(file) diff --git a/app/version.py b/stack_orchestrator/version.py similarity index 96% 
rename from app/version.py rename to stack_orchestrator/version.py index 5a5c33d4..68e47b44 100644 --- a/app/version.py +++ b/stack_orchestrator/version.py @@ -23,7 +23,7 @@ def command(ctx): '''print tool version''' # See: https://stackoverflow.com/a/20885799/1701505 - from app import data + from stack_orchestrator import data with importlib.resources.open_text(data, "build_tag.txt") as version_file: # TODO: code better version that skips comment lines version_string = version_file.read().splitlines()[1] diff --git a/tests/fixturenet-laconicd/run-test.sh b/tests/fixturenet-laconicd/run-test.sh new file mode 100755 index 00000000..8dad9917 --- /dev/null +++ b/tests/fixturenet-laconicd/run-test.sh @@ -0,0 +1,84 @@ +#!/usr/bin/env bash + +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +echo "$(date +"%Y-%m-%d %T"): Running stack-orchestrator Laconicd fixturenet test" +env +cat /etc/hosts +# Bit of a hack, test the most recent package +TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 ) +# Set a new unique repo dir +export CERC_REPO_BASE_DIR=$(mktemp -d $(pwd)/stack-orchestrator-fixturenet-laconicd-test.XXXXXXXXXX) +echo "$(date +"%Y-%m-%d %T"): Testing this package: $TEST_TARGET_SO" +echo "$(date +"%Y-%m-%d %T"): Test version command" +reported_version_string=$( $TEST_TARGET_SO version ) +echo "$(date +"%Y-%m-%d %T"): Version reported is: ${reported_version_string}" + +echo "$(date +"%Y-%m-%d %T"): Cloning laconicd repositories into: $CERC_REPO_BASE_DIR" +$TEST_TARGET_SO --stack fixturenet-laconicd setup-repositories + +echo "$(date +"%Y-%m-%d %T"): Building containers" +$TEST_TARGET_SO --stack fixturenet-laconicd build-containers +echo "$(date +"%Y-%m-%d %T"): Starting stack" +$TEST_TARGET_SO --stack fixturenet-laconicd deploy --cluster laconicd up +echo "$(date +"%Y-%m-%d %T"): Stack started" +# Verify that the fixturenet is up and running +$TEST_TARGET_SO --stack fixturenet-laconicd deploy --cluster laconicd ps + +timeout=900 # 15 minutes +echo 
"$(date +"%Y-%m-%d %T"): Getting initial block number. Timeout set to $timeout seconds"
+start_time=$(date +%s)
+elapsed_time=0
+initial_block_number=null
+while [ "$initial_block_number" == "null" ] && [ $elapsed_time -lt $timeout ]; do
+  sleep 10
+  echo "$(date +"%Y-%m-%d %T"): Waiting for initial block..."
+  initial_block_number=$(docker exec laconicd-laconicd-1 /usr/bin/laconicd status | jq -r .SyncInfo.latest_block_height)
+  current_time=$(date +%s)
+  elapsed_time=$((current_time - start_time))
+done
+
+subsequent_block_number=$initial_block_number
+
+# if initial block was still null after timeout, assume chain did not start successfully and skip finding subsequent block
+if [[ $initial_block_number != "null" ]]; then
+  timeout=300
+  echo "$(date +"%Y-%m-%d %T"): Getting subsequent block number. Timeout set to $timeout seconds"
+  start_time=$(date +%s)
+  elapsed_time=0
+  # wait for 5 blocks or timeout
+  while [ "$subsequent_block_number" -le $((initial_block_number + 5)) ] && [ $elapsed_time -lt $timeout ]; do
+    sleep 10
+    echo "$(date +"%Y-%m-%d %T"): Waiting for five blocks or $timeout seconds..."
+    subsequent_block_number=$(docker exec laconicd-laconicd-1 /usr/bin/laconicd status | jq -r .SyncInfo.latest_block_height)
+    current_time=$(date +%s)
+    elapsed_time=$((current_time - start_time))
+  done
+fi
+
+# will return 0 if either of the above loops timed out
+block_number_difference=$((subsequent_block_number - initial_block_number))
+
+echo "$(date +"%Y-%m-%d %T"): Results of block height queries:"
+echo "Initial block height: $initial_block_number"
+echo "Subsequent block height: $subsequent_block_number"
+
+# Block height difference should be between 1 and some small number
+if [[ $block_number_difference -gt 1 && $block_number_difference -lt 100 ]]; then
+  echo "Test passed"
+  test_result=0
+else
+  echo "Test failed: block numbers were ${initial_block_number} and ${subsequent_block_number}"
+  echo "Logs from stack:"
+  $TEST_TARGET_SO --stack fixturenet-laconicd deploy logs
+  test_result=1
+fi
+
+$TEST_TARGET_SO --stack fixturenet-laconicd deploy --cluster laconicd down --delete-volumes
+echo "$(date +"%Y-%m-%d %T"): Removing cloned repositories"
+rm -rf $CERC_REPO_BASE_DIR
+echo "$(date +"%Y-%m-%d %T"): Test finished"
+exit $test_result
diff --git a/tests/k8s-deploy/run-deploy-test.sh b/tests/k8s-deploy/run-deploy-test.sh
new file mode 100755
index 00000000..b7ee9dd0
--- /dev/null
+++ b/tests/k8s-deploy/run-deploy-test.sh
@@ -0,0 +1,76 @@
+#!/usr/bin/env bash
+set -e
+if [ -n "$CERC_SCRIPT_DEBUG" ]; then
+  set -x
+fi
+# Note: eventually this test should be folded into ../deploy/
+# but keeping it separate for now for convenience
+TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
+# Dump environment variables for debugging
+echo "Environment variables:"
+env
+# Set a non-default repo dir
+export CERC_REPO_BASE_DIR=~/stack-orchestrator-test/repo-base-dir
+echo "Testing this package: $TEST_TARGET_SO"
+echo "Test version command"
+reported_version_string=$( $TEST_TARGET_SO version )
+echo "Version reported is: ${reported_version_string}"
+echo "Cloning repositories into: $CERC_REPO_BASE_DIR"
+rm -rf $CERC_REPO_BASE_DIR
+mkdir -p $CERC_REPO_BASE_DIR
+# Test basic stack-orchestrator deploy
+test_deployment_dir=$CERC_REPO_BASE_DIR/test-deployment-dir
+test_deployment_spec=$CERC_REPO_BASE_DIR/test-deployment-spec.yml
+$TEST_TARGET_SO --stack test deploy --deploy-to k8s-kind init --output $test_deployment_spec --config CERC_TEST_PARAM_1=PASSED
+# Check the file now exists
+if [ ! -f "$test_deployment_spec" ]; then
+  echo "deploy init test: spec file not present"
+  echo "deploy init test: FAILED"
+  exit 1
+fi
+echo "deploy init test: passed"
+$TEST_TARGET_SO --stack test deploy create --spec-file $test_deployment_spec --deployment-dir $test_deployment_dir
+# Check the deployment dir exists
+if [ ! -d "$test_deployment_dir" ]; then
+  echo "deploy create test: deployment directory not present"
+  echo "deploy create test: FAILED"
+  exit 1
+fi
+echo "deploy create test: passed"
+# Check the file written by the create command in the stack now exists
+if [ ! -f "$test_deployment_dir/create-file" ]; then
+  echo "deploy create test: create output file not present"
+  echo "deploy create test: FAILED"
+  exit 1
+fi
+# And has the right content
+create_file_content=$(<$test_deployment_dir/create-file)
+if [ ! "$create_file_content" == "create-command-output-data" ]; then
+  echo "deploy create test: create output file contents not correct"
+  echo "deploy create test: FAILED"
+  exit 1
+fi
+echo "deploy create output file test: passed"
+# Try to start the deployment
+$TEST_TARGET_SO deployment --dir $test_deployment_dir start
+# TODO: add a check to see if the container is up
+# Sleep because k8s not up yet
+sleep 30
+# Check logs command works
+log_output_3=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
+if [[ "$log_output_3" == *"Filesystem is fresh"* ]]; then
+  echo "deployment logs test: passed"
+else
+  echo "deployment logs test: FAILED"
+  exit 1
+fi
+# Check the config variable CERC_TEST_PARAM_1 was passed correctly
+if [[ "$log_output_3" == *"Test-param-1: PASSED"* ]]; then
+  echo "deployment config test: passed"
+else
+  echo "deployment config test: FAILED"
+  exit 1
+fi
+# Stop and clean up
+$TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes
+echo "Test passed"
diff --git a/tests/webapp-test/run-webapp-test.sh b/tests/webapp-test/run-webapp-test.sh
new file mode 100755
index 00000000..5db382f8
--- /dev/null
+++ b/tests/webapp-test/run-webapp-test.sh
@@ -0,0 +1,66 @@
+#!/usr/bin/env bash
+set -e
+
+if [ -n "$CERC_SCRIPT_DEBUG" ]; then
+  set -x
+fi
+
+# Dump environment variables for debugging
+echo "Environment variables:"
+env
+# Test basic stack-orchestrator webapp
+echo "Running stack-orchestrator webapp test"
+# Bit of a hack, test the most recent package
+TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
+# Set a non-default repo dir
+export CERC_REPO_BASE_DIR=~/stack-orchestrator-test/repo-base-dir
+echo "Testing this package: $TEST_TARGET_SO"
+echo "Test version command"
+reported_version_string=$( $TEST_TARGET_SO version )
+echo "Version reported is: ${reported_version_string}"
+echo "Cloning repositories into: $CERC_REPO_BASE_DIR"
+rm -rf $CERC_REPO_BASE_DIR
+mkdir -p $CERC_REPO_BASE_DIR
+git clone https://git.vdb.to/cerc-io/test-progressive-web-app.git $CERC_REPO_BASE_DIR/test-progressive-web-app
+
+# Test webapp command execution
+$TEST_TARGET_SO build-webapp --source-repo $CERC_REPO_BASE_DIR/test-progressive-web-app
+
+CHECK="SPECIAL_01234567890_TEST_STRING"
+
+set +e
+
+CONTAINER_ID=$(docker run -p 3000:3000 -d -e CERC_SCRIPT_DEBUG=$CERC_SCRIPT_DEBUG cerc/test-progressive-web-app:local)
+sleep 3
+wget -t 7 -O test.before -m http://localhost:3000
+
+docker logs $CONTAINER_ID
+docker rm -f $CONTAINER_ID
+
+CONTAINER_ID=$(docker run -p 3000:3000 -e CERC_WEBAPP_DEBUG=$CHECK -e CERC_SCRIPT_DEBUG=$CERC_SCRIPT_DEBUG -d cerc/test-progressive-web-app:local)
+sleep 3
+wget -t 7 -O test.after -m http://localhost:3000
+
+docker logs $CONTAINER_ID
+docker rm -f $CONTAINER_ID
+
+echo "###########################################################################"
+echo ""
+
+grep "$CHECK" test.before > /dev/null
+if [ $? -ne 1 ]; then
+  echo "BEFORE: FAILED"
+  exit 1
+else
+  echo "BEFORE: PASSED"
+fi
+
+grep "$CHECK" test.after > /dev/null
+if [ $? -ne 0 ]; then
+  echo "AFTER: FAILED"
+  exit 1
+else
+  echo "AFTER: PASSED"
+fi
+
+exit 0