diff --git a/app/data/compose/docker-compose-mainnet-eth-ipld-eth-db.yml b/app/data/compose/docker-compose-mainnet-eth-ipld-eth-db.yml
deleted file mode 100644
index 49cc2de3..00000000
--- a/app/data/compose/docker-compose-mainnet-eth-ipld-eth-db.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-version: "3.2"
-
-services:
- migrations:
- restart: on-failure
- depends_on:
- ipld-eth-db:
- condition: service_healthy
- image: cerc/ipld-eth-db:local
- env_file:
- - ../config/mainnet-eth-ipld-eth-db/db.env
-
- ipld-eth-db:
- image: timescale/timescaledb:2.8.1-pg14
- restart: always
- env_file:
- - ../config/mainnet-eth-ipld-eth-db/db.env
- volumes:
- - mainnet_eth_ipld_eth_db:/var/lib/postgresql/data
- healthcheck:
- test: ["CMD", "nc", "-v", "localhost", "5432"]
- interval: 30s
- timeout: 10s
- retries: 10
- start_period: 3s
- ports:
- - "5432"
-volumes:
- mainnet_eth_ipld_eth_db:
diff --git a/app/data/compose/docker-compose-mainnet-eth-ipld-eth-server.yml b/app/data/compose/docker-compose-mainnet-eth-ipld-eth-server.yml
deleted file mode 100644
index 4341c6a1..00000000
--- a/app/data/compose/docker-compose-mainnet-eth-ipld-eth-server.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-version: "3.7"
-services:
- ipld-eth-server:
- restart: always
- depends_on:
- ipld-eth-db:
- condition: service_healthy
- image: cerc/ipld-eth-server:local
- env_file:
- - ../config/mainnet-eth-ipld-eth-db/db.env
- - ../config/mainnet-eth-ipld-eth-server/srv.env
- volumes:
- - ../config/mainnet-eth-ipld-eth-server/config.toml:/app/config.toml:ro
- ports:
- - "8081"
- - "8082"
- - "8090"
- - "40001"
- healthcheck:
- test: ["CMD", "nc", "-v", "localhost", "8081"]
- interval: 20s
- timeout: 5s
- retries: 15
- start_period: 5s
diff --git a/app/data/compose/docker-compose-mainnet-eth-keycloak.yml b/app/data/compose/docker-compose-mainnet-eth-keycloak.yml
index 1674c62e..dfa9a804 100644
--- a/app/data/compose/docker-compose-mainnet-eth-keycloak.yml
+++ b/app/data/compose/docker-compose-mainnet-eth-keycloak.yml
@@ -6,7 +6,7 @@ services:
env_file:
- ../config/mainnet-eth-keycloak/keycloak.env
healthcheck:
- test: ["CMD", "nc", "-v", "localhost", "35432"]
+ test: ["CMD", "nc", "-v", "localhost", "5432"]
interval: 30s
timeout: 10s
retries: 10
@@ -14,7 +14,7 @@ services:
volumes:
- mainnet_eth_keycloak_db:/var/lib/postgresql/data
ports:
- - 35432
+ - 5432
keycloak:
image: cerc/keycloak:local
diff --git a/app/data/compose/docker-compose-mainnet-eth-plugeth.yml b/app/data/compose/docker-compose-mainnet-eth-plugeth.yml
deleted file mode 100644
index a8b301d2..00000000
--- a/app/data/compose/docker-compose-mainnet-eth-plugeth.yml
+++ /dev/null
@@ -1,72 +0,0 @@
-
-services:
-
- mainnet-eth-geth-1:
- restart: always
- hostname: mainnet-eth-geth-1
- cap_add:
- - SYS_PTRACE
- image: cerc/plugeth-with-plugins:local
- entrypoint: /bin/sh
- command: -c "/opt/run-geth.sh"
- env_file:
- - ../config/mainnet-eth-ipld-eth-db/db.env
- - ../config/mainnet-eth-plugeth/geth.env
- volumes:
- - mainnet_eth_plugeth_geth_1_data:/data
- - mainnet_eth_plugeth_config_data:/etc/mainnet-eth
- - ../config/mainnet-eth-plugeth/scripts/run-geth.sh:/opt/run-geth.sh
- healthcheck:
- test: ["CMD", "nc", "-v", "localhost", "8545"]
- interval: 30s
- timeout: 10s
- retries: 10
- start_period: 3s
- ports:
- # http api
- - "8545"
- # ws api
- - "8546"
- # ws el
- - "8551"
- # p2p
- - "30303"
- - "30303/udp"
- # debugging
- - "40000"
- # metrics
- - "6060"
-
- mainnet-eth-lighthouse-1:
- restart: always
- hostname: mainnet-eth-lighthouse-1
- healthcheck:
- test: ["CMD", "wget", "--tries=1", "--connect-timeout=1", "--quiet", "-O", "-", "http://localhost:5052/eth/v2/beacon/blocks/head"]
- interval: 30s
- timeout: 10s
- retries: 10
- start_period: 30s
- environment:
- LIGHTHOUSE_EXECUTION_ENDPOINT: "http://mainnet-eth-geth-1:8551"
- env_file:
- - ../config/mainnet-eth-plugeth/lighthouse.env
- image: cerc/lighthouse:local
- entrypoint: /bin/sh
- command: -c "/opt/run-lighthouse.sh"
- volumes:
- - mainnet_eth_plugeth_lighthouse_1_data:/data
- - mainnet_eth_plugeth_config_data:/etc/mainnet-eth
- - ../config/mainnet-eth-plugeth/scripts/run-lighthouse.sh:/opt/run-lighthouse.sh
- ports:
- # api
- - "5052"
- # metrics
- - "5054"
- # p2p
- - "9000"
- - "9000/udp"
-
-volumes:
- mainnet_eth_plugeth_config_data:
- mainnet_eth_plugeth_geth_1_data:
- mainnet_eth_plugeth_lighthouse_1_data:
diff --git a/app/data/config/mainnet-eth-ipld-eth-db/db.env b/app/data/config/mainnet-eth-ipld-eth-db/db.env
deleted file mode 100644
index 4ec11109..00000000
--- a/app/data/config/mainnet-eth-ipld-eth-db/db.env
+++ /dev/null
@@ -1,15 +0,0 @@
-DATABASE_HOSTNAME="ipld-eth-db"
-DATABASE_NAME="cerc"
-DATABASE_PASSWORD="CHANGEME"
-DATABASE_PORT=5432
-DATABASE_USER="vdbm"
-
-POSTGRES_DB="${DATABASE_NAME}"
-POSTGRES_PASSWORD="${DATABASE_PASSWORD}"
-POSTGRES_USER="${DATABASE_USER}"
-
-CERC_STATEDIFF_DB_HOST="${DATABASE_HOSTNAME}"
-CERC_STATEDIFF_DB_NAME="${DATABASE_NAME}"
-CERC_STATEDIFF_DB_PASSWORD="${DATABASE_PASSWORD}"
-CERC_STATEDIFF_DB_PORT=${DATABASE_PORT}
-CERC_STATEDIFF_DB_USER="${DATABASE_USER}"
diff --git a/app/data/config/mainnet-eth-ipld-eth-server/config.toml b/app/data/config/mainnet-eth-ipld-eth-server/config.toml
deleted file mode 100644
index c433df28..00000000
--- a/app/data/config/mainnet-eth-ipld-eth-server/config.toml
+++ /dev/null
@@ -1,33 +0,0 @@
-[database]
- name = "" # $DATABASE_NAME
- hostname = "" # $DATABASE_HOSTNAME
- port = 5432 # $DATABASE_PORT
- user = "" # $DATABASE_USER
- password = "" # $DATABASE_PASSWORD
-
-[log]
- level = "info" # $LOG_LEVEL
-
-[server]
- ipc = false
- ipcPath = "" # $SERVER_IPC_PATH
- ws = false
- wsPath = "0.0.0.0:8080" # $SERVER_WS_PATH
- http = true
- httpPath = "0.0.0.0:8081" # $SERVER_HTTP_PATH
- graphql = false # $SERVER_GRAPHQL
- graphqlPath = "0.0.0.0:8082" # $SERVER_GRAPHQL_PATH
-
-[ethereum]
- chainConfig = "" # ETH_CHAIN_CONFIG
- chainID = "1" # $ETH_CHAIN_ID
- rpcGasCap = "1000000000000" # $ETH_RPC_GAS_CAP
- httpPath = "mainnet-eth-geth-1:8545" # $ETH_HTTP_PATH
- supportsStateDiff = true # $ETH_SUPPORTS_STATEDIFF
- stateDiffTimeout = "4m" # $ETH_STATEDIFF_TIMEOUT
- forwardEthCalls = false # $ETH_FORWARD_ETH_CALLS
- proxyOnError = true # $ETH_PROXY_ON_ERROR
- nodeID = "" # $ETH_NODE_ID
- clientName = "" # $ETH_CLIENT_NAME
- genesisBlock = "" # $ETH_GENESIS_BLOCK
- networkID = "1" # $ETH_NETWORK_ID
diff --git a/app/data/config/mainnet-eth-ipld-eth-server/srv.env b/app/data/config/mainnet-eth-ipld-eth-server/srv.env
deleted file mode 100644
index 34c79ce4..00000000
--- a/app/data/config/mainnet-eth-ipld-eth-server/srv.env
+++ /dev/null
@@ -1,27 +0,0 @@
-CERC_REMOTE_DEBUG="false"
-
-LOG_LEVEL="debug"
-
-ETH_CHAIN_ID=1
-ETH_CLIENT_NAME="Geth"
-ETH_FORWARD_ETH_CALLS="false"
-ETH_FORWARD_GET_STORAGE_AT="false"
-ETH_GENESIS_BLOCK="0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"
-ETH_HTTP_PATH="mainnet-eth-geth-1:8545"
-ETH_NETWORK_ID=1
-ETH_NODE_ID=1112
-ETH_PROXY_ON_ERROR="true"
-ETH_RPC_GAS_CAP=1000000000000
-ETH_SUPPORTS_STATEDIFF="true"
-ETH_STATEDIFF_TIMEOUT=4m
-
-SERVER_HTTP_PATH=0.0.0.0:8081
-SERVER_GRAPHQL="false"
-SERVER_GRAPHQLPATH=0.0.0.0:8082
-
-METRICS="true"
-PROM_HTTP="true"
-PROM_HTTP_ADDR="0.0.0.0"
-PROM_HTTP_PORT="8090"
-
-VDB_COMMAND="serve"
diff --git a/app/data/config/mainnet-eth-keycloak/keycloak.env b/app/data/config/mainnet-eth-keycloak/keycloak.env
index 31a19079..f37fdd30 100644
--- a/app/data/config/mainnet-eth-keycloak/keycloak.env
+++ b/app/data/config/mainnet-eth-keycloak/keycloak.env
@@ -1,11 +1,8 @@
POSTGRES_DB=keycloak
POSTGRES_USER=keycloak
POSTGRES_PASSWORD=keycloak
-# Don't change this unless you also change the healthcheck in docker-compose-mainnet-eth-keycloak.yml
-PGPORT=35432
KC_DB=postgres
KC_DB_URL_HOST=keycloak-db
-KC_DB_URL_PORT=${PGPORT}
KC_DB_URL_DATABASE=${POSTGRES_DB}
KC_DB_USERNAME=${POSTGRES_USER}
KC_DB_PASSWORD=${POSTGRES_PASSWORD}
diff --git a/app/data/config/mainnet-eth-keycloak/nginx.example b/app/data/config/mainnet-eth-keycloak/nginx.example
index 758f0ce1..67095551 100644
--- a/app/data/config/mainnet-eth-keycloak/nginx.example
+++ b/app/data/config/mainnet-eth-keycloak/nginx.example
@@ -15,49 +15,42 @@ server {
}
upstream geth-pool {
- server server-a:8545 max_fails=10 fail_timeout=2s;
- server server-c:8545 max_fails=10 fail_timeout=2s backup;
- server server-b:8545 max_fails=10 fail_timeout=2s backup;
- keepalive 200;
+ keepalive 100;
+ hash $user_id consistent;
+ server server-a:8545;
+ server server-b:8545;
+ server server-c:8545;
}
+# self-registration is handled by a single server, for clarity
upstream reg-ui-pool {
- keepalive 2;
+ keepalive 100;
server server-a:8085;
}
upstream reg-api-pool {
- keepalive 2;
+ keepalive 100;
server server-a:8086;
}
-# auth uses the reg server when available
+# auth uses server-a if available
upstream auth-pool {
- keepalive 10;
+ keepalive 100;
server server-a:8080;
server server-b:8080 backup;
server server-c:8080 backup;
}
-
-log_format upstreamlog '[$time_local] $msec $remote_addr $user_id - $server_name($host) to $upstream_addr: $request $status upstream_response_time $upstream_response_time request_time $request_time';
-proxy_cache_path /var/cache/nginx/auth_cache levels=1 keys_zone=auth_cache:1m max_size=5m inactive=60m;
-
+log_format upstreamlog '[$time_local] $remote_addr $user_id - $server_name $host to: $upstream_addr: $request $status upstream_response_time $upstream_response_time msec $msec request_time $request_time';
+proxy_cache_path /var/cache/nginx/auth_cache levels=1 keys_zone=auth_cache:1m max_size=5m inactive=60m;
server {
listen 443 ssl http2;
server_name my.example.com;
- keepalive_requests 500000;
- keepalive_timeout 90s;
- http2_max_requests 5000000;
- http2_max_concurrent_streams 1024;
- http2_idle_timeout 3m;
- http2_recv_timeout 30s;
access_log /var/log/nginx/my.example.com-access.log upstreamlog;
error_log /var/log/nginx/my.example.com-error.log;
ssl_certificate /etc/nginx/ssl/my.example.com/cert.pem;
ssl_certificate_key /etc/nginx/ssl/my.example.com/key.pem;
- ssl_session_cache shared:SSL:10m;
error_page 500 502 503 504 /50x.html;
location = /50x.html {
@@ -67,6 +60,7 @@ server {
#rewrite ^/?$ /newuser/;
rewrite ^/?$ https://www.example.com/;
+
# geth-pool ETH API
location ~ ^/v1/eth/?([^/]*)$ {
set $apiKey $1;
@@ -77,8 +71,8 @@ server {
auth_request_set $user_id $sent_http_x_user_id;
rewrite /.*$ / break;
- client_max_body_size 3m;
- client_body_buffer_size 3m;
+ client_max_body_size 3m;
+ client_body_buffer_size 3m;
proxy_buffer_size 32k;
proxy_buffers 16 32k;
proxy_busy_buffers_size 96k;
@@ -86,10 +80,8 @@ server {
proxy_pass http://geth-pool;
proxy_set_header X-Original-Remote-Addr $remote_addr;
proxy_set_header X-User-Id $user_id;
- proxy_http_version 1.1;
- proxy_set_header Connection "";
}
-
+
# keycloak
location = /auth {
internal;
@@ -103,8 +95,6 @@ server {
proxy_set_header X-Original-URI $request_uri;
proxy_set_header X-Original-Remote-Addr $remote_addr;
proxy_set_header X-Original-Host $host;
- proxy_http_version 1.1;
- proxy_set_header Connection "";
}
location /newuser/ {
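Note on the geth-pool change above: replacing the primary/backup arrangement with `hash $user_id consistent` pins each authenticated user id to a stable geth backend instead of funnelling everyone through server-a. Below is only a conceptual Python sketch of that affinity property (a toy hash ring, not nginx's ketama implementation); the upstream names are the placeholders from the example config.

```python
# Conceptual sketch of per-user affinity via consistent hashing; this is a toy
# hash ring, not nginx's internals, and the upstreams are placeholder names.
import bisect
import hashlib


def _hash(key: str) -> int:
    return int(hashlib.md5(key.encode()).hexdigest(), 16)


class HashRing:
    def __init__(self, nodes, vnodes=64):
        # Several virtual points per node smooth out the key distribution.
        self.ring = sorted((_hash(f"{node}#{i}"), node) for node in nodes for i in range(vnodes))
        self.points = [point for point, _ in self.ring]

    def node_for(self, key: str) -> str:
        idx = bisect.bisect(self.points, _hash(key)) % len(self.ring)
        return self.ring[idx][1]


ring = HashRing(["server-a:8545", "server-b:8545", "server-c:8545"])
for user_id in ("alice", "bob", "carol"):
    # A given user id always lands on the same geth backend.
    assert ring.node_for(user_id) == ring.node_for(user_id)
    print(user_id, "->", ring.node_for(user_id))
```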
diff --git a/app/data/config/mainnet-eth-plugeth/geth.env b/app/data/config/mainnet-eth-plugeth/geth.env
deleted file mode 100644
index 5c936d36..00000000
--- a/app/data/config/mainnet-eth-plugeth/geth.env
+++ /dev/null
@@ -1,75 +0,0 @@
-# Enable remote debugging using dlv
-CERC_REMOTE_DEBUG=false
-
-# Enable startup script debug output.
-CERC_SCRIPT_DEBUG=false
-
-# Simple toggle to choose either a 'full' node or an 'archive' node
-# (controls the values of --syncmode --gcmode --snapshot)
-CERC_GETH_MODE_QUICK_SET=archive
-
-# Path to plugeth plugins.
-CERC_PLUGINS_DIR="/usr/local/lib/plugeth"
-
-# Will turn on statediffing automatically if CERC_STATEDIFF_DB_HOST exists (see ../mainnet-eth-ipld-eth-db/db.env).
-CERC_RUN_STATEDIFF="detect"
-
-# The minimum necessary verion of the DB to enable statediffing.
-CERC_STATEDIFF_DB_GOOSE_MIN_VER=18
-
-# Whether all statediff-related DB statements should be logged (useful for debugging).
-CERC_STATEDIFF_DB_LOG_STATEMENTS=false
-
-# The number of concurrent workers to process state diff objects
-CERC_STATEDIFF_WORKERS=16
-
-# Each statediffing node should have a unique node ID.
-CERC_STATEDIFF_DB_NODE_ID=1111
-
-# Optional custom node name.
-# GETH_NODE_NAME=""
-
-# Specify any other geth CLI options.
-GETH_OPTS=""
-
-# --cache
-GETH_CACHE=1024
-
-# --cache.database
-GETH_CACHE_DB=50
-
-# --cache.gc
-GETH_CACHE_GC=25
-
-# --cache.trie
-GETH_CACHE_TRIE=15
-
-# --datadir
-GETH_DATADIR="/data"
-
-# --http.api
-GETH_HTTP_API="eth,web3,net"
-
-# --authrpc.jwtsecret
-GETH_JWTSECRET="/etc/mainnet-eth/jwtsecret"
-
-# --maxpeers
-GETH_MAX_PEERS=100
-
-# --rpc.evmtimeout
-GETH_RPC_EVMTIMEOUT=0
-
-# --rpc.gascap
-GETH_RPC_GASCAP=0
-
-# --txlookuplimit
-GETH_TXLOOKUPLIMIT=0
-
-# --verbosity
-GETH_VERBOSITY=3
-
-# --log.vmodule
-GETH_VMODULE="rpc/*=4"
-
-# --ws.api
-GETH_WS_API="eth,web3,net"
diff --git a/app/data/config/mainnet-eth-plugeth/lighthouse.env b/app/data/config/mainnet-eth-plugeth/lighthouse.env
deleted file mode 100644
index 11fc6b69..00000000
--- a/app/data/config/mainnet-eth-plugeth/lighthouse.env
+++ /dev/null
@@ -1,33 +0,0 @@
-# Enable startup script debug output.
-CERC_SCRIPT_DEBUG=false
-
-# Specify any other lighthouse CLI options.
-LIGHTHOUSE_OPTS=""
-
-# Override the advertised public IP (optional)
-# --enr-address
-#LIGHTHOUSE_ENR_ADDRESS=""
-
-# --checkpoint-sync-url
-LIGHTHOUSE_CHECKPOINT_SYNC_URL="https://beaconstate.ethstaker.cc"
-
-# --checkpoint-sync-url-timeout
-LIGHTHOUSE_CHECKPOINT_SYNC_URL_TIMEOUT=300
-
-# --datadir
-LIGHTHOUSE_DATADIR=/data
-
-# --debug-level
-LIGHTHOUSE_DEBUG_LEVEL=info
-
-# --http-port
-LIGHTHOUSE_HTTP_PORT=5052
-
-# --execution-jwt
-LIGHTHOUSE_JWTSECRET=/etc/mainnet-eth/jwtsecret
-
-# --metrics-port
-LIGHTHOUSE_METRICS_PORT=5054
-
-# --port --enr-udp-port --enr-tcp-port
-LIGHTHOUSE_NETWORK_PORT=9000
diff --git a/app/data/config/mainnet-eth-plugeth/scripts/run-geth.sh b/app/data/config/mainnet-eth-plugeth/scripts/run-geth.sh
deleted file mode 100755
index 1971c2d0..00000000
--- a/app/data/config/mainnet-eth-plugeth/scripts/run-geth.sh
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/bin/sh
-if [[ "true" == "$CERC_SCRIPT_DEBUG" ]]; then
- set -x
-fi
-
-START_CMD="geth"
-if [[ "true" == "$CERC_REMOTE_DEBUG" ]] && [[ -x "/usr/local/bin/dlv" ]]; then
- START_CMD="/usr/local/bin/dlv --listen=:40000 --headless=true --api-version=2 --accept-multiclient exec /usr/local/bin/geth --continue --"
-fi
-
-# See https://linuxconfig.org/how-to-propagate-a-signal-to-child-processes-from-a-bash-script
-cleanup() {
- echo "Signal received, cleaning up..."
-
- # Kill the child process first (CERC_REMOTE_DEBUG=true uses dlv which starts geth as a child process)
- pkill -P ${geth_pid}
- sleep 2
- kill $(jobs -p)
-
- wait
- echo "Done"
-}
-trap 'cleanup' SIGINT SIGTERM
-
-MODE_FLAGS=""
-if [[ "$CERC_GETH_MODE_QUICK_SET" = "archive" ]]; then
- MODE_FLAGS="--syncmode=${GETH_SYNC_MODE:-full} --gcmode=${GETH_GC_MODE:-archive} --snapshot=${GETH_SNAPSHOT:-false}"
-else
- MODE_FLAGS="--syncmode=${GETH_SYNC_MODE:-snap} --gcmode=${GETH_GC_MODE:-full} --snapshot=${GETH_SNAPSHOT:-true}"
-fi
-
-if [[ "${CERC_RUN_STATEDIFF}" == "detect" ]] && [[ -n "$CERC_STATEDIFF_DB_HOST" ]]; then
- dig_result=$(dig $CERC_STATEDIFF_DB_HOST +short)
- dig_status_code=$?
- if [[ $dig_status_code = 0 && -n $dig_result ]]; then
- echo "Statediff DB at $CERC_STATEDIFF_DB_HOST"
- CERC_RUN_STATEDIFF="true"
- else
- echo "No statediff DB available."
- CERC_RUN_STATEDIFF="false"
- fi
-fi
-
-STATEDIFF_OPTS=""
-if [[ "${CERC_RUN_STATEDIFF}" == "true" ]]; then
- ready=0
- echo "Waiting for statediff DB..."
- while [ $ready -eq 0 ]; do
- sleep 1
- export PGPASSWORD="$CERC_STATEDIFF_DB_PASSWORD"
- result=$(psql -h "$CERC_STATEDIFF_DB_HOST" \
- -p "$CERC_STATEDIFF_DB_PORT" \
- -U "$CERC_STATEDIFF_DB_USER" \
- -d "$CERC_STATEDIFF_DB_NAME" \
- -t -c 'select max(version_id) from goose_db_version;' 2>/dev/null | awk '{ print $1 }')
- if [ -n "$result" ]; then
- echo "DB ready..."
- if [[ $result -ge $CERC_STATEDIFF_DB_GOOSE_MIN_VER ]]; then
- ready=1
- else
- echo "DB not at required version (want $CERC_STATEDIFF_DB_GOOSE_MIN_VER, have $result)"
- fi
- fi
- done
-
- STATEDIFF_OPTS="--statediff \
- --statediff.db.host=$CERC_STATEDIFF_DB_HOST \
- --statediff.db.name=$CERC_STATEDIFF_DB_NAME \
- --statediff.db.nodeid=$CERC_STATEDIFF_DB_NODE_ID \
- --statediff.db.password=$CERC_STATEDIFF_DB_PASSWORD \
- --statediff.db.port=$CERC_STATEDIFF_DB_PORT \
- --statediff.db.user=$CERC_STATEDIFF_DB_USER \
- --statediff.db.logstatements=${CERC_STATEDIFF_DB_LOG_STATEMENTS:-false} \
- --statediff.db.copyfrom=${CERC_STATEDIFF_DB_COPY_FROM:-true} \
- --statediff.waitforsync=${CERC_STATEDIFF_WAIT_FO_SYNC:-true} \
- --statediff.workers=${CERC_STATEDIFF_WORKERS:-1} \
- --statediff.writing=${CERC_STATEDIFF_WRITING:-true}"
-
- if [[ -d "${CERC_PLUGINS_DIR}" ]]; then
- # With plugeth, we separate the statediff options by prefixing with ' -- '
- STATEDIFF_OPTS="--pluginsdir "${CERC_PLUGINS_DIR}" -- ${STATEDIFF_OPTS}"
- fi
-fi
-
-$START_CMD \
- $MODE_FLAGS \
- --datadir="${GETH_DATADIR}"\
- --identity="${GETH_NODE_NAME}" \
- --maxpeers=${GETH_MAX_PEERS} \
- --cache=${GETH_CACHE} \
- --cache.gc=${GETH_CACHE_GC} \
- --cache.database=${GETH_CACHE_DB} \
- --cache.trie=${GETH_CACHE_TRIE} \
- --authrpc.addr='0.0.0.0' \
- --authrpc.vhosts='*' \
- --authrpc.jwtsecret="${GETH_JWTSECRET}" \
- --http \
- --http.addr='0.0.0.0' \
- --http.api="${GETH_HTTP_API}" \
- --http.vhosts='*' \
- --metrics \
- --metrics.addr='0.0.0.0' \
- --ws \
- --ws.addr='0.0.0.0' \
- --ws.api="${GETH_WS_API}" \
- --rpc.gascap=${GETH_RPC_GASCAP} \
- --rpc.evmtimeout=${GETH_RPC_EVMTIMEOUT} \
- --txlookuplimit=${GETH_TXLOOKUPLIMIT} \
- --verbosity=${GETH_VERBOSITY} \
- --log.vmodule="${GETH_VMODULE}" \
- ${STATEDIFF_OPTS} \
- ${GETH_OPTS} &
-
-geth_pid=$!
-wait $geth_pid
-
-if [[ "true" == "$CERC_KEEP_RUNNING_AFTER_GETH_EXIT" ]]; then
- while [[ 1 -eq 1 ]]; do
- sleep 60
- done
-fi
diff --git a/app/data/config/mainnet-eth-plugeth/scripts/run-lighthouse.sh b/app/data/config/mainnet-eth-plugeth/scripts/run-lighthouse.sh
deleted file mode 100755
index efda735b..00000000
--- a/app/data/config/mainnet-eth-plugeth/scripts/run-lighthouse.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-if [[ "true" == "$CERC_SCRIPT_DEBUG" ]]; then
- set -x
-fi
-
-ENR_OPTS=""
-if [[ -n "$LIGHTHOUSE_ENR_ADDRESS" ]]; then
- ENR_OPTS="--enr-address $LIGHTHOUSE_ENR_ADDRESS"
-fi
-
-exec lighthouse bn \
- --checkpoint-sync-url "$LIGHTHOUSE_CHECKPOINT_SYNC_URL" \
- --checkpoint-sync-url-timeout ${LIGHTHOUSE_CHECKPOINT_SYNC_URL_TIMEOUT} \
- --datadir "$LIGHTHOUSE_DATADIR" \
- --debug-level $LIGHTHOUSE_DEBUG_LEVEL \
- --disable-deposit-contract-sync \
- --disable-upnp \
- --enr-tcp-port $LIGHTHOUSE_NETWORK_PORT \
- --enr-udp-port $LIGHTHOUSE_NETWORK_PORT \
- --execution-endpoint "$LIGHTHOUSE_EXECUTION_ENDPOINT" \
- --execution-jwt /etc/mainnet-eth/jwtsecret \
- --http \
- --http-address 0.0.0.0 \
- --http-port $LIGHTHOUSE_HTTP_PORT \
- --metrics \
- --metrics-address=0.0.0.0 \
- --metrics-port $LIGHTHOUSE_METRICS_PORT \
- --network mainnet \
- --port $LIGHTHOUSE_NETWORK_PORT \
- $ENR_OPTS $LIGHTHOUSE_OPTS
diff --git a/app/data/container-build/cerc-plugeth-with-plugins/Dockerfile b/app/data/container-build/cerc-plugeth-with-plugins/Dockerfile
deleted file mode 100644
index 87d050ea..00000000
--- a/app/data/container-build/cerc-plugeth-with-plugins/Dockerfile
+++ /dev/null
@@ -1,22 +0,0 @@
-# Using the same golang image as used to build plugeth: https://git.vdb.to/cerc-io/plugeth/src/branch/statediff/Dockerfile
-FROM golang:1.20-alpine3.18 as delve
-
-# Add delve so that we can do remote debugging.
-RUN go install github.com/go-delve/delve/cmd/dlv@latest
-
-FROM cerc/plugeth-statediff:local as statediff
-FROM cerc/plugeth:local as plugeth
-
-FROM alpine:3.18
-
-# Install tools often used in scripting, like bash, wget, and jq.
-RUN apk add --no-cache ca-certificates bash wget curl python3 bind-tools postgresql-client jq
-
-COPY --from=delve /go/bin/dlv /usr/local/bin/
-COPY --from=plugeth /usr/local/bin/geth /usr/local/bin/
-
-# Place all plugeth plugins in /usr/local/lib/plugeth
-COPY --from=statediff /usr/local/lib/statediff.so /usr/local/lib/plugeth/
-
-EXPOSE 8545 8546 8551 6060 30303 30303/udp 40000
-ENTRYPOINT ["geth"]
diff --git a/app/data/container-build/cerc-plugeth-with-plugins/build.sh b/app/data/container-build/cerc-plugeth-with-plugins/build.sh
deleted file mode 100755
index 9ab44946..00000000
--- a/app/data/container-build/cerc-plugeth-with-plugins/build.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env bash
-# Build cerc/cerc-plugeth-with-plugins
-set -x
-
-source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
-
-SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
-
-docker build -t cerc/plugeth-with-plugins:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} $SCRIPT_DIR
diff --git a/app/data/stacks/chain-chunker/stack.yml b/app/data/stacks/chain-chunker/stack.yml
index 2705f69a..d85aa057 100644
--- a/app/data/stacks/chain-chunker/stack.yml
+++ b/app/data/stacks/chain-chunker/stack.yml
@@ -6,10 +6,8 @@ repos:
- git.vdb.to/cerc-io/eth-statediff-service@v5
- git.vdb.to/cerc-io/ipld-eth-db@v5
- git.vdb.to/cerc-io/ipld-eth-server@v5
- - git.vdb.to/cerc-io/plugeth@statediff
containers:
- cerc/ipld-eth-state-snapshot
- cerc/eth-statediff-service
- cerc/ipld-eth-db
- cerc/ipld-eth-server
- - cerc/plugeth
diff --git a/app/data/stacks/mainnet-eth-plugeth/README.md b/app/data/stacks/mainnet-eth-plugeth/README.md
deleted file mode 100644
index 8ed6bebb..00000000
--- a/app/data/stacks/mainnet-eth-plugeth/README.md
+++ /dev/null
@@ -1,141 +0,0 @@
-# mainnet-eth-plugeth
-
-Deploys a "head-tracking" mainnet Ethereum stack comprising a [plugeth](https://git.vdb.to/cerc-io/plugeth) execution layer node and a [lighthouse](https://github.com/sigp/lighthouse) consensus layer node, with [plugeth-statediff](https://git.vdb.to/cerc-io/plugeth-statediff) for statediffing, [ipld-eth-db](https://git.vdb.to/cerc-io/ipld-eth-db) for storage, and [ipld-eth-server](https://git.vdb.to/cerc-io/ipld-eth-server) for indexed ETH IPLD objects.
-
-## Clone required repositories
-
-```
-$ laconic-so --stack mainnet-eth-plugeth setup-repositories
-```
-
-## Build containers
-
-```
-$ laconic-so --stack mainnet-eth-plugeth build-containers
-```
-
-## Create a deployment
-
-```
-$ laconic-so --stack mainnet-eth-plugeth deploy init --map-ports-to-host any-same --output mainnet-eth-plugeth-spec.yml
-$ laconic-so --stack mainnet-eth-plugeth deploy create --spec-file mainnet-eth-plugeth-spec.yml --deployment-dir mainnet-eth-plugeth-deployment
-```
-## Start the stack
-```
-$ laconic-so deployment --dir mainnet-eth-plugeth-deployment start
-```
-Display stack status:
-```
-$ laconic-so deployment --dir mainnet-eth-plugeth-deployment ps
-Running containers:
-id: f39608eca04d72d6b0f1f3acefc5ebb52908da06e221d20c7138f7e3dff5e423, name: laconic-ef641b4d13eb61ed561b19be67063241-foundry-1, ports:
-id: 4052b1eddd886ae0d6b41f9ff22e68a70f267b2bfde10f4b7b79b5bd1eeddcac, name: laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-plugeth-geth-1-1, ports: 30303/tcp, 30303/udp, 0.0.0.0:49184->40000/tcp, 0.0.0.0:49185->6060/tcp, 0.0.0.0:49186->8545/tcp, 8546/tcp
-id: ac331232e597944b621b3b8942ace5dafb14524302cab338ff946c7f6e5a1d52, name: laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-plugeth-lighthouse-1-1, ports: 0.0.0.0:49187->8001/tcp
-```
-See stack logs:
-```
-$ laconic-so deployment --dir mainnet-eth-plugeth-deployment logs
-time="2023-07-25T09:46:29-06:00" level=warning msg="The \"CERC_SCRIPT_DEBUG\" variable is not set. Defaulting to a blank string."
-laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-plugeth-lighthouse-1-1 | Jul 25 15:45:13.362 INFO Logging to file path: "/var/lighthouse-data-dir/beacon/logs/beacon.log"
-laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-plugeth-lighthouse-1-1 | Jul 25 15:45:13.365 INFO Lighthouse started version: Lighthouse/v4.1.0-693886b
-laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-plugeth-lighthouse-1-1 | Jul 25 15:45:13.365 INFO Configured for network name: mainnet
-laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-plugeth-lighthouse-1-1 | Jul 25 15:45:13.366 INFO Data directory initialised datadir: /var/lighthouse-data-dir
-laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-plugeth-lighthouse-1-1 | Jul 25 15:45:13.366 INFO Deposit contract address: 0x00000000219ab540356cbb839cbe05303d7705fa, deploy_block: 11184524
-laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-plugeth-lighthouse-1-1 | Jul 25 15:45:13.424 INFO Starting checkpoint sync remote_url: https://beaconstate.ethstaker.cc/, service: beacon
-```
-## Monitoring stack sync progress
-Both go-ethereum and lighthouse will engage in an initial chain sync phase that will last up to several hours depending on hardware performance and network capacity.
-Syncing can be monitored by looking for these log messages:
-```
-Jul 24 12:34:17.001 INFO Downloading historical blocks est_time: 5 days 11 hrs, speed: 14.67 slots/sec, distance: 6932481 slots (137 weeks 3 days), service: slot_notifier
-INFO [07-24|12:14:52.493] Syncing beacon headers downloaded=145,920 left=17,617,968 eta=1h23m32.815s
-INFO [07-24|12:33:15.238] Syncing: chain download in progress synced=1.86% chain=148.94MiB headers=368,640@95.03MiB bodies=330,081@40.56MiB receipts=330,081@13.35MiB eta=37m54.505s
-INFO [07-24|12:35:13.028] Syncing: state download in progress synced=1.32% state=4.64GiB accounts=2,850,314@677.57MiB slots=18,663,070@3.87GiB codes=26662@111.14MiB eta=3h18m0.699s
-```
-Once synced up these log messages will be observed:
-```
-INFO Synced slot: 6952515, block: 0x5bcb…f6d9, epoch: 217266, finalized_epoch: 217264, finalized_root: 0x6342…2c5c, exec_hash: 0x8d8c…2443 (verified), peers: 31, service: slot_notifier
-INFO [07-25|03:04:48.941] Imported new potential chain segment number=17,767,316 hash=84f6e7..bc2cb0 blocks=1 txs=137 mgas=16.123 elapsed=57.087ms mgasps=282.434 dirty=461.46MiB
-INFO [07-25|03:04:49.042] Chain head was updated number=17,767,316 hash=84f6e7..bc2cb0 root=ca58b2..8258c1 elapsed=2.480111ms
-```
-## Clean up
-
-Stop the stack:
-```
-$ laconic-so deployment --dir mainnet-eth-plugeth-deployment stop
-```
-This leaves data volumes in place, allowing the stack to be subsequently re-started.
-To permanently *delete* the stack's data volumes run:
-```
-$ laconic-so deployment --dir mainnet-eth-plugeth-deployment stop --delete-data-volumes
-```
-After deleting the volumes, any subsequent re-start will begin chain sync from cold.
-
-## Ports
-It is usually necessary to expose certain container ports on one or more the host's addresses to allow incoming connections.
-Any ports defined in the Docker compose file are exposed by default with random port assignments, bound to "any" interface (IP address 0.0.0.0), but the port mappings can be
-customized by editing the "spec" file generated by `laconic-so deploy init`.
-
-In this example, ports `8545` and `5052` have been assigned to a specific addresses/port combination on the host, while
-port `40000` has been left with random assignment:
-```
-$ cat mainnet-eth-plugeth-spec.yml
-stack: mainnet-eth-plugeth
-ports:
- mainnet-eth-plugeth-geth-1:
- - '10.10.10.10:8545:8545'
- - '40000'
- mainnet-eth-plugeth-lighthouse-1:
- - '10.10.10.10:5052:5052'
-volumes:
- mainnet_eth_plugeth_config_data: ./data/mainnet_eth_plugeth_config_data
- mainnet_eth_plugeth_geth_1_data: ./data/mainnet_eth_plugeth_geth_1_data
- mainnet_eth_plugeth_lighthouse_1_data: ./data/mainnet_eth_plugeth_lighthouse_1_data
-```
-In addition, a stack-wide port mapping "recipe" can be applied at the time the
-`laconic-so deploy init` command is run, by supplying the desired recipe with the `--map-ports-to-host` option. The following recipes are supported:
-| Recipe | Host Port Mapping |
-|--------|-------------------|
-| any-variable-random | Bind to 0.0.0.0 using a random port assigned at start time (default) |
-| localhost-same | Bind to 127.0.0.1 using the same port number as exposed by the containers |
-| any-same | Bind to 0.0.0.0 using the same port number as exposed by the containers |
-| localhost-fixed-random | Bind to 127.0.0.1 using a random port number selected at the time the command is run (not checked for already in use)|
-| any-fixed-random | Bind to 0.0.0.0 using a random port number selected at the time the command is run (not checked for already in use) |
-## Data volumes
-Container data volumes are bind-mounted to specified paths in the host filesystem.
-The default setup (generated by `laconic-so deploy init`) places the volumes in the `./data` subdirectory of the deployment directory:
-```
-$ cat mainnet-eth-plugeth-spec.yml
-stack: mainnet-eth-plugeth
-ports:
- mainnet-eth-plugeth-geth-1:
- - '10.10.10.10:8545:8545'
- - '40000'
- mainnet-eth-plugeth-lighthouse-1:
- - '10.10.10.10:5052:5052'
-volumes:
- mainnet_eth_plugeth_config_data: ./data/mainnet_eth_plugeth_config_data
- mainnet_eth_plugeth_geth_1_data: ./data/mainnet_eth_plugeth_geth_1_data
- mainnet_eth_plugeth_lighthouse_1_data: ./data/mainnet_eth_plugeth_lighthouse_1_data
-```
-A synced-up stack will consume around 900GB of data volume space:
-```
-$ sudo du -h mainnet-eth-plugeth-deployment/data/
-150M mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_lighthouse_1_data/beacon/freezer_db
-25G mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_lighthouse_1_data/beacon/chain_db
-16K mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_lighthouse_1_data/beacon/network
-368M mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_lighthouse_1_data/beacon/logs
-26G mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_lighthouse_1_data/beacon
-26G mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_lighthouse_1_data
-8.0K mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_config_data
-4.0K mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_geth_1_data/keystore
-527G mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_geth_1_data/geth/chaindata/ancient/chain
-527G mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_geth_1_data/geth/chaindata/ancient
-859G mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_geth_1_data/geth/chaindata
-4.8M mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_geth_1_data/geth/nodes
-242M mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_geth_1_data/geth/ethash
-669M mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_geth_1_data/geth/triecache
-860G mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_geth_1_data/geth
-860G mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_geth_1_data
-885G mainnet-eth-plugeth-deployment/data/
-```
diff --git a/app/data/stacks/mainnet-eth-plugeth/stack.yml b/app/data/stacks/mainnet-eth-plugeth/stack.yml
deleted file mode 100644
index 7ade244c..00000000
--- a/app/data/stacks/mainnet-eth-plugeth/stack.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-version: "1.2"
-name: mainnet-eth
-description: "Ethereum Mainnet"
-repos:
- - git.vdb.to/cerc-io/plugeth@statediff
- - git.vdb.to/cerc-io/plugeth-statediff
- - git.vdb.to/cerc-io/lighthouse
- - git.vdb.to/cerc-io/ipld-eth-db@v5
- - git.vdb.to/cerc-io/ipld-eth-server@v5
- - git.vdb.to/cerc-io/keycloak-reg-api
- - git.vdb.to/cerc-io/keycloak-reg-ui
-containers:
- - cerc/plugeth-statediff
- - cerc/plugeth
- - cerc/plugeth-with-plugins
- - cerc/lighthouse
- - cerc/lighthouse-cli
- - cerc/ipld-eth-db
- - cerc/ipld-eth-server
- - cerc/keycloak
- - cerc/webapp-base
- - cerc/keycloak-reg-api
- - cerc/keycloak-reg-ui
-pods:
- - mainnet-eth-plugeth
- - mainnet-eth-ipld-eth-db
- - mainnet-eth-ipld-eth-server
- - mainnet-eth-keycloak
- - mainnet-eth-metrics
diff --git a/app/deploy/k8s/cluster_info.py b/app/deploy/k8s/cluster_info.py
new file mode 100644
index 00000000..540f5f8c
--- /dev/null
+++ b/app/deploy/k8s/cluster_info.py
@@ -0,0 +1,84 @@
+# Copyright © 2023 Vulcanize
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+from kubernetes import client
+from typing import Any, List, Set
+
+from app.opts import opts
+from app.util import get_yaml
+
+
+class ClusterInfo:
+ parsed_pod_yaml_map: Any = {}
+ image_set: Set[str] = set()
+ app_name: str = "test-app"
+ deployment_name: str = "test-deployment"
+
+ def __init__(self) -> None:
+ pass
+
+ def int_from_pod_files(self, pod_files: List[str]):
+ for pod_file in pod_files:
+ with open(pod_file, "r") as pod_file_descriptor:
+ parsed_pod_file = get_yaml().load(pod_file_descriptor)
+ self.parsed_pod_yaml_map[pod_file] = parsed_pod_file
+ if opts.o.debug:
+ print(f"parsed_pod_yaml_map: {self.parsed_pod_yaml_map}")
+ # Find the set of images in the pods
+ for pod_name in self.parsed_pod_yaml_map:
+ pod = self.parsed_pod_yaml_map[pod_name]
+ services = pod["services"]
+ for service_name in services:
+ service_info = services[service_name]
+ image = service_info["image"]
+ self.image_set.add(image)
+ if opts.o.debug:
+ print(f"image_set: {self.image_set}")
+
+ def get_deployment(self):
+ containers = []
+ for pod_name in self.parsed_pod_yaml_map:
+ pod = self.parsed_pod_yaml_map[pod_name]
+ services = pod["services"]
+ for service_name in services:
+ container_name = service_name
+ service_info = services[service_name]
+ image = service_info["image"]
+ container = client.V1Container(
+ name=container_name,
+ image=image,
+ ports=[client.V1ContainerPort(container_port=80)],
+ resources=client.V1ResourceRequirements(
+ requests={"cpu": "100m", "memory": "200Mi"},
+ limits={"cpu": "500m", "memory": "500Mi"},
+ ),
+ )
+ containers.append(container)
+ template = client.V1PodTemplateSpec(
+ metadata=client.V1ObjectMeta(labels={"app": self.app_name}),
+ spec=client.V1PodSpec(containers=containers),
+ )
+ spec = client.V1DeploymentSpec(
+ replicas=1, template=template, selector={
+ "matchLabels":
+ {"app": self.app_name}})
+
+ deployment = client.V1Deployment(
+ api_version="apps/v1",
+ kind="Deployment",
+ metadata=client.V1ObjectMeta(name=self.deployment_name),
+ spec=spec,
+ )
+ return deployment
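For orientation, a sketch of how the new `ClusterInfo` class could be exercised on its own. The pod file path and option values are hypothetical; the pod file is assumed to carry the compose-style top-level `services:` mapping that `int_from_pod_files()` iterates over, and `opts.o` is set first because the debug checks dereference it.

```python
# Hypothetical standalone exercise of ClusterInfo; the pod file path is a placeholder.
from app.command_types import CommandOptions
from app.deploy.k8s.cluster_info import ClusterInfo
from app.opts import opts

# The debug checks read opts.o, so initialise it the way cli.py does:
# (stack, quiet, verbose, dry_run, local_stack, debug, continue_on_error)
opts.o = CommandOptions(None, False, False, False, False, False, False)

cluster_info = ClusterInfo()
cluster_info.int_from_pod_files(["compose/docker-compose-test-pod.yml"])

# image_set now holds every "image:" referenced by the pod's services
print(cluster_info.image_set)

# get_deployment() folds those services into a single V1Deployment
deployment = cluster_info.get_deployment()
print(deployment.metadata.name, len(deployment.spec.template.spec.containers))
```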
diff --git a/app/deploy/k8s/deploy_k8s.py b/app/deploy/k8s/deploy_k8s.py
index 7cf0261d..e67f3974 100644
--- a/app/deploy/k8s/deploy_k8s.py
+++ b/app/deploy/k8s/deploy_k8s.py
@@ -14,33 +14,86 @@
 # along with this program. If not, see <https://www.gnu.org/licenses/>.
from kubernetes import client, config
+
from app.deploy.deployer import Deployer
+from app.deploy.k8s.helpers import create_cluster, destroy_cluster, load_images_into_kind
+from app.deploy.k8s.helpers import pods_in_deployment, log_stream_from_string
+from app.deploy.k8s.cluster_info import ClusterInfo
+from app.opts import opts
class K8sDeployer(Deployer):
name: str = "k8s"
+ core_api: client.CoreV1Api
+ apps_api: client.AppsV1Api
+ kind_cluster_name: str
+ cluster_info : ClusterInfo
def __init__(self, compose_files, compose_project_name, compose_env_file) -> None:
- config.load_kube_config()
- self.client = client.CoreV1Api()
+ if (opts.o.debug):
+ print(f"Compose files: {compose_files}")
+ print(f"Project name: {compose_project_name}")
+ print(f"Env file: {compose_env_file}")
+ self.kind_cluster_name = compose_project_name
+ self.cluster_info = ClusterInfo()
+ self.cluster_info.int_from_pod_files(compose_files)
+
+ def connect_api(self):
+ config.load_kube_config(context=f"kind-{self.kind_cluster_name}")
+ self.core_api = client.CoreV1Api()
+ self.apps_api = client.AppsV1Api()
def up(self, detach, services):
- pass
+ # Create the kind cluster
+ create_cluster(self.kind_cluster_name)
+ self.connect_api()
+ # Ensure the referenced containers are copied into kind
+ load_images_into_kind(self.kind_cluster_name, self.cluster_info.image_set)
+ # Process compose files into a Deployment
+ deployment = self.cluster_info.get_deployment()
+ # Create the k8s objects
+ resp = self.apps_api.create_namespaced_deployment(
+ body=deployment, namespace="default"
+ )
+
+ if opts.o.debug:
+ print("Deployment created.\n")
+ print(f"{resp.metadata.namespace} {resp.metadata.name} \
+ {resp.metadata.generation} {resp.spec.template.spec.containers[0].image}")
def down(self, timeout, volumes):
- pass
+ # Delete the k8s objects
+ # Destroy the kind cluster
+ destroy_cluster(self.kind_cluster_name)
def ps(self):
- pass
+ self.connect_api()
+ # Call whatever API we need to get the running container list
+ ret = self.core_api.list_pod_for_all_namespaces(watch=False)
+ if ret.items:
+ for i in ret.items:
+ print("%s\t%s\t%s" % (i.status.pod_ip, i.metadata.namespace, i.metadata.name))
+ ret = self.core_api.list_node(pretty=True, watch=False)
+ return []
def port(self, service, private_port):
+ # Since we handle the port mapping, need to figure out where this comes from
+ # Also look into whether it makes sense to get ports for k8s
pass
def execute(self, service_name, command, envs):
+ # Call the API to execute a command in a running container
pass
def logs(self, services, tail, follow, stream):
- pass
+ self.connect_api()
+ pods = pods_in_deployment(self.core_api, "test-deployment")
+ if len(pods) > 1:
+ print("Warning: more than one pod in the deployment")
+ k8s_pod_name = pods[0]
+ log_data = self.core_api.read_namespaced_pod_log(k8s_pod_name, namespace="default", container="test")
+ return log_stream_from_string(log_data)
def run(self, image, command, user, volumes, entrypoint=None):
+ # We need to figure out how to do this -- check why we're being called first
pass
diff --git a/app/deploy/k8s/helpers.py b/app/deploy/k8s/helpers.py
new file mode 100644
index 00000000..731d667d
--- /dev/null
+++ b/app/deploy/k8s/helpers.py
@@ -0,0 +1,57 @@
+# Copyright © 2023 Vulcanize
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+from kubernetes import client
+import subprocess
+from typing import Set
+
+from app.opts import opts
+
+
+def _run_command(command: str):
+ if opts.o.debug:
+ print(f"Running: {command}")
+ result = subprocess.run(command, shell=True)
+ if opts.o.debug:
+ print(f"Result: {result}")
+
+
+def create_cluster(name: str):
+ _run_command(f"kind create cluster --name {name}")
+
+
+def destroy_cluster(name: str):
+ _run_command(f"kind delete cluster --name {name}")
+
+
+def load_images_into_kind(kind_cluster_name: str, image_set: Set[str]):
+ for image in image_set:
+ _run_command(f"kind load docker-image {image} --name {kind_cluster_name}")
+
+
+def pods_in_deployment(core_api: client.CoreV1Api, deployment_name: str):
+ pods = []
+ pod_response = core_api.list_namespaced_pod(namespace="default", label_selector="app=test-app")
+ if opts.o.debug:
+ print(f"pod_response: {pod_response}")
+ for pod_info in pod_response.items:
+ pod_name = pod_info.metadata.name
+ pods.append(pod_name)
+ return pods
+
+
+def log_stream_from_string(s: str):
+ # Note response has to be UTF-8 encoded because the caller expects to decode it
+ yield ("ignore", s.encode())
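These helpers shell out to the `kind` CLI and adapt a plain log string into the `(service, bytes)` chunks the log consumer decodes. A minimal sketch under those assumptions, with placeholder cluster and image names; `opts.o` is initialised first because `_run_command()` reads it.

```python
# Small illustration of the kind helpers; cluster and image names are placeholders.
from app.command_types import CommandOptions
from app.deploy.k8s.helpers import (
    create_cluster,
    destroy_cluster,
    load_images_into_kind,
    log_stream_from_string,
)
from app.opts import opts

opts.o = CommandOptions(None, False, False, False, False, False, False)

create_cluster("laconic-test")                                        # kind create cluster
load_images_into_kind("laconic-test", {"cerc/test-container:local"})  # kind load docker-image

# log_stream_from_string() wraps a plain string as (service, utf-8 bytes) chunks
# so the caller can decode it like any other log stream.
for _service, chunk in log_stream_from_string("hello from a pod\n"):
    print(chunk.decode(), end="")

destroy_cluster("laconic-test")                                       # kind delete cluster
```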
diff --git a/app/data/stacks/mainnet-eth-plugeth/deploy/commands.py b/app/opts.py
similarity index 63%
rename from app/data/stacks/mainnet-eth-plugeth/deploy/commands.py
rename to app/opts.py
index 5aba9547..193637c2 100644
--- a/app/data/stacks/mainnet-eth-plugeth/deploy/commands.py
+++ b/app/opts.py
@@ -13,20 +13,8 @@
# You should have received a copy of the GNU Affero General Public License
 # along with this program. If not, see <https://www.gnu.org/licenses/>.
-from secrets import token_hex
+from app.command_types import CommandOptions
-def init(ctx):
- return None
-
-
-def setup(ctx):
- return None
-
-
-def create(ctx, extra_args):
- # Generate the JWT secret and save to its config file
- secret = token_hex(32)
- jwt_file_path = ctx.deployment_dir.joinpath("data", "mainnet_eth_plugeth_config_data", "jwtsecret")
- with open(jwt_file_path, 'w+') as jwt_file:
- jwt_file.write(secret)
+class opts:
+ o: CommandOptions = None
diff --git a/cli.py b/cli.py
index 5dea43ca..38bdddd9 100644
--- a/cli.py
+++ b/cli.py
@@ -22,6 +22,7 @@ from app.build import build_npms
from app.deploy import deploy
from app import version
from app.deploy import deployment
+from app import opts
from app import update
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@@ -39,7 +40,9 @@ CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.pass_context
def cli(ctx, stack, quiet, verbose, dry_run, local_stack, debug, continue_on_error):
"""Laconic Stack Orchestrator"""
- ctx.obj = CommandOptions(stack, quiet, verbose, dry_run, local_stack, debug, continue_on_error)
+ command_options = CommandOptions(stack, quiet, verbose, dry_run, local_stack, debug, continue_on_error)
+ opts.opts.o = command_options
+ ctx.obj = command_options
cli.add_command(setup_repositories.command, "setup-repositories")
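The `app.opts` module introduced above turns the parsed `CommandOptions` into a module-level singleton, so code deep in the k8s deployer can check flags such as `debug` without threading the click context through every call. A minimal sketch of both sides, with placeholder values; anything reading `opts.o` before `cli()` has run sees `None`, so the deployer code only touches it inside method bodies that execute after option parsing.

```python
# Sketch of the global options pattern introduced here; values are placeholders.
from app.command_types import CommandOptions
from app.opts import opts

# Write side: cli() stores the parsed options once at startup
# (stack, quiet, verbose, dry_run, local_stack, debug, continue_on_error).
opts.o = CommandOptions("test-stack", False, False, False, False, True, False)


# Read side: any module can consult the flags directly.
def maybe_debug(message: str):
    if opts.o.debug:
        print(message)


maybe_debug("debug output enabled")
```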