From 20d633f81c75e3cbd17ae357e1ea9c0274d02c15 Mon Sep 17 00:00:00 2001 From: Thomas E Lackey Date: Wed, 25 Oct 2023 14:42:52 -0500 Subject: [PATCH 01/62] Plugeth-based full mainnet stack. (#592) * Plugeth-based full mainnet stack. --------- Co-authored-by: David Boreham --- ...docker-compose-mainnet-eth-ipld-eth-db.yml | 29 ++++ ...er-compose-mainnet-eth-ipld-eth-server.yml | 24 +++ .../docker-compose-mainnet-eth-keycloak.yml | 4 +- .../docker-compose-mainnet-eth-plugeth.yml | 72 +++++++++ .../config/mainnet-eth-ipld-eth-db/db.env | 15 ++ .../mainnet-eth-ipld-eth-server/config.toml | 33 ++++ .../mainnet-eth-ipld-eth-server/srv.env | 27 ++++ .../config/mainnet-eth-keycloak/keycloak.env | 3 + .../config/mainnet-eth-keycloak/nginx.example | 42 ++++-- app/data/config/mainnet-eth-plugeth/geth.env | 75 ++++++++++ .../config/mainnet-eth-plugeth/lighthouse.env | 33 ++++ .../mainnet-eth-plugeth/scripts/run-geth.sh | 121 +++++++++++++++ .../scripts/run-lighthouse.sh | 30 ++++ app/data/config/mainnet-eth/geth.env | 2 +- .../cerc-plugeth-with-plugins/Dockerfile | 22 +++ .../cerc-plugeth-with-plugins/build.sh | 9 ++ app/data/stacks/mainnet-eth-plugeth/README.md | 141 ++++++++++++++++++ .../mainnet-eth-plugeth/deploy/commands.py | 32 ++++ app/data/stacks/mainnet-eth-plugeth/stack.yml | 29 ++++ 19 files changed, 724 insertions(+), 19 deletions(-) create mode 100644 app/data/compose/docker-compose-mainnet-eth-ipld-eth-db.yml create mode 100644 app/data/compose/docker-compose-mainnet-eth-ipld-eth-server.yml create mode 100644 app/data/compose/docker-compose-mainnet-eth-plugeth.yml create mode 100644 app/data/config/mainnet-eth-ipld-eth-db/db.env create mode 100644 app/data/config/mainnet-eth-ipld-eth-server/config.toml create mode 100644 app/data/config/mainnet-eth-ipld-eth-server/srv.env create mode 100644 app/data/config/mainnet-eth-plugeth/geth.env create mode 100644 app/data/config/mainnet-eth-plugeth/lighthouse.env create mode 100755 
app/data/config/mainnet-eth-plugeth/scripts/run-geth.sh create mode 100755 app/data/config/mainnet-eth-plugeth/scripts/run-lighthouse.sh create mode 100644 app/data/container-build/cerc-plugeth-with-plugins/Dockerfile create mode 100755 app/data/container-build/cerc-plugeth-with-plugins/build.sh create mode 100644 app/data/stacks/mainnet-eth-plugeth/README.md create mode 100644 app/data/stacks/mainnet-eth-plugeth/deploy/commands.py create mode 100644 app/data/stacks/mainnet-eth-plugeth/stack.yml diff --git a/app/data/compose/docker-compose-mainnet-eth-ipld-eth-db.yml b/app/data/compose/docker-compose-mainnet-eth-ipld-eth-db.yml new file mode 100644 index 00000000..49cc2de3 --- /dev/null +++ b/app/data/compose/docker-compose-mainnet-eth-ipld-eth-db.yml @@ -0,0 +1,29 @@ +version: "3.2" + +services: + migrations: + restart: on-failure + depends_on: + ipld-eth-db: + condition: service_healthy + image: cerc/ipld-eth-db:local + env_file: + - ../config/mainnet-eth-ipld-eth-db/db.env + + ipld-eth-db: + image: timescale/timescaledb:2.8.1-pg14 + restart: always + env_file: + - ../config/mainnet-eth-ipld-eth-db/db.env + volumes: + - mainnet_eth_ipld_eth_db:/var/lib/postgresql/data + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "5432"] + interval: 30s + timeout: 10s + retries: 10 + start_period: 3s + ports: + - "5432" +volumes: + mainnet_eth_ipld_eth_db: diff --git a/app/data/compose/docker-compose-mainnet-eth-ipld-eth-server.yml b/app/data/compose/docker-compose-mainnet-eth-ipld-eth-server.yml new file mode 100644 index 00000000..4341c6a1 --- /dev/null +++ b/app/data/compose/docker-compose-mainnet-eth-ipld-eth-server.yml @@ -0,0 +1,24 @@ +version: "3.7" +services: + ipld-eth-server: + restart: always + depends_on: + ipld-eth-db: + condition: service_healthy + image: cerc/ipld-eth-server:local + env_file: + - ../config/mainnet-eth-ipld-eth-db/db.env + - ../config/mainnet-eth-ipld-eth-server/srv.env + volumes: + - 
../config/mainnet-eth-ipld-eth-server/config.toml:/app/config.toml:ro + ports: + - "8081" + - "8082" + - "8090" + - "40001" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "8081"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s diff --git a/app/data/compose/docker-compose-mainnet-eth-keycloak.yml b/app/data/compose/docker-compose-mainnet-eth-keycloak.yml index dfa9a804..1674c62e 100644 --- a/app/data/compose/docker-compose-mainnet-eth-keycloak.yml +++ b/app/data/compose/docker-compose-mainnet-eth-keycloak.yml @@ -6,7 +6,7 @@ services: env_file: - ../config/mainnet-eth-keycloak/keycloak.env healthcheck: - test: ["CMD", "nc", "-v", "localhost", "5432"] + test: ["CMD", "nc", "-v", "localhost", "35432"] interval: 30s timeout: 10s retries: 10 @@ -14,7 +14,7 @@ services: volumes: - mainnet_eth_keycloak_db:/var/lib/postgresql/data ports: - - 5432 + - 35432 keycloak: image: cerc/keycloak:local diff --git a/app/data/compose/docker-compose-mainnet-eth-plugeth.yml b/app/data/compose/docker-compose-mainnet-eth-plugeth.yml new file mode 100644 index 00000000..a8b301d2 --- /dev/null +++ b/app/data/compose/docker-compose-mainnet-eth-plugeth.yml @@ -0,0 +1,72 @@ + +services: + + mainnet-eth-geth-1: + restart: always + hostname: mainnet-eth-geth-1 + cap_add: + - SYS_PTRACE + image: cerc/plugeth-with-plugins:local + entrypoint: /bin/sh + command: -c "/opt/run-geth.sh" + env_file: + - ../config/mainnet-eth-ipld-eth-db/db.env + - ../config/mainnet-eth-plugeth/geth.env + volumes: + - mainnet_eth_plugeth_geth_1_data:/data + - mainnet_eth_plugeth_config_data:/etc/mainnet-eth + - ../config/mainnet-eth-plugeth/scripts/run-geth.sh:/opt/run-geth.sh + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "8545"] + interval: 30s + timeout: 10s + retries: 10 + start_period: 3s + ports: + # http api + - "8545" + # ws api + - "8546" + # ws el + - "8551" + # p2p + - "30303" + - "30303/udp" + # debugging + - "40000" + # metrics + - "6060" + + mainnet-eth-lighthouse-1: + 
restart: always + hostname: mainnet-eth-lighthouse-1 + healthcheck: + test: ["CMD", "wget", "--tries=1", "--connect-timeout=1", "--quiet", "-O", "-", "http://localhost:5052/eth/v2/beacon/blocks/head"] + interval: 30s + timeout: 10s + retries: 10 + start_period: 30s + environment: + LIGHTHOUSE_EXECUTION_ENDPOINT: "http://mainnet-eth-geth-1:8551" + env_file: + - ../config/mainnet-eth-plugeth/lighthouse.env + image: cerc/lighthouse:local + entrypoint: /bin/sh + command: -c "/opt/run-lighthouse.sh" + volumes: + - mainnet_eth_plugeth_lighthouse_1_data:/data + - mainnet_eth_plugeth_config_data:/etc/mainnet-eth + - ../config/mainnet-eth-plugeth/scripts/run-lighthouse.sh:/opt/run-lighthouse.sh + ports: + # api + - "5052" + # metrics + - "5054" + # p2p + - "9000" + - "9000/udp" + +volumes: + mainnet_eth_plugeth_config_data: + mainnet_eth_plugeth_geth_1_data: + mainnet_eth_plugeth_lighthouse_1_data: diff --git a/app/data/config/mainnet-eth-ipld-eth-db/db.env b/app/data/config/mainnet-eth-ipld-eth-db/db.env new file mode 100644 index 00000000..4ec11109 --- /dev/null +++ b/app/data/config/mainnet-eth-ipld-eth-db/db.env @@ -0,0 +1,15 @@ +DATABASE_HOSTNAME="ipld-eth-db" +DATABASE_NAME="cerc" +DATABASE_PASSWORD="CHANGEME" +DATABASE_PORT=5432 +DATABASE_USER="vdbm" + +POSTGRES_DB="${DATABASE_NAME}" +POSTGRES_PASSWORD="${DATABASE_PASSWORD}" +POSTGRES_USER="${DATABASE_USER}" + +CERC_STATEDIFF_DB_HOST="${DATABASE_HOSTNAME}" +CERC_STATEDIFF_DB_NAME="${DATABASE_NAME}" +CERC_STATEDIFF_DB_PASSWORD="${DATABASE_PASSWORD}" +CERC_STATEDIFF_DB_PORT=${DATABASE_PORT} +CERC_STATEDIFF_DB_USER="${DATABASE_USER}" diff --git a/app/data/config/mainnet-eth-ipld-eth-server/config.toml b/app/data/config/mainnet-eth-ipld-eth-server/config.toml new file mode 100644 index 00000000..c433df28 --- /dev/null +++ b/app/data/config/mainnet-eth-ipld-eth-server/config.toml @@ -0,0 +1,33 @@ +[database] + name = "" # $DATABASE_NAME + hostname = "" # $DATABASE_HOSTNAME + port = 5432 # $DATABASE_PORT + user = "" # 
$DATABASE_USER + password = "" # $DATABASE_PASSWORD + +[log] + level = "info" # $LOG_LEVEL + +[server] + ipc = false + ipcPath = "" # $SERVER_IPC_PATH + ws = false + wsPath = "0.0.0.0:8080" # $SERVER_WS_PATH + http = true + httpPath = "0.0.0.0:8081" # $SERVER_HTTP_PATH + graphql = false # $SERVER_GRAPHQL + graphqlPath = "0.0.0.0:8082" # $SERVER_GRAPHQL_PATH + +[ethereum] + chainConfig = "" # ETH_CHAIN_CONFIG + chainID = "1" # $ETH_CHAIN_ID + rpcGasCap = "1000000000000" # $ETH_RPC_GAS_CAP + httpPath = "mainnet-eth-geth-1:8545" # $ETH_HTTP_PATH + supportsStateDiff = true # $ETH_SUPPORTS_STATEDIFF + stateDiffTimeout = "4m" # $ETH_STATEDIFF_TIMEOUT + forwardEthCalls = false # $ETH_FORWARD_ETH_CALLS + proxyOnError = true # $ETH_PROXY_ON_ERROR + nodeID = "" # $ETH_NODE_ID + clientName = "" # $ETH_CLIENT_NAME + genesisBlock = "" # $ETH_GENESIS_BLOCK + networkID = "1" # $ETH_NETWORK_ID diff --git a/app/data/config/mainnet-eth-ipld-eth-server/srv.env b/app/data/config/mainnet-eth-ipld-eth-server/srv.env new file mode 100644 index 00000000..34c79ce4 --- /dev/null +++ b/app/data/config/mainnet-eth-ipld-eth-server/srv.env @@ -0,0 +1,27 @@ +CERC_REMOTE_DEBUG="false" + +LOG_LEVEL="debug" + +ETH_CHAIN_ID=1 +ETH_CLIENT_NAME="Geth" +ETH_FORWARD_ETH_CALLS="false" +ETH_FORWARD_GET_STORAGE_AT="false" +ETH_GENESIS_BLOCK="0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3" +ETH_HTTP_PATH="mainnet-eth-geth-1:8545" +ETH_NETWORK_ID=1 +ETH_NODE_ID=1112 +ETH_PROXY_ON_ERROR="true" +ETH_RPC_GAS_CAP=1000000000000 +ETH_SUPPORTS_STATEDIFF="true" +ETH_STATEDIFF_TIMEOUT=4m + +SERVER_HTTP_PATH=0.0.0.0:8081 +SERVER_GRAPHQL="false" +SERVER_GRAPHQLPATH=0.0.0.0:8082 + +METRICS="true" +PROM_HTTP="true" +PROM_HTTP_ADDR="0.0.0.0" +PROM_HTTP_PORT="8090" + +VDB_COMMAND="serve" diff --git a/app/data/config/mainnet-eth-keycloak/keycloak.env b/app/data/config/mainnet-eth-keycloak/keycloak.env index f37fdd30..31a19079 100644 --- a/app/data/config/mainnet-eth-keycloak/keycloak.env +++ 
b/app/data/config/mainnet-eth-keycloak/keycloak.env @@ -1,8 +1,11 @@ POSTGRES_DB=keycloak POSTGRES_USER=keycloak POSTGRES_PASSWORD=keycloak +# Don't change this unless you also change the healthcheck in docker-compose-mainnet-eth-keycloak.yml +PGPORT=35432 KC_DB=postgres KC_DB_URL_HOST=keycloak-db +KC_DB_URL_PORT=${PGPORT} KC_DB_URL_DATABASE=${POSTGRES_DB} KC_DB_USERNAME=${POSTGRES_USER} KC_DB_PASSWORD=${POSTGRES_PASSWORD} diff --git a/app/data/config/mainnet-eth-keycloak/nginx.example b/app/data/config/mainnet-eth-keycloak/nginx.example index 67095551..758f0ce1 100644 --- a/app/data/config/mainnet-eth-keycloak/nginx.example +++ b/app/data/config/mainnet-eth-keycloak/nginx.example @@ -15,42 +15,49 @@ server { } upstream geth-pool { - keepalive 100; - hash $user_id consistent; - server server-a:8545; - server server-b:8545; - server server-c:8545; + server server-a:8545 max_fails=10 fail_timeout=2s; + server server-c:8545 max_fails=10 fail_timeout=2s backup; + server server-b:8545 max_fails=10 fail_timeout=2s backup; + keepalive 200; } -# self-reg happens on one server for clarity upstream reg-ui-pool { - keepalive 100; + keepalive 2; server server-a:8085; } upstream reg-api-pool { - keepalive 100; + keepalive 2; server server-a:8086; } -# auth uses server-a if available +# auth uses the reg server when available upstream auth-pool { - keepalive 100; + keepalive 10; server server-a:8080; server server-b:8080 backup; server server-c:8080 backup; } -log_format upstreamlog '[$time_local] $remote_addr $user_id - $server_name $host to: $upstream_addr: $request $status upstream_response_time $upstream_response_time msec $msec request_time $request_time'; -proxy_cache_path /var/cache/nginx/auth_cache levels=1 keys_zone=auth_cache:1m max_size=5m inactive=60m; + +log_format upstreamlog '[$time_local] $msec $remote_addr $user_id - $server_name($host) to $upstream_addr: $request $status upstream_response_time $upstream_response_time request_time $request_time'; 
+proxy_cache_path /var/cache/nginx/auth_cache levels=1 keys_zone=auth_cache:1m max_size=5m inactive=60m; + server { listen 443 ssl http2; server_name my.example.com; + keepalive_requests 500000; + keepalive_timeout 90s; + http2_max_requests 5000000; + http2_max_concurrent_streams 1024; + http2_idle_timeout 3m; + http2_recv_timeout 30s; access_log /var/log/nginx/my.example.com-access.log upstreamlog; error_log /var/log/nginx/my.example.com-error.log; ssl_certificate /etc/nginx/ssl/my.example.com/cert.pem; ssl_certificate_key /etc/nginx/ssl/my.example.com/key.pem; + ssl_session_cache shared:SSL:10m; error_page 500 502 503 504 /50x.html; location = /50x.html { @@ -60,7 +67,6 @@ server { #rewrite ^/?$ /newuser/; rewrite ^/?$ https://www.example.com/; - # geth-pool ETH API location ~ ^/v1/eth/?([^/]*)$ { set $apiKey $1; @@ -71,8 +77,8 @@ server { auth_request_set $user_id $sent_http_x_user_id; rewrite /.*$ / break; - client_max_body_size 3m; - client_body_buffer_size 3m; + client_max_body_size 3m; + client_body_buffer_size 3m; proxy_buffer_size 32k; proxy_buffers 16 32k; proxy_busy_buffers_size 96k; @@ -80,8 +86,10 @@ server { proxy_pass http://geth-pool; proxy_set_header X-Original-Remote-Addr $remote_addr; proxy_set_header X-User-Id $user_id; + proxy_http_version 1.1; + proxy_set_header Connection ""; } - + # keycloak location = /auth { internal; @@ -95,6 +103,8 @@ server { proxy_set_header X-Original-URI $request_uri; proxy_set_header X-Original-Remote-Addr $remote_addr; proxy_set_header X-Original-Host $host; + proxy_http_version 1.1; + proxy_set_header Connection ""; } location /newuser/ { diff --git a/app/data/config/mainnet-eth-plugeth/geth.env b/app/data/config/mainnet-eth-plugeth/geth.env new file mode 100644 index 00000000..5c936d36 --- /dev/null +++ b/app/data/config/mainnet-eth-plugeth/geth.env @@ -0,0 +1,75 @@ +# Enable remote debugging using dlv +CERC_REMOTE_DEBUG=false + +# Enable startup script debug output. 
+CERC_SCRIPT_DEBUG=false + +# Simple toggle to choose either a 'full' node or an 'archive' node +# (controls the values of --syncmode --gcmode --snapshot) +CERC_GETH_MODE_QUICK_SET=archive + +# Path to plugeth plugins. +CERC_PLUGINS_DIR="/usr/local/lib/plugeth" + +# Will turn on statediffing automatically if CERC_STATEDIFF_DB_HOST exists (see ../mainnet-eth-ipld-eth-db/db.env). +CERC_RUN_STATEDIFF="detect" + +# The minimum necessary version of the DB to enable statediffing. +CERC_STATEDIFF_DB_GOOSE_MIN_VER=18 + +# Whether all statediff-related DB statements should be logged (useful for debugging). +CERC_STATEDIFF_DB_LOG_STATEMENTS=false + +# The number of concurrent workers to process state diff objects +CERC_STATEDIFF_WORKERS=16 + +# Each statediffing node should have a unique node ID. +CERC_STATEDIFF_DB_NODE_ID=1111 + +# Optional custom node name. +# GETH_NODE_NAME="" + +# Specify any other geth CLI options. +GETH_OPTS="" + +# --cache +GETH_CACHE=1024 + +# --cache.database +GETH_CACHE_DB=50 + +# --cache.gc +GETH_CACHE_GC=25 + +# --cache.trie +GETH_CACHE_TRIE=15 + +# --datadir +GETH_DATADIR="/data" + +# --http.api +GETH_HTTP_API="eth,web3,net" + +# --authrpc.jwtsecret +GETH_JWTSECRET="/etc/mainnet-eth/jwtsecret" + +# --maxpeers +GETH_MAX_PEERS=100 + +# --rpc.evmtimeout +GETH_RPC_EVMTIMEOUT=0 + +# --rpc.gascap +GETH_RPC_GASCAP=0 + +# --txlookuplimit +GETH_TXLOOKUPLIMIT=0 + +# --verbosity +GETH_VERBOSITY=3 + +# --log.vmodule +GETH_VMODULE="rpc/*=4" + +# --ws.api +GETH_WS_API="eth,web3,net" diff --git a/app/data/config/mainnet-eth-plugeth/lighthouse.env b/app/data/config/mainnet-eth-plugeth/lighthouse.env new file mode 100644 index 00000000..11fc6b69 --- /dev/null +++ b/app/data/config/mainnet-eth-plugeth/lighthouse.env @@ -0,0 +1,33 @@ +# Enable startup script debug output. +CERC_SCRIPT_DEBUG=false + +# Specify any other lighthouse CLI options. 
+LIGHTHOUSE_OPTS="" + +# Override the advertised public IP (optional) +# --enr-address +#LIGHTHOUSE_ENR_ADDRESS="" + +# --checkpoint-sync-url +LIGHTHOUSE_CHECKPOINT_SYNC_URL="https://beaconstate.ethstaker.cc" + +# --checkpoint-sync-url-timeout +LIGHTHOUSE_CHECKPOINT_SYNC_URL_TIMEOUT=300 + +# --datadir +LIGHTHOUSE_DATADIR=/data + +# --debug-level +LIGHTHOUSE_DEBUG_LEVEL=info + +# --http-port +LIGHTHOUSE_HTTP_PORT=5052 + +# --execution-jwt +LIGHTHOUSE_JWTSECRET=/etc/mainnet-eth/jwtsecret + +# --metrics-port +LIGHTHOUSE_METRICS_PORT=5054 + +# --port --enr-udp-port --enr-tcp-port +LIGHTHOUSE_NETWORK_PORT=9000 diff --git a/app/data/config/mainnet-eth-plugeth/scripts/run-geth.sh b/app/data/config/mainnet-eth-plugeth/scripts/run-geth.sh new file mode 100755 index 00000000..1971c2d0 --- /dev/null +++ b/app/data/config/mainnet-eth-plugeth/scripts/run-geth.sh @@ -0,0 +1,121 @@ +#!/bin/sh +if [[ "true" == "$CERC_SCRIPT_DEBUG" ]]; then + set -x +fi + +START_CMD="geth" +if [[ "true" == "$CERC_REMOTE_DEBUG" ]] && [[ -x "/usr/local/bin/dlv" ]]; then + START_CMD="/usr/local/bin/dlv --listen=:40000 --headless=true --api-version=2 --accept-multiclient exec /usr/local/bin/geth --continue --" +fi + +# See https://linuxconfig.org/how-to-propagate-a-signal-to-child-processes-from-a-bash-script +cleanup() { + echo "Signal received, cleaning up..." 
+ + # Kill the child process first (CERC_REMOTE_DEBUG=true uses dlv which starts geth as a child process) + pkill -P ${geth_pid} + sleep 2 + kill $(jobs -p) + + wait + echo "Done" +} +trap 'cleanup' SIGINT SIGTERM + +MODE_FLAGS="" +if [[ "$CERC_GETH_MODE_QUICK_SET" = "archive" ]]; then + MODE_FLAGS="--syncmode=${GETH_SYNC_MODE:-full} --gcmode=${GETH_GC_MODE:-archive} --snapshot=${GETH_SNAPSHOT:-false}" +else + MODE_FLAGS="--syncmode=${GETH_SYNC_MODE:-snap} --gcmode=${GETH_GC_MODE:-full} --snapshot=${GETH_SNAPSHOT:-true}" +fi + +if [[ "${CERC_RUN_STATEDIFF}" == "detect" ]] && [[ -n "$CERC_STATEDIFF_DB_HOST" ]]; then + dig_result=$(dig $CERC_STATEDIFF_DB_HOST +short) + dig_status_code=$? + if [[ $dig_status_code = 0 && -n $dig_result ]]; then + echo "Statediff DB at $CERC_STATEDIFF_DB_HOST" + CERC_RUN_STATEDIFF="true" + else + echo "No statediff DB available." + CERC_RUN_STATEDIFF="false" + fi +fi + +STATEDIFF_OPTS="" +if [[ "${CERC_RUN_STATEDIFF}" == "true" ]]; then + ready=0 + echo "Waiting for statediff DB..." + while [ $ready -eq 0 ]; do + sleep 1 + export PGPASSWORD="$CERC_STATEDIFF_DB_PASSWORD" + result=$(psql -h "$CERC_STATEDIFF_DB_HOST" \ + -p "$CERC_STATEDIFF_DB_PORT" \ + -U "$CERC_STATEDIFF_DB_USER" \ + -d "$CERC_STATEDIFF_DB_NAME" \ + -t -c 'select max(version_id) from goose_db_version;' 2>/dev/null | awk '{ print $1 }') + if [ -n "$result" ]; then + echo "DB ready..." 
+ if [[ $result -ge $CERC_STATEDIFF_DB_GOOSE_MIN_VER ]]; then + ready=1 + else + echo "DB not at required version (want $CERC_STATEDIFF_DB_GOOSE_MIN_VER, have $result)" + fi + fi + done + + STATEDIFF_OPTS="--statediff \ + --statediff.db.host=$CERC_STATEDIFF_DB_HOST \ + --statediff.db.name=$CERC_STATEDIFF_DB_NAME \ + --statediff.db.nodeid=$CERC_STATEDIFF_DB_NODE_ID \ + --statediff.db.password=$CERC_STATEDIFF_DB_PASSWORD \ + --statediff.db.port=$CERC_STATEDIFF_DB_PORT \ + --statediff.db.user=$CERC_STATEDIFF_DB_USER \ + --statediff.db.logstatements=${CERC_STATEDIFF_DB_LOG_STATEMENTS:-false} \ + --statediff.db.copyfrom=${CERC_STATEDIFF_DB_COPY_FROM:-true} \ + --statediff.waitforsync=${CERC_STATEDIFF_WAIT_FO_SYNC:-true} \ + --statediff.workers=${CERC_STATEDIFF_WORKERS:-1} \ + --statediff.writing=${CERC_STATEDIFF_WRITING:-true}" + + if [[ -d "${CERC_PLUGINS_DIR}" ]]; then + # With plugeth, we separate the statediff options by prefixing with ' -- ' + STATEDIFF_OPTS="--pluginsdir "${CERC_PLUGINS_DIR}" -- ${STATEDIFF_OPTS}" + fi +fi + +$START_CMD \ + $MODE_FLAGS \ + --datadir="${GETH_DATADIR}"\ + --identity="${GETH_NODE_NAME}" \ + --maxpeers=${GETH_MAX_PEERS} \ + --cache=${GETH_CACHE} \ + --cache.gc=${GETH_CACHE_GC} \ + --cache.database=${GETH_CACHE_DB} \ + --cache.trie=${GETH_CACHE_TRIE} \ + --authrpc.addr='0.0.0.0' \ + --authrpc.vhosts='*' \ + --authrpc.jwtsecret="${GETH_JWTSECRET}" \ + --http \ + --http.addr='0.0.0.0' \ + --http.api="${GETH_HTTP_API}" \ + --http.vhosts='*' \ + --metrics \ + --metrics.addr='0.0.0.0' \ + --ws \ + --ws.addr='0.0.0.0' \ + --ws.api="${GETH_WS_API}" \ + --rpc.gascap=${GETH_RPC_GASCAP} \ + --rpc.evmtimeout=${GETH_RPC_EVMTIMEOUT} \ + --txlookuplimit=${GETH_TXLOOKUPLIMIT} \ + --verbosity=${GETH_VERBOSITY} \ + --log.vmodule="${GETH_VMODULE}" \ + ${STATEDIFF_OPTS} \ + ${GETH_OPTS} & + +geth_pid=$! 
+wait $geth_pid + +if [[ "true" == "$CERC_KEEP_RUNNING_AFTER_GETH_EXIT" ]]; then + while [[ 1 -eq 1 ]]; do + sleep 60 + done +fi diff --git a/app/data/config/mainnet-eth-plugeth/scripts/run-lighthouse.sh b/app/data/config/mainnet-eth-plugeth/scripts/run-lighthouse.sh new file mode 100755 index 00000000..efda735b --- /dev/null +++ b/app/data/config/mainnet-eth-plugeth/scripts/run-lighthouse.sh @@ -0,0 +1,30 @@ +#!/bin/bash +if [[ "true" == "$CERC_SCRIPT_DEBUG" ]]; then + set -x +fi + +ENR_OPTS="" +if [[ -n "$LIGHTHOUSE_ENR_ADDRESS" ]]; then + ENR_OPTS="--enr-address $LIGHTHOUSE_ENR_ADDRESS" +fi + +exec lighthouse bn \ + --checkpoint-sync-url "$LIGHTHOUSE_CHECKPOINT_SYNC_URL" \ + --checkpoint-sync-url-timeout ${LIGHTHOUSE_CHECKPOINT_SYNC_URL_TIMEOUT} \ + --datadir "$LIGHTHOUSE_DATADIR" \ + --debug-level $LIGHTHOUSE_DEBUG_LEVEL \ + --disable-deposit-contract-sync \ + --disable-upnp \ + --enr-tcp-port $LIGHTHOUSE_NETWORK_PORT \ + --enr-udp-port $LIGHTHOUSE_NETWORK_PORT \ + --execution-endpoint "$LIGHTHOUSE_EXECUTION_ENDPOINT" \ + --execution-jwt /etc/mainnet-eth/jwtsecret \ + --http \ + --http-address 0.0.0.0 \ + --http-port $LIGHTHOUSE_HTTP_PORT \ + --metrics \ + --metrics-address=0.0.0.0 \ + --metrics-port $LIGHTHOUSE_METRICS_PORT \ + --network mainnet \ + --port $LIGHTHOUSE_NETWORK_PORT \ + $ENR_OPTS $LIGHTHOUSE_OPTS diff --git a/app/data/config/mainnet-eth/geth.env b/app/data/config/mainnet-eth/geth.env index a01444df..365bb5fb 100644 --- a/app/data/config/mainnet-eth/geth.env +++ b/app/data/config/mainnet-eth/geth.env @@ -25,7 +25,7 @@ GETH_CACHE_GC=25 # --cache.trie GETH_CACHE_TRIE=15 -j + # --datadir GETH_DATADIR="/data" diff --git a/app/data/container-build/cerc-plugeth-with-plugins/Dockerfile b/app/data/container-build/cerc-plugeth-with-plugins/Dockerfile new file mode 100644 index 00000000..87d050ea --- /dev/null +++ b/app/data/container-build/cerc-plugeth-with-plugins/Dockerfile @@ -0,0 +1,22 @@ +# Using the same golang image as used to build plugeth: 
https://git.vdb.to/cerc-io/plugeth/src/branch/statediff/Dockerfile +FROM golang:1.20-alpine3.18 as delve + +# Add delve so that we can do remote debugging. +RUN go install github.com/go-delve/delve/cmd/dlv@latest + +FROM cerc/plugeth-statediff:local as statediff +FROM cerc/plugeth:local as plugeth + +FROM alpine:3.18 + +# Install tools often used in scripting, like bash, wget, and jq. +RUN apk add --no-cache ca-certificates bash wget curl python3 bind-tools postgresql-client jq + +COPY --from=delve /go/bin/dlv /usr/local/bin/ +COPY --from=plugeth /usr/local/bin/geth /usr/local/bin/ + +# Place all plugeth plugins in /usr/local/lib/plugeth +COPY --from=statediff /usr/local/lib/statediff.so /usr/local/lib/plugeth/ + +EXPOSE 8545 8546 8551 6060 30303 30303/udp 40000 +ENTRYPOINT ["geth"] diff --git a/app/data/container-build/cerc-plugeth-with-plugins/build.sh b/app/data/container-build/cerc-plugeth-with-plugins/build.sh new file mode 100755 index 00000000..9ab44946 --- /dev/null +++ b/app/data/container-build/cerc-plugeth-with-plugins/build.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# Build cerc/plugeth-with-plugins +set -x + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +docker build -t cerc/plugeth-with-plugins:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} $SCRIPT_DIR diff --git a/app/data/stacks/mainnet-eth-plugeth/README.md b/app/data/stacks/mainnet-eth-plugeth/README.md new file mode 100644 index 00000000..8ed6bebb --- /dev/null +++ b/app/data/stacks/mainnet-eth-plugeth/README.md @@ -0,0 +1,141 @@ +# mainnet-eth-plugeth + +Deploys a "head-tracking" mainnet Ethereum stack comprising a [plugeth](https://git.vdb.to/cerc-io/plugeth) execution layer node and a [lighthouse](https://github.com/sigp/lighthouse) consensus layer node, with [plugeth-statediff](https://git.vdb.to/cerc-io/plugeth-statediff) for statediffing, [ipld-eth-db](https://git.vdb.to/cerc-io/ipld-eth-db) 
for storage, and [ipld-eth-server](https://git.vdb.to/cerc-io/ipld-eth-server) for indexed ETH IPLD objects. + +## Clone required repositories + +``` +$ laconic-so --stack mainnet-eth-plugeth setup-repositories +``` + +## Build containers + +``` +$ laconic-so --stack mainnet-eth-plugeth build-containers +``` + +## Create a deployment + +``` +$ laconic-so --stack mainnet-eth-plugeth deploy init --map-ports-to-host any-same --output mainnet-eth-plugeth-spec.yml +$ laconic-so --stack mainnet-eth-plugeth deploy create --spec-file mainnet-eth-plugeth-spec.yml --deployment-dir mainnet-eth-plugeth-deployment +``` +## Start the stack +``` +$ laconic-so deployment --dir mainnet-eth-plugeth-deployment start +``` +Display stack status: +``` +$ laconic-so deployment --dir mainnet-eth-plugeth-deployment ps +Running containers: +id: f39608eca04d72d6b0f1f3acefc5ebb52908da06e221d20c7138f7e3dff5e423, name: laconic-ef641b4d13eb61ed561b19be67063241-foundry-1, ports: +id: 4052b1eddd886ae0d6b41f9ff22e68a70f267b2bfde10f4b7b79b5bd1eeddcac, name: laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-plugeth-geth-1-1, ports: 30303/tcp, 30303/udp, 0.0.0.0:49184->40000/tcp, 0.0.0.0:49185->6060/tcp, 0.0.0.0:49186->8545/tcp, 8546/tcp +id: ac331232e597944b621b3b8942ace5dafb14524302cab338ff946c7f6e5a1d52, name: laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-plugeth-lighthouse-1-1, ports: 0.0.0.0:49187->8001/tcp +``` +See stack logs: +``` +$ laconic-so deployment --dir mainnet-eth-plugeth-deployment logs +time="2023-07-25T09:46:29-06:00" level=warning msg="The \"CERC_SCRIPT_DEBUG\" variable is not set. Defaulting to a blank string." 
+laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-plugeth-lighthouse-1-1 | Jul 25 15:45:13.362 INFO Logging to file path: "/var/lighthouse-data-dir/beacon/logs/beacon.log" +laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-plugeth-lighthouse-1-1 | Jul 25 15:45:13.365 INFO Lighthouse started version: Lighthouse/v4.1.0-693886b +laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-plugeth-lighthouse-1-1 | Jul 25 15:45:13.365 INFO Configured for network name: mainnet +laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-plugeth-lighthouse-1-1 | Jul 25 15:45:13.366 INFO Data directory initialised datadir: /var/lighthouse-data-dir +laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-plugeth-lighthouse-1-1 | Jul 25 15:45:13.366 INFO Deposit contract address: 0x00000000219ab540356cbb839cbe05303d7705fa, deploy_block: 11184524 +laconic-ef641b4d13eb61ed561b19be67063241-mainnet-eth-plugeth-lighthouse-1-1 | Jul 25 15:45:13.424 INFO Starting checkpoint sync remote_url: https://beaconstate.ethstaker.cc/, service: beacon +``` +## Monitoring stack sync progress +Both go-ethereum and lighthouse will engage in an initial chain sync phase that will last up to several hours depending on hardware performance and network capacity. 
+Syncing can be monitored by looking for these log messages: +``` +Jul 24 12:34:17.001 INFO Downloading historical blocks est_time: 5 days 11 hrs, speed: 14.67 slots/sec, distance: 6932481 slots (137 weeks 3 days), service: slot_notifier +INFO [07-24|12:14:52.493] Syncing beacon headers downloaded=145,920 left=17,617,968 eta=1h23m32.815s +INFO [07-24|12:33:15.238] Syncing: chain download in progress synced=1.86% chain=148.94MiB headers=368,640@95.03MiB bodies=330,081@40.56MiB receipts=330,081@13.35MiB eta=37m54.505s +INFO [07-24|12:35:13.028] Syncing: state download in progress synced=1.32% state=4.64GiB accounts=2,850,314@677.57MiB slots=18,663,070@3.87GiB codes=26662@111.14MiB eta=3h18m0.699s +``` +Once synced up these log messages will be observed: +``` +INFO Synced slot: 6952515, block: 0x5bcb…f6d9, epoch: 217266, finalized_epoch: 217264, finalized_root: 0x6342…2c5c, exec_hash: 0x8d8c…2443 (verified), peers: 31, service: slot_notifier +INFO [07-25|03:04:48.941] Imported new potential chain segment number=17,767,316 hash=84f6e7..bc2cb0 blocks=1 txs=137 mgas=16.123 elapsed=57.087ms mgasps=282.434 dirty=461.46MiB +INFO [07-25|03:04:49.042] Chain head was updated number=17,767,316 hash=84f6e7..bc2cb0 root=ca58b2..8258c1 elapsed=2.480111ms +``` +## Clean up + +Stop the stack: +``` +$ laconic-so deployment --dir mainnet-eth-plugeth-deployment stop +``` +This leaves data volumes in place, allowing the stack to be subsequently re-started. +To permanently *delete* the stack's data volumes run: +``` +$ laconic-so deployment --dir mainnet-eth-plugeth-deployment stop --delete-data-volumes +``` +After deleting the volumes, any subsequent re-start will begin chain sync from cold. + +## Ports +It is usually necessary to expose certain container ports on one or more of the host's addresses to allow incoming connections. 
+Any ports defined in the Docker compose file are exposed by default with random port assignments, bound to "any" interface (IP address 0.0.0.0), but the port mappings can be +customized by editing the "spec" file generated by `laconic-so deploy init`. + +In this example, ports `8545` and `5052` have been assigned to a specific addresses/port combination on the host, while +port `40000` has been left with random assignment: +``` +$ cat mainnet-eth-plugeth-spec.yml +stack: mainnet-eth-plugeth +ports: + mainnet-eth-plugeth-geth-1: + - '10.10.10.10:8545:8545' + - '40000' + mainnet-eth-plugeth-lighthouse-1: + - '10.10.10.10:5052:5052' +volumes: + mainnet_eth_plugeth_config_data: ./data/mainnet_eth_plugeth_config_data + mainnet_eth_plugeth_geth_1_data: ./data/mainnet_eth_plugeth_geth_1_data + mainnet_eth_plugeth_lighthouse_1_data: ./data/mainnet_eth_plugeth_lighthouse_1_data +``` +In addition, a stack-wide port mapping "recipe" can be applied at the time the +`laconic-so deploy init` command is run, by supplying the desired recipe with the `--map-ports-to-host` option. The following recipes are supported: +| Recipe | Host Port Mapping | +|--------|-------------------| +| any-variable-random | Bind to 0.0.0.0 using a random port assigned at start time (default) | +| localhost-same | Bind to 127.0.0.1 using the same port number as exposed by the containers | +| any-same | Bind to 0.0.0.0 using the same port number as exposed by the containers | +| localhost-fixed-random | Bind to 127.0.0.1 using a random port number selected at the time the command is run (not checked for already in use)| +| any-fixed-random | Bind to 0.0.0.0 using a random port number selected at the time the command is run (not checked for already in use) | +## Data volumes +Container data volumes are bind-mounted to specified paths in the host filesystem. 
+The default setup (generated by `laconic-so deploy init`) places the volumes in the `./data` subdirectory of the deployment directory: +``` +$ cat mainnet-eth-plugeth-spec.yml +stack: mainnet-eth-plugeth +ports: + mainnet-eth-plugeth-geth-1: + - '10.10.10.10:8545:8545' + - '40000' + mainnet-eth-plugeth-lighthouse-1: + - '10.10.10.10:5052:5052' +volumes: + mainnet_eth_plugeth_config_data: ./data/mainnet_eth_plugeth_config_data + mainnet_eth_plugeth_geth_1_data: ./data/mainnet_eth_plugeth_geth_1_data + mainnet_eth_plugeth_lighthouse_1_data: ./data/mainnet_eth_plugeth_lighthouse_1_data +``` +A synced-up stack will consume around 900GB of data volume space: +``` +$ sudo du -h mainnet-eth-plugeth-deployment/data/ +150M mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_lighthouse_1_data/beacon/freezer_db +25G mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_lighthouse_1_data/beacon/chain_db +16K mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_lighthouse_1_data/beacon/network +368M mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_lighthouse_1_data/beacon/logs +26G mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_lighthouse_1_data/beacon +26G mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_lighthouse_1_data +8.0K mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_config_data +4.0K mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_geth_1_data/keystore +527G mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_geth_1_data/geth/chaindata/ancient/chain +527G mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_geth_1_data/geth/chaindata/ancient +859G mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_geth_1_data/geth/chaindata +4.8M mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_geth_1_data/geth/nodes +242M mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_geth_1_data/geth/ethash +669M mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_geth_1_data/geth/triecache +860G 
mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_geth_1_data/geth +860G mainnet-eth-plugeth-deployment/data/mainnet_eth_plugeth_geth_1_data +885G mainnet-eth-plugeth-deployment/data/ +``` diff --git a/app/data/stacks/mainnet-eth-plugeth/deploy/commands.py b/app/data/stacks/mainnet-eth-plugeth/deploy/commands.py new file mode 100644 index 00000000..5aba9547 --- /dev/null +++ b/app/data/stacks/mainnet-eth-plugeth/deploy/commands.py @@ -0,0 +1,32 @@ +# Copyright © 2023 Vulcanize + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +from secrets import token_hex + + +def init(ctx): + return None + + +def setup(ctx): + return None + + +def create(ctx, extra_args): + # Generate the JWT secret and save to its config file + secret = token_hex(32) + jwt_file_path = ctx.deployment_dir.joinpath("data", "mainnet_eth_plugeth_config_data", "jwtsecret") + with open(jwt_file_path, 'w+') as jwt_file: + jwt_file.write(secret) diff --git a/app/data/stacks/mainnet-eth-plugeth/stack.yml b/app/data/stacks/mainnet-eth-plugeth/stack.yml new file mode 100644 index 00000000..7ade244c --- /dev/null +++ b/app/data/stacks/mainnet-eth-plugeth/stack.yml @@ -0,0 +1,29 @@ +version: "1.2" +name: mainnet-eth +description: "Ethereum Mainnet" +repos: + - git.vdb.to/cerc-io/plugeth@statediff + - git.vdb.to/cerc-io/plugeth-statediff + - git.vdb.to/cerc-io/lighthouse + - git.vdb.to/cerc-io/ipld-eth-db@v5 + - git.vdb.to/cerc-io/ipld-eth-server@v5 + - git.vdb.to/cerc-io/keycloak-reg-api + - git.vdb.to/cerc-io/keycloak-reg-ui +containers: + - cerc/plugeth-statediff + - cerc/plugeth + - cerc/plugeth-with-plugins + - cerc/lighthouse + - cerc/lighthouse-cli + - cerc/ipld-eth-db + - cerc/ipld-eth-server + - cerc/keycloak + - cerc/webapp-base + - cerc/keycloak-reg-api + - cerc/keycloak-reg-ui +pods: + - mainnet-eth-plugeth + - mainnet-eth-ipld-eth-db + - mainnet-eth-ipld-eth-server + - mainnet-eth-keycloak + - mainnet-eth-metrics From 0f5b1a097b4c2ceac6f41c309a88ee8b15429b5c Mon Sep 17 00:00:00 2001 From: Thomas E Lackey Date: Wed, 25 Oct 2023 14:47:53 -0500 Subject: [PATCH 02/62] Add plugeth to chain-chunker stack (needed for new verify option). 
(#610) --- app/data/stacks/chain-chunker/stack.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/app/data/stacks/chain-chunker/stack.yml b/app/data/stacks/chain-chunker/stack.yml index d85aa057..2705f69a 100644 --- a/app/data/stacks/chain-chunker/stack.yml +++ b/app/data/stacks/chain-chunker/stack.yml @@ -6,8 +6,10 @@ repos: - git.vdb.to/cerc-io/eth-statediff-service@v5 - git.vdb.to/cerc-io/ipld-eth-db@v5 - git.vdb.to/cerc-io/ipld-eth-server@v5 + - git.vdb.to/cerc-io/plugeth@statediff containers: - cerc/ipld-eth-state-snapshot - cerc/eth-statediff-service - cerc/ipld-eth-db - cerc/ipld-eth-server + - cerc/plugeth From 36d1e0eedd95032c8f3085609f068d8ce72b8087 Mon Sep 17 00:00:00 2001 From: Ian Kay Date: Thu, 26 Oct 2023 17:26:42 -0400 Subject: [PATCH 03/62] add fixturenet-laconicd test --- .gitea/workflows/fixturenet-laconicd-test.yml | 55 ++++++++++++ .../triggers/fixturenet-laconicd-test | 2 + .github/workflows/fixturenet-laconicd.yml | 36 ++++++++ .../triggers/fixturenet-laconicd-test | 3 + tests/fixturenet-laconicd/run-test.sh | 84 +++++++++++++++++++ 5 files changed, 180 insertions(+) create mode 100644 .gitea/workflows/fixturenet-laconicd-test.yml create mode 100644 .gitea/workflows/triggers/fixturenet-laconicd-test create mode 100644 .github/workflows/fixturenet-laconicd.yml create mode 100644 .github/workflows/triggers/fixturenet-laconicd-test create mode 100755 tests/fixturenet-laconicd/run-test.sh diff --git a/.gitea/workflows/fixturenet-laconicd-test.yml b/.gitea/workflows/fixturenet-laconicd-test.yml new file mode 100644 index 00000000..ac397dad --- /dev/null +++ b/.gitea/workflows/fixturenet-laconicd-test.yml @@ -0,0 +1,55 @@ +name: Fixturenet-Laconicd-Test + +on: + push: + branches: '*' + paths: + - '!**' + - '.gitea/workflows/triggers/fixturenet-laconicd-test' + +# Needed until we can incorporate docker startup into the executor container +env: + DOCKER_HOST: unix:///var/run/dind.sock + + +jobs: + test: + name: "Run an Laconicd fixturenet test" 
+ runs-on: ubuntu-latest + steps: + - name: 'Update' + run: apt-get update + - name: 'Setup jq' + run: apt-get install jq -y + - name: 'Check jq' + run: | + which jq + jq --version + - name: "Clone project repository" + uses: actions/checkout@v3 + # At present the stock setup-python action fails on Linux/aarch64 + # Conditional steps below workaroud this by using deadsnakes for that case only + - name: "Install Python for ARM on Linux" + if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }} + uses: deadsnakes/action@v3.0.1 + with: + python-version: '3.8' + - name: "Install Python cases other than ARM on Linux" + if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }} + uses: actions/setup-python@v4 + with: + python-version: '3.8' + - name: "Print Python version" + run: python3 --version + - name: "Install shiv" + run: pip install shiv + - name: "Generate build version file" + run: ./scripts/create_build_tag_file.sh + - name: "Build local shiv package" + run: ./scripts/build_shiv_package.sh + - name: Start dockerd # Also needed until we can incorporate into the executor + run: | + dockerd -H $DOCKER_HOST --userland-proxy=false & + sleep 5 + - name: "Run fixturenet-laconicd tests" + run: ./tests/fixturenet-laconicd/run-test.sh diff --git a/.gitea/workflows/triggers/fixturenet-laconicd-test b/.gitea/workflows/triggers/fixturenet-laconicd-test new file mode 100644 index 00000000..e6b73875 --- /dev/null +++ b/.gitea/workflows/triggers/fixturenet-laconicd-test @@ -0,0 +1,2 @@ +Change this file to trigger running the fixturenet-laconicd-test CI job + diff --git a/.github/workflows/fixturenet-laconicd.yml b/.github/workflows/fixturenet-laconicd.yml new file mode 100644 index 00000000..5c712e9b --- /dev/null +++ b/.github/workflows/fixturenet-laconicd.yml @@ -0,0 +1,36 @@ +name: Fixturenet-Laconicd Test + +on: + push: + branches: '*' + paths: + - '!**' + - '.github/workflows/triggers/fixturenet-laconicd-test' + +jobs: + test: + name: "Run fixturenet-laconicd test 
suite" + runs-on: ubuntu-latest + steps: + - name: 'Setup jq' + run: apt-get install jq -y + - name: 'Check jq' + run: | + which jq + jq --version + - name: "Clone project repository" + uses: actions/checkout@v3 + - name: "Install Python" + uses: actions/setup-python@v4 + with: + python-version: '3.8' + - name: "Print Python version" + run: python3 --version + - name: "Install shiv" + run: pip install shiv + - name: "Generate build version file" + run: ./scripts/create_build_tag_file.sh + - name: "Build local shiv package" + run: ./scripts/build_shiv_package.sh + - name: "Run fixturenet-laconicd tests" + run: ./tests/fixturenet-laconicd/run-test.sh diff --git a/.github/workflows/triggers/fixturenet-laconicd-test b/.github/workflows/triggers/fixturenet-laconicd-test new file mode 100644 index 00000000..09e6c044 --- /dev/null +++ b/.github/workflows/triggers/fixturenet-laconicd-test @@ -0,0 +1,3 @@ +Change this file to trigger running the fixturenet-laconicd-test CI job + +trigger \ No newline at end of file diff --git a/tests/fixturenet-laconicd/run-test.sh b/tests/fixturenet-laconicd/run-test.sh new file mode 100755 index 00000000..8dad9917 --- /dev/null +++ b/tests/fixturenet-laconicd/run-test.sh @@ -0,0 +1,84 @@ +#!/usr/bin/env bash + +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +echo "$(date +"%Y-%m-%d %T"): Running stack-orchestrator Laconicd fixturenet test" +env +cat /etc/hosts +# Bit of a hack, test the most recent package +TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 ) +# Set a new unique repo dir +export CERC_REPO_BASE_DIR=$(mktemp -d $(pwd)/stack-orchestrator-fixturenet-laconicd-test.XXXXXXXXXX) +echo "$(date +"%Y-%m-%d %T"): Testing this package: $TEST_TARGET_SO" +echo "$(date +"%Y-%m-%d %T"): Test version command" +reported_version_string=$( $TEST_TARGET_SO version ) +echo "$(date +"%Y-%m-%d %T"): Version reported is: ${reported_version_string}" + +echo "$(date +"%Y-%m-%d %T"): Cloning laconicd repositories into: 
$CERC_REPO_BASE_DIR" +$TEST_TARGET_SO --stack fixturenet-laconicd setup-repositories + +echo "$(date +"%Y-%m-%d %T"): Building containers" +$TEST_TARGET_SO --stack fixturenet-laconicd build-containers +echo "$(date +"%Y-%m-%d %T"): Starting stack" +$TEST_TARGET_SO --stack fixturenet-laconicd deploy --cluster laconicd up +echo "$(date +"%Y-%m-%d %T"): Stack started" +# Verify that the fixturenet is up and running +$TEST_TARGET_SO --stack fixturenet-laconicd deploy --cluster laconicd ps + +timeout=900 # 15 minutes +echo "$(date +"%Y-%m-%d %T"): Getting initial block number. Timeout set to $timeout seconds" +start_time=$(date +%s) +elapsed_time=0 +initial_block_number=null +while [ "$initial_block_number" == "null" ] && [ $elapsed_time -lt $timeout ]; do + sleep 10 + echo "$(date +"%Y-%m-%d %T"): Waiting for initial block..." + initial_block_number=$(docker exec laconicd-laconicd-1 /usr/bin/laconicd status | jq -r .SyncInfo.latest_block_height) + current_time=$(date +%s) + elapsed_time=$((current_time - start_time)) +done + +subsequent_block_number=$initial_block_number + +# if initial block was 0 after timeout, assume chain did not start successfully and skip finding subsequent block +if [[ $initial_block_number != "null" ]]; then + timeout=300 + echo "$(date +"%Y-%m-%d %T"): Getting subsequent block number. Timeout set to $timeout seconds" + start_time=$(date +%s) + elapsed_time=0 + # wait for 5 blocks or timeout + while [ "$subsequent_block_number" -le $((initial_block_number + 5)) ] && [ $elapsed_time -lt $timeout ]; do + sleep 10 + echo "$(date +"%Y-%m-%d %T"): Waiting for five blocks or $timeout seconds..." 
+ subsequent_block_number=$(docker exec laconicd-laconicd-1 /usr/bin/laconicd status | jq -r .SyncInfo.latest_block_height) + current_time=$(date +%s) + elapsed_time=$((current_time - start_time)) + done +fi + +# will return 0 if either of the above loops timed out +block_number_difference=$((subsequent_block_number - initial_block_number)) + +echo "$(date +"%Y-%m-%d %T"): Results of block height queries:" +echo "Initial block height: $initial_block_number" +echo "Subsequent block height: $subsequent_block_number" + +# Block height difference should be between 1 and some small number +if [[ $block_number_difference -gt 1 && $block_number_difference -lt 100 ]]; then + echo "Test passed" + test_result=0 +else + echo "Test failed: block numbers were ${initial_block_number} and ${subsequent_block_number}" + echo "Logs from stack:" + $TEST_TARGET_SO --stack fixturenet-laconicd deploy logs + test_result=1 +fi + +$TEST_TARGET_SO --stack fixturenet-laconicd deploy --cluster laconicd down --delete-volumes +echo "$(date +"%Y-%m-%d %T"): Removing cloned repositories" +rm -rf $CERC_REPO_BASE_DIR +echo "$(date +"%Y-%m-%d %T"): Test finished" +exit $test_result From f198f43b3a42dff623a43abfc5262a3b59600866 Mon Sep 17 00:00:00 2001 From: Ian Date: Fri, 27 Oct 2023 08:48:44 -0400 Subject: [PATCH 04/62] add newline --- .github/workflows/triggers/fixturenet-laconicd-test | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/triggers/fixturenet-laconicd-test b/.github/workflows/triggers/fixturenet-laconicd-test index 09e6c044..ad4c76a7 100644 --- a/.github/workflows/triggers/fixturenet-laconicd-test +++ b/.github/workflows/triggers/fixturenet-laconicd-test @@ -1,3 +1,3 @@ Change this file to trigger running the fixturenet-laconicd-test CI job -trigger \ No newline at end of file +trigger From 6130eab5cb7add6db363b71a3c0195da230f3971 Mon Sep 17 00:00:00 2001 From: David Boreham Date: Fri, 27 Oct 2023 10:19:44 -0600 Subject: [PATCH 05/62] k8s deploy (#614) 
--- app/deploy/k8s/cluster_info.py | 84 ++++++++++++++++++++++++++++++++++ app/deploy/k8s/deploy_k8s.py | 65 +++++++++++++++++++++++--- app/deploy/k8s/helpers.py | 57 +++++++++++++++++++++++ app/opts.py | 20 ++++++++ cli.py | 5 +- 5 files changed, 224 insertions(+), 7 deletions(-) create mode 100644 app/deploy/k8s/cluster_info.py create mode 100644 app/deploy/k8s/helpers.py create mode 100644 app/opts.py diff --git a/app/deploy/k8s/cluster_info.py b/app/deploy/k8s/cluster_info.py new file mode 100644 index 00000000..540f5f8c --- /dev/null +++ b/app/deploy/k8s/cluster_info.py @@ -0,0 +1,84 @@ +# Copyright © 2023 Vulcanize + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +from kubernetes import client +from typing import Any, List, Set + +from app.opts import opts +from app.util import get_yaml + + +class ClusterInfo: + parsed_pod_yaml_map: Any = {} + image_set: Set[str] = set() + app_name: str = "test-app" + deployment_name: str = "test-deployment" + + def __init__(self) -> None: + pass + + def int_from_pod_files(self, pod_files: List[str]): + for pod_file in pod_files: + with open(pod_file, "r") as pod_file_descriptor: + parsed_pod_file = get_yaml().load(pod_file_descriptor) + self.parsed_pod_yaml_map[pod_file] = parsed_pod_file + if opts.o.debug: + print(f"parsed_pod_yaml_map: {self.parsed_pod_yaml_map}") + # Find the set of images in the pods + for pod_name in self.parsed_pod_yaml_map: + pod = self.parsed_pod_yaml_map[pod_name] + services = pod["services"] + for service_name in services: + service_info = services[service_name] + image = service_info["image"] + self.image_set.add(image) + if opts.o.debug: + print(f"image_set: {self.image_set}") + + def get_deployment(self): + containers = [] + for pod_name in self.parsed_pod_yaml_map: + pod = self.parsed_pod_yaml_map[pod_name] + services = pod["services"] + for service_name in services: + container_name = service_name + service_info = services[service_name] + image = service_info["image"] + container = client.V1Container( + name=container_name, + image=image, + ports=[client.V1ContainerPort(container_port=80)], + resources=client.V1ResourceRequirements( + requests={"cpu": "100m", "memory": "200Mi"}, + limits={"cpu": "500m", "memory": "500Mi"}, + ), + ) + containers.append(container) + template = client.V1PodTemplateSpec( + metadata=client.V1ObjectMeta(labels={"app": self.app_name}), + spec=client.V1PodSpec(containers=containers), + ) + spec = client.V1DeploymentSpec( + replicas=1, template=template, selector={ + "matchLabels": + {"app": self.app_name}}) + + deployment = client.V1Deployment( + api_version="apps/v1", + kind="Deployment", + 
metadata=client.V1ObjectMeta(name=self.deployment_name), + spec=spec, + ) + return deployment diff --git a/app/deploy/k8s/deploy_k8s.py b/app/deploy/k8s/deploy_k8s.py index 7cf0261d..e67f3974 100644 --- a/app/deploy/k8s/deploy_k8s.py +++ b/app/deploy/k8s/deploy_k8s.py @@ -14,33 +14,86 @@ # along with this program. If not, see . from kubernetes import client, config + from app.deploy.deployer import Deployer +from app.deploy.k8s.helpers import create_cluster, destroy_cluster, load_images_into_kind +from app.deploy.k8s.helpers import pods_in_deployment, log_stream_from_string +from app.deploy.k8s.cluster_info import ClusterInfo +from app.opts import opts class K8sDeployer(Deployer): name: str = "k8s" + core_api: client.CoreV1Api + apps_api: client.AppsV1Api + kind_cluster_name: str + cluster_info : ClusterInfo def __init__(self, compose_files, compose_project_name, compose_env_file) -> None: - config.load_kube_config() - self.client = client.CoreV1Api() + if (opts.o.debug): + print(f"Compose files: {compose_files}") + print(f"Project name: {compose_project_name}") + print(f"Env file: {compose_env_file}") + self.kind_cluster_name = compose_project_name + self.cluster_info = ClusterInfo() + self.cluster_info.int_from_pod_files(compose_files) + + def connect_api(self): + config.load_kube_config(context=f"kind-{self.kind_cluster_name}") + self.core_api = client.CoreV1Api() + self.apps_api = client.AppsV1Api() def up(self, detach, services): - pass + # Create the kind cluster + create_cluster(self.kind_cluster_name) + self.connect_api() + # Ensure the referenced containers are copied into kind + load_images_into_kind(self.kind_cluster_name, self.cluster_info.image_set) + # Process compose files into a Deployment + deployment = self.cluster_info.get_deployment() + # Create the k8s objects + resp = self.apps_api.create_namespaced_deployment( + body=deployment, namespace="default" + ) + + if opts.o.debug: + print("Deployment created.\n") + print(f"{resp.metadata.namespace} 
{resp.metadata.name} \ + {resp.metadata.generation} {resp.spec.template.spec.containers[0].image}") def down(self, timeout, volumes): - pass + # Delete the k8s objects + # Destroy the kind cluster + destroy_cluster(self.kind_cluster_name) def ps(self): - pass + self.connect_api() + # Call whatever API we need to get the running container list + ret = self.core_api.list_pod_for_all_namespaces(watch=False) + if ret.items: + for i in ret.items: + print("%s\t%s\t%s" % (i.status.pod_ip, i.metadata.namespace, i.metadata.name)) + ret = self.core_api.list_node(pretty=True, watch=False) + return [] def port(self, service, private_port): + # Since we handle the port mapping, need to figure out where this comes from + # Also look into whether it makes sense to get ports for k8s pass def execute(self, service_name, command, envs): + # Call the API to execute a command in a running container pass def logs(self, services, tail, follow, stream): - pass + self.connect_api() + pods = pods_in_deployment(self.core_api, "test-deployment") + if len(pods) > 1: + print("Warning: more than one pod in the deployment") + k8s_pod_name = pods[0] + log_data = self.core_api.read_namespaced_pod_log(k8s_pod_name, namespace="default", container="test") + return log_stream_from_string(log_data) def run(self, image, command, user, volumes, entrypoint=None): + # We need to figure out how to do this -- check why we're being called first pass diff --git a/app/deploy/k8s/helpers.py b/app/deploy/k8s/helpers.py new file mode 100644 index 00000000..731d667d --- /dev/null +++ b/app/deploy/k8s/helpers.py @@ -0,0 +1,57 @@ +# Copyright © 2023 Vulcanize + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+ +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +from kubernetes import client +import subprocess +from typing import Set + +from app.opts import opts + + +def _run_command(command: str): + if opts.o.debug: + print(f"Running: {command}") + result = subprocess.run(command, shell=True) + if opts.o.debug: + print(f"Result: {result}") + + +def create_cluster(name: str): + _run_command(f"kind create cluster --name {name}") + + +def destroy_cluster(name: str): + _run_command(f"kind delete cluster --name {name}") + + +def load_images_into_kind(kind_cluster_name: str, image_set: Set[str]): + for image in image_set: + _run_command(f"kind load docker-image {image} --name {kind_cluster_name}") + + +def pods_in_deployment(core_api: client.CoreV1Api, deployment_name: str): + pods = [] + pod_response = core_api.list_namespaced_pod(namespace="default", label_selector="app=test-app") + if opts.o.debug: + print(f"pod_response: {pod_response}") + for pod_info in pod_response.items: + pod_name = pod_info.metadata.name + pods.append(pod_name) + return pods + + +def log_stream_from_string(s: str): + # Note response has to be UTF-8 encoded because the caller expects to decode it + yield ("ignore", s.encode()) diff --git a/app/opts.py b/app/opts.py new file mode 100644 index 00000000..193637c2 --- /dev/null +++ b/app/opts.py @@ -0,0 +1,20 @@ +# Copyright © 2023 Vulcanize + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+ +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +from app.command_types import CommandOptions + + +class opts: + o: CommandOptions = None diff --git a/cli.py b/cli.py index 5dea43ca..38bdddd9 100644 --- a/cli.py +++ b/cli.py @@ -22,6 +22,7 @@ from app.build import build_npms from app.deploy import deploy from app import version from app.deploy import deployment +from app import opts from app import update CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) @@ -39,7 +40,9 @@ CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) @click.pass_context def cli(ctx, stack, quiet, verbose, dry_run, local_stack, debug, continue_on_error): """Laconic Stack Orchestrator""" - ctx.obj = CommandOptions(stack, quiet, verbose, dry_run, local_stack, debug, continue_on_error) + command_options = CommandOptions(stack, quiet, verbose, dry_run, local_stack, debug, continue_on_error) + opts.opts.o = command_options + ctx.obj = command_options cli.add_command(setup_repositories.command, "setup-repositories") From 8cac5986790560241603cdb94c8d3c07b5f63507 Mon Sep 17 00:00:00 2001 From: Thomas E Lackey Date: Fri, 27 Oct 2023 13:57:13 -0500 Subject: [PATCH 06/62] Split act-runner into its own pod and offer as a distinct stack. (#612) * Split act-runner into its own pod and offer as a distinct stack. 
--- .../cerc-act-runner-task-executor/build.sh | 2 +- app/data/stacks/act-runner/README.md | 15 ++++ app/data/stacks/act-runner/stack.yml | 15 ++++ app/data/stacks/package-registry/stack.yml | 5 ++ app/deploy/deploy.py | 4 +- app/deploy/deployment_create.py | 72 ++++++++++--------- app/util.py | 16 ++--- 7 files changed, 85 insertions(+), 44 deletions(-) create mode 100644 app/data/stacks/act-runner/README.md create mode 100644 app/data/stacks/act-runner/stack.yml diff --git a/app/data/container-build/cerc-act-runner-task-executor/build.sh b/app/data/container-build/cerc-act-runner-task-executor/build.sh index 25620a53..b625ed4b 100755 --- a/app/data/container-build/cerc-act-runner-task-executor/build.sh +++ b/app/data/container-build/cerc-act-runner-task-executor/build.sh @@ -2,4 +2,4 @@ # Build a local version of the task executor for act-runner source ${CERC_CONTAINER_BASE_DIR}/build-base.sh SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) -docker build -t cerc/act-runner-task-executor:local -f ${CERC_REPO_BASE_DIR}/hosting/gitea/Dockerfile.task-executor ${build_command_args} ${SCRIPT_DIR} +docker build -t cerc/act-runner-task-executor:local -f ${CERC_REPO_BASE_DIR}/hosting/act-runner/Dockerfile.task-executor ${build_command_args} ${SCRIPT_DIR} diff --git a/app/data/stacks/act-runner/README.md b/app/data/stacks/act-runner/README.md new file mode 100644 index 00000000..e623a9c6 --- /dev/null +++ b/app/data/stacks/act-runner/README.md @@ -0,0 +1,15 @@ +# act-runner stack + +## Example + +``` +$ laconic-so --stack act-runner deploy init --output act-runner.yml + +$ laconic-so --stack act-runner deploy create --spec-file act-runner.yml --deployment-dir ~/opt/deployments/act-runner-1 +$ echo "CERC_GITEA_RUNNER_REGISTRATION_TOKEN=FOO" >> ~/opt/deployments/act-runner-1/config.env +$ laconic-so deployment --dir ~/opt/deployments/act-runner-1 up + +$ laconic-so --stack act-runner deploy create --spec-file act-runner.yml --deployment-dir 
~/opt/deployments/act-runner-2 +$ echo "CERC_GITEA_RUNNER_REGISTRATION_TOKEN=BAR" >> ~/opt/deployments/act-runner-2/config.env +$ laconic-so deployment --dir ~/opt/deployments/act-runner-2 up +``` diff --git a/app/data/stacks/act-runner/stack.yml b/app/data/stacks/act-runner/stack.yml new file mode 100644 index 00000000..a236fccf --- /dev/null +++ b/app/data/stacks/act-runner/stack.yml @@ -0,0 +1,15 @@ +version: "1.1" +name: act-runner +description: "Local act-runner" +repos: + - git.vdb.to/cerc-io/hosting + - gitea.com/gitea/act_runner +containers: + - cerc/act-runner + - cerc/act-runner-task-executor +pods: + - name: act-runner + repository: cerc-io/hosting + path: act-runner + pre_start_command: "pre_start.sh" + post_start_command: "post_start.sh" diff --git a/app/data/stacks/package-registry/stack.yml b/app/data/stacks/package-registry/stack.yml index 33c6c939..f6367ab1 100644 --- a/app/data/stacks/package-registry/stack.yml +++ b/app/data/stacks/package-registry/stack.yml @@ -13,3 +13,8 @@ pods: path: gitea pre_start_command: "run-this-first.sh" post_start_command: "initialize-gitea.sh" + - name: act-runner + repository: cerc-io/hosting + path: act-runner + pre_start_command: "pre_start.sh" + post_start_command: "post_start.sh" diff --git a/app/deploy/deploy.py b/app/deploy/deploy.py index 40cd0a8d..a2d7cc01 100644 --- a/app/deploy/deploy.py +++ b/app/deploy/deploy.py @@ -307,7 +307,7 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file): compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_path}.yml") else: if deployment: - compose_file_name = os.path.join(compose_dir, "docker-compose.yml") + compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_name}.yml") pod_pre_start_command = pod["pre_start_command"] pod_post_start_command = pod["post_start_command"] script_dir = compose_dir.parent.joinpath("pods", pod_name, "scripts") @@ -317,7 +317,7 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, 
env_file): post_start_commands.append(os.path.join(script_dir, pod_post_start_command)) else: pod_root_dir = os.path.join(dev_root_path, pod_repository.split("/")[-1], pod["path"]) - compose_file_name = os.path.join(pod_root_dir, "docker-compose.yml") + compose_file_name = os.path.join(pod_root_dir, f"docker-compose-{pod_name}.yml") pod_pre_start_command = pod["pre_start_command"] pod_post_start_command = pod["post_start_command"] if pod_pre_start_command is not None: diff --git a/app/deploy/deployment_create.py b/app/deploy/deployment_create.py index dcaccb2b..a7cbe57e 100644 --- a/app/deploy/deployment_create.py +++ b/app/deploy/deployment_create.py @@ -22,7 +22,7 @@ import random from shutil import copy, copyfile, copytree import sys from app.util import (get_stack_file_path, get_parsed_deployment_spec, get_parsed_stack_config, global_options, get_yaml, - get_pod_list, get_pod_file_path, pod_has_scripts, get_pod_script_paths, get_plugin_code_path) + get_pod_list, get_pod_file_path, pod_has_scripts, get_pod_script_paths, get_plugin_code_paths) from app.deploy.deploy_types import DeploymentContext, DeployCommandContext, LaconicStackSetupCommand @@ -106,9 +106,10 @@ def _fixup_pod_file(pod, spec, compose_dir): pod["services"][container_name]["ports"] = container_ports -def _commands_plugin_path(ctx: DeployCommandContext): - plugin_path = get_plugin_code_path(ctx.stack) - return plugin_path.joinpath("deploy", "commands.py") +def _commands_plugin_paths(ctx: DeployCommandContext): + plugin_paths = get_plugin_code_paths(ctx.stack) + ret = [p.joinpath("deploy", "commands.py") for p in plugin_paths] + return ret # See: https://stackoverflow.com/a/54625079/1701505 @@ -120,15 +121,23 @@ def call_stack_deploy_init(deploy_command_context): # Link with the python file in the stack # Call a function in it # If no function found, return None - python_file_path = _commands_plugin_path(deploy_command_context) - if python_file_path.exists(): - spec = 
util.spec_from_file_location("commands", python_file_path) - imported_stack = util.module_from_spec(spec) - spec.loader.exec_module(imported_stack) - if _has_method(imported_stack, "init"): - return imported_stack.init(deploy_command_context) - else: - return None + python_file_paths = _commands_plugin_paths(deploy_command_context) + + ret = None + init_done = False + for python_file_path in python_file_paths: + if python_file_path.exists(): + spec = util.spec_from_file_location("commands", python_file_path) + imported_stack = util.module_from_spec(spec) + spec.loader.exec_module(imported_stack) + if _has_method(imported_stack, "init"): + if not init_done: + ret = imported_stack.init(deploy_command_context) + init_done = True + else: + # TODO: remove this restriction + print(f"Skipping init() from plugin {python_file_path}. Only one init() is allowed.") + return ret # TODO: fold this with function above @@ -136,16 +145,14 @@ def call_stack_deploy_setup(deploy_command_context, parameters: LaconicStackSetu # Link with the python file in the stack # Call a function in it # If no function found, return None - python_file_path = _commands_plugin_path(deploy_command_context) - print(f"Path: {python_file_path}") - if python_file_path.exists(): - spec = util.spec_from_file_location("commands", python_file_path) - imported_stack = util.module_from_spec(spec) - spec.loader.exec_module(imported_stack) - if _has_method(imported_stack, "setup"): - return imported_stack.setup(deploy_command_context, parameters, extra_args) - else: - return None + python_file_paths = _commands_plugin_paths(deploy_command_context) + for python_file_path in python_file_paths: + if python_file_path.exists(): + spec = util.spec_from_file_location("commands", python_file_path) + imported_stack = util.module_from_spec(spec) + spec.loader.exec_module(imported_stack) + if _has_method(imported_stack, "setup"): + imported_stack.setup(deploy_command_context, parameters, extra_args) # TODO: fold this with 
function above @@ -153,15 +160,14 @@ def call_stack_deploy_create(deployment_context, extra_args): # Link with the python file in the stack # Call a function in it # If no function found, return None - python_file_path = _commands_plugin_path(deployment_context.command_context) - if python_file_path.exists(): - spec = util.spec_from_file_location("commands", python_file_path) - imported_stack = util.module_from_spec(spec) - spec.loader.exec_module(imported_stack) - if _has_method(imported_stack, "create"): - return imported_stack.create(deployment_context, extra_args) - else: - return None + python_file_paths = _commands_plugin_paths(deployment_context.command_context) + for python_file_path in python_file_paths: + if python_file_path.exists(): + spec = util.spec_from_file_location("commands", python_file_path) + imported_stack = util.module_from_spec(spec) + spec.loader.exec_module(imported_stack) + if _has_method(imported_stack, "create"): + imported_stack.create(deployment_context, extra_args) # Inspect the pod yaml to find config files referenced in subdirectories @@ -336,7 +342,7 @@ def create(ctx, spec_file, deployment_dir, network_dir, initial_peers): if global_options(ctx).debug: print(f"extra config dirs: {extra_config_dirs}") _fixup_pod_file(parsed_pod_file, parsed_spec, destination_compose_dir) - with open(os.path.join(destination_compose_dir, os.path.basename(pod_file_path)), "w") as output_file: + with open(os.path.join(destination_compose_dir, "docker-compose-%s.yml" % pod), "w") as output_file: yaml.dump(parsed_pod_file, output_file) # Copy the config files for the pod, if any config_dirs = {pod} diff --git a/app/util.py b/app/util.py index a25aacdb..d3b733a2 100644 --- a/app/util.py +++ b/app/util.py @@ -79,16 +79,16 @@ def get_pod_list(parsed_stack): return result -def get_plugin_code_path(stack): +def get_plugin_code_paths(stack): parsed_stack = get_parsed_stack_config(stack) pods = parsed_stack["pods"] - # TODO: Hack - pod = pods[0] - if 
type(pod) is str: - result = get_stack_file_path(stack).parent - else: - pod_root_dir = os.path.join(get_dev_root_path(None), pod["repository"].split("/")[-1], pod["path"]) - result = Path(os.path.join(pod_root_dir, "stack")) + result = [] + for pod in pods: + if type(pod) is str: + result.append(get_stack_file_path(stack).parent) + else: + pod_root_dir = os.path.join(get_dev_root_path(None), pod["repository"].split("/")[-1], pod["path"]) + result.append(Path(os.path.join(pod_root_dir, "stack"))) return result From 86076c7ed82b6536e3d3c90a90d2b1769a5f7563 Mon Sep 17 00:00:00 2001 From: David Boreham Date: Sun, 29 Oct 2023 22:26:15 -0600 Subject: [PATCH 07/62] Fix deployer.exec() (#619) --- app/deploy/compose/deploy_docker.py | 4 ++-- app/deploy/deployer.py | 2 +- app/deploy/k8s/deploy_k8s.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/app/deploy/compose/deploy_docker.py b/app/deploy/compose/deploy_docker.py index 03306346..6a70f318 100644 --- a/app/deploy/compose/deploy_docker.py +++ b/app/deploy/compose/deploy_docker.py @@ -48,9 +48,9 @@ class DockerDeployer(Deployer): except DockerException as e: raise DeployerException(e) - def execute(self, service, command, envs): + def execute(self, service, command, tty, envs): try: - return self.docker.compose.execute(service=service, command=command, envs=envs) + return self.docker.compose.execute(service=service, command=command, tty=tty, envs=envs) except DockerException as e: raise DeployerException(e) diff --git a/app/deploy/deployer.py b/app/deploy/deployer.py index b46a2d23..51b6010d 100644 --- a/app/deploy/deployer.py +++ b/app/deploy/deployer.py @@ -35,7 +35,7 @@ class Deployer(ABC): pass @abstractmethod - def execute(self, service_name, command, envs): + def execute(self, service_name, command, tty, envs): pass @abstractmethod diff --git a/app/deploy/k8s/deploy_k8s.py b/app/deploy/k8s/deploy_k8s.py index e67f3974..bb1fcd87 100644 --- a/app/deploy/k8s/deploy_k8s.py +++ 
b/app/deploy/k8s/deploy_k8s.py @@ -81,7 +81,7 @@ class K8sDeployer(Deployer): # Also look into whether it makes sense to get ports for k8s pass - def execute(self, service_name, command, envs): + def execute(self, service_name, command, tty, envs): # Call the API to execute a command in a running container pass From b92d9cd7dd790b1396c0c46977e71cecf90e0912 Mon Sep 17 00:00:00 2001 From: David Boreham Date: Sun, 29 Oct 2023 23:16:39 -0600 Subject: [PATCH 08/62] Update stack README.md to use config directive --- app/data/stacks/act-runner/README.md | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/app/data/stacks/act-runner/README.md b/app/data/stacks/act-runner/README.md index e623a9c6..3c6dd7b1 100644 --- a/app/data/stacks/act-runner/README.md +++ b/app/data/stacks/act-runner/README.md @@ -3,13 +3,11 @@ ## Example ``` -$ laconic-so --stack act-runner deploy init --output act-runner.yml - -$ laconic-so --stack act-runner deploy create --spec-file act-runner.yml --deployment-dir ~/opt/deployments/act-runner-1 -$ echo "CERC_GITEA_RUNNER_REGISTRATION_TOKEN=FOO" >> ~/opt/deployments/act-runner-1/config.env +$ laconic-so --stack act-runner deploy init --output act-runner-1.yml --config CERC_GITEA_RUNNER_REGISTRATION_TOKEN=FOO +$ laconic-so --stack act-runner deploy create --spec-file act-runner-1.yml --deployment-dir ~/opt/deployments/act-runner-1 $ laconic-so deployment --dir ~/opt/deployments/act-runner-1 up -$ laconic-so --stack act-runner deploy create --spec-file act-runner.yml --deployment-dir ~/opt/deployments/act-runner-2 -$ echo "CERC_GITEA_RUNNER_REGISTRATION_TOKEN=BAR" >> ~/opt/deployments/act-runner-2/config.env +$ laconic-so --stack act-runner deploy init --output act-runner-2.yml --config CERC_GITEA_RUNNER_REGISTRATION_TOKEN=BAR +$ laconic-so --stack act-runner deploy create --spec-file act-runner-2.yml --deployment-dir ~/opt/deployments/act-runner-2 $ laconic-so deployment --dir ~/opt/deployments/act-runner-2 up ``` From 
d854dd5c81f391e7aca6f49c379ef953999c7b50 Mon Sep 17 00:00:00 2001 From: Ian Date: Mon, 30 Oct 2023 15:44:46 -0400 Subject: [PATCH 09/62] Update fixturenet-laconicd.yml --- .github/workflows/fixturenet-laconicd.yml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/.github/workflows/fixturenet-laconicd.yml b/.github/workflows/fixturenet-laconicd.yml index 5c712e9b..a16c1fe6 100644 --- a/.github/workflows/fixturenet-laconicd.yml +++ b/.github/workflows/fixturenet-laconicd.yml @@ -12,12 +12,6 @@ jobs: name: "Run fixturenet-laconicd test suite" runs-on: ubuntu-latest steps: - - name: 'Setup jq' - run: apt-get install jq -y - - name: 'Check jq' - run: | - which jq - jq --version - name: "Clone project repository" uses: actions/checkout@v3 - name: "Install Python" From fd5779f967565994d947e43ae2e20c3f43a39957 Mon Sep 17 00:00:00 2001 From: Thomas E Lackey Date: Tue, 31 Oct 2023 12:29:19 -0500 Subject: [PATCH 10/62] Fix KeyError accessing config. (#620) --- app/deploy/deployment_create.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/deploy/deployment_create.py b/app/deploy/deployment_create.py index a7cbe57e..04fdde4a 100644 --- a/app/deploy/deployment_create.py +++ b/app/deploy/deployment_create.py @@ -261,7 +261,7 @@ def init(ctx, config, output, map_ports_to_host): config_variables = _parse_config_variables(config) if config_variables: # Implement merge, since update() overwrites - orig_config = spec_file_content["config"] + orig_config = spec_file_content.get("config", {}) new_config = config_variables["config"] merged_config = {**new_config, **orig_config} spec_file_content.update({"config": merged_config}) From 0f93d30d54f8b41d317e1df6303816fde3d0dd04 Mon Sep 17 00:00:00 2001 From: David Boreham Date: Fri, 3 Nov 2023 17:02:13 -0600 Subject: [PATCH 11/62] Basic volume support (#622) --- app/deploy/k8s/cluster_info.py | 28 +++++++++++++++++++- app/deploy/k8s/deploy_k8s.py | 25 +++++++++++++----- app/deploy/k8s/helpers.py | 47 
++++++++++++++++++++++++++++++++++ 3 files changed, 92 insertions(+), 8 deletions(-) diff --git a/app/deploy/k8s/cluster_info.py b/app/deploy/k8s/cluster_info.py index 540f5f8c..dd6df456 100644 --- a/app/deploy/k8s/cluster_info.py +++ b/app/deploy/k8s/cluster_info.py @@ -18,6 +18,7 @@ from typing import Any, List, Set from app.opts import opts from app.util import get_yaml +from app.deploy.k8s.helpers import named_volumes_from_pod_files, volume_mounts_for_service, volumes_for_pod_files class ClusterInfo: @@ -47,6 +48,28 @@ class ClusterInfo: if opts.o.debug: print(f"image_set: {self.image_set}") + def get_pvcs(self): + result = [] + volumes = named_volumes_from_pod_files(self.parsed_pod_yaml_map) + if opts.o.debug: + print(f"Volumes: {volumes}") + for volume_name in volumes: + spec = client.V1PersistentVolumeClaimSpec( + storage_class_name="standard", + access_modes=["ReadWriteOnce"], + resources=client.V1ResourceRequirements( + requests={"storage": "2Gi"} + ) + ) + pvc = client.V1PersistentVolumeClaim( + metadata=client.V1ObjectMeta(name=volume_name, + labels={"volume-label": volume_name}), + spec=spec, + ) + result.append(pvc) + return result + + # to suit the deployment, and also annotate the container specs to point at said volumes def get_deployment(self): containers = [] for pod_name in self.parsed_pod_yaml_map: @@ -56,19 +79,22 @@ class ClusterInfo: container_name = service_name service_info = services[service_name] image = service_info["image"] + volume_mounts = volume_mounts_for_service(self.parsed_pod_yaml_map, service_name) container = client.V1Container( name=container_name, image=image, ports=[client.V1ContainerPort(container_port=80)], + volume_mounts=volume_mounts, resources=client.V1ResourceRequirements( requests={"cpu": "100m", "memory": "200Mi"}, limits={"cpu": "500m", "memory": "500Mi"}, ), ) containers.append(container) + volumes = volumes_for_pod_files(self.parsed_pod_yaml_map) template = client.V1PodTemplateSpec( 
metadata=client.V1ObjectMeta(labels={"app": self.app_name}), - spec=client.V1PodSpec(containers=containers), + spec=client.V1PodSpec(containers=containers, volumes=volumes), ) spec = client.V1DeploymentSpec( replicas=1, template=template, selector={ diff --git a/app/deploy/k8s/deploy_k8s.py b/app/deploy/k8s/deploy_k8s.py index bb1fcd87..25e0f485 100644 --- a/app/deploy/k8s/deploy_k8s.py +++ b/app/deploy/k8s/deploy_k8s.py @@ -26,6 +26,7 @@ class K8sDeployer(Deployer): name: str = "k8s" core_api: client.CoreV1Api apps_api: client.AppsV1Api + k8s_namespace: str = "default" kind_cluster_name: str cluster_info : ClusterInfo @@ -49,17 +50,27 @@ class K8sDeployer(Deployer): self.connect_api() # Ensure the referenced containers are copied into kind load_images_into_kind(self.kind_cluster_name, self.cluster_info.image_set) + # Figure out the PVCs for this deployment + pvcs = self.cluster_info.get_pvcs() + for pvc in pvcs: + if opts.o.debug: + print(f"Sending this: {pvc}") + pvc_resp = self.core_api.create_namespaced_persistent_volume_claim(body=pvc, namespace=self.k8s_namespace) + if opts.o.debug: + print("PVCs created:") + print(f"{pvc_resp}") # Process compose files into a Deployment deployment = self.cluster_info.get_deployment() # Create the k8s objects - resp = self.apps_api.create_namespaced_deployment( - body=deployment, namespace="default" - ) - if opts.o.debug: - print("Deployment created.\n") - print(f"{resp.metadata.namespace} {resp.metadata.name} \ - {resp.metadata.generation} {resp.spec.template.spec.containers[0].image}") + print(f"Sending this: {deployment}") + deployment_resp = self.apps_api.create_namespaced_deployment( + body=deployment, namespace=self.k8s_namespace + ) + if opts.o.debug: + print("Deployment created:") + print(f"{deployment_resp.metadata.namespace} {deployment_resp.metadata.name} \ + {deployment_resp.metadata.generation} {deployment_resp.spec.template.spec.containers[0].image}") def down(self, timeout, volumes): # Delete the k8s objects 
diff --git a/app/deploy/k8s/helpers.py b/app/deploy/k8s/helpers.py index 731d667d..3ff5e2b7 100644 --- a/app/deploy/k8s/helpers.py +++ b/app/deploy/k8s/helpers.py @@ -55,3 +55,50 @@ def pods_in_deployment(core_api: client.CoreV1Api, deployment_name: str): def log_stream_from_string(s: str): # Note response has to be UTF-8 encoded because the caller expects to decode it yield ("ignore", s.encode()) + + +def named_volumes_from_pod_files(parsed_pod_files): + # Parse the compose files looking for named volumes + named_volumes = [] + for pod in parsed_pod_files: + parsed_pod_file = parsed_pod_files[pod] + if "volumes" in parsed_pod_file: + volumes = parsed_pod_file["volumes"] + for volume in volumes.keys(): + # Volume definition looks like: + # 'laconicd-data': None + named_volumes.append(volume) + return named_volumes + + +def volume_mounts_for_service(parsed_pod_files, service): + result = [] + # Find the service + for pod in parsed_pod_files: + parsed_pod_file = parsed_pod_files[pod] + if "services" in parsed_pod_file: + services = parsed_pod_file["services"] + for service_name in services: + if service_name == service: + service_obj = services[service_name] + if "volumes" in service_obj: + volumes = service_obj["volumes"] + for mount_string in volumes: + # Looks like: test-data:/data + (volume_name, mount_path) = mount_string.split(":") + volume_device = client.V1VolumeMount(mount_path=mount_path, name=volume_name) + result.append(volume_device) + return result + + +def volumes_for_pod_files(parsed_pod_files): + result = [] + for pod in parsed_pod_files: + parsed_pod_file = parsed_pod_files[pod] + if "volumes" in parsed_pod_file: + volumes = parsed_pod_file["volumes"] + for volume_name in volumes.keys(): + claim = client.V1PersistentVolumeClaimVolumeSource(claim_name=volume_name) + volume = client.V1Volume(name=volume_name, persistent_volume_claim=claim) + result.append(volume) + return result From e989368793a3d71bbb3a0d65c96fb4a400332d42 Mon Sep 17 00:00:00 2001 
From: David Boreham Date: Sun, 5 Nov 2023 23:21:53 -0700 Subject: [PATCH 12/62] Add generated kind config (#623) --- app/deploy/compose/deploy_docker.py | 14 +++- app/deploy/deployer.py | 8 ++ app/deploy/deployer_factory.py | 15 +++- app/deploy/deployment_create.py | 5 ++ app/deploy/k8s/cluster_info.py | 9 +-- app/deploy/k8s/deploy_k8s.py | 24 +++++- app/deploy/k8s/helpers.py | 114 +++++++++++++++++++++++++++- 7 files changed, 172 insertions(+), 17 deletions(-) diff --git a/app/deploy/compose/deploy_docker.py b/app/deploy/compose/deploy_docker.py index 6a70f318..e8ee4b9f 100644 --- a/app/deploy/compose/deploy_docker.py +++ b/app/deploy/compose/deploy_docker.py @@ -13,8 +13,9 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . +from pathlib import Path from python_on_whales import DockerClient, DockerException -from app.deploy.deployer import Deployer, DeployerException +from app.deploy.deployer import Deployer, DeployerException, DeployerConfigGenerator class DockerDeployer(Deployer): @@ -65,3 +66,14 @@ class DockerDeployer(Deployer): return self.docker.run(image=image, command=command, user=user, volumes=volumes, entrypoint=entrypoint) except DockerException as e: raise DeployerException(e) + + +class DockerDeployerConfigGenerator(DeployerConfigGenerator): + config_file_name: str = "kind-config.yml" + + def __init__(self) -> None: + super().__init__() + + # Nothing needed at present for the docker deployer + def generate(self, deployment_dir: Path): + pass diff --git a/app/deploy/deployer.py b/app/deploy/deployer.py index 51b6010d..68b0088a 100644 --- a/app/deploy/deployer.py +++ b/app/deploy/deployer.py @@ -14,6 +14,7 @@ # along with this program. If not, see . 
from abc import ABC, abstractmethod +from pathlib import Path class Deployer(ABC): @@ -50,3 +51,10 @@ class Deployer(ABC): class DeployerException(Exception): def __init__(self, *args: object) -> None: super().__init__(*args) + + +class DeployerConfigGenerator(ABC): + + @abstractmethod + def generate(self, deployment_dir: Path): + pass diff --git a/app/deploy/deployer_factory.py b/app/deploy/deployer_factory.py index de89b72c..0c0ef69d 100644 --- a/app/deploy/deployer_factory.py +++ b/app/deploy/deployer_factory.py @@ -13,11 +13,20 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . -from app.deploy.k8s.deploy_k8s import K8sDeployer -from app.deploy.compose.deploy_docker import DockerDeployer +from app.deploy.k8s.deploy_k8s import K8sDeployer, K8sDeployerConfigGenerator +from app.deploy.compose.deploy_docker import DockerDeployer, DockerDeployerConfigGenerator -def getDeployer(type, compose_files, compose_project_name, compose_env_file): +def getDeployerConfigGenerator(type: str): + if type == "compose" or type is None: + return DockerDeployerConfigGenerator() + elif type == "k8s": + return K8sDeployerConfigGenerator() + else: + print(f"ERROR: deploy-to {type} is not valid") + + +def getDeployer(type: str, compose_files, compose_project_name, compose_env_file): if type == "compose" or type is None: return DockerDeployer(compose_files, compose_project_name, compose_env_file) elif type == "k8s": diff --git a/app/deploy/deployment_create.py b/app/deploy/deployment_create.py index 04fdde4a..4f297286 100644 --- a/app/deploy/deployment_create.py +++ b/app/deploy/deployment_create.py @@ -24,6 +24,7 @@ import sys from app.util import (get_stack_file_path, get_parsed_deployment_spec, get_parsed_stack_config, global_options, get_yaml, get_pod_list, get_pod_file_path, pod_has_scripts, get_pod_script_paths, get_plugin_code_paths) from app.deploy.deploy_types import DeploymentContext, DeployCommandContext, 
LaconicStackSetupCommand +from app.deploy.deployer_factory import getDeployerConfigGenerator def _make_default_deployment_dir(): @@ -366,6 +367,10 @@ def create(ctx, spec_file, deployment_dir, network_dir, initial_peers): deployment_command_context = ctx.obj deployment_command_context.stack = stack_name deployment_context = DeploymentContext(Path(deployment_dir), deployment_command_context) + # Call the deployer to generate any deployer-specific files (e.g. for kind) + deployer_config_generator = getDeployerConfigGenerator(parsed_spec["deploy-to"]) + # TODO: make deployment_dir a Path above + deployer_config_generator.generate(Path(deployment_dir)) call_stack_deploy_create(deployment_context, [network_dir, initial_peers]) diff --git a/app/deploy/k8s/cluster_info.py b/app/deploy/k8s/cluster_info.py index dd6df456..dfb1ef53 100644 --- a/app/deploy/k8s/cluster_info.py +++ b/app/deploy/k8s/cluster_info.py @@ -17,8 +17,8 @@ from kubernetes import client from typing import Any, List, Set from app.opts import opts -from app.util import get_yaml from app.deploy.k8s.helpers import named_volumes_from_pod_files, volume_mounts_for_service, volumes_for_pod_files +from app.deploy.k8s.helpers import parsed_pod_files_map_from_file_names class ClusterInfo: @@ -31,12 +31,7 @@ class ClusterInfo: pass def int_from_pod_files(self, pod_files: List[str]): - for pod_file in pod_files: - with open(pod_file, "r") as pod_file_descriptor: - parsed_pod_file = get_yaml().load(pod_file_descriptor) - self.parsed_pod_yaml_map[pod_file] = parsed_pod_file - if opts.o.debug: - print(f"parsed_pod_yaml_map: {self.parsed_pod_yaml_map}") + self.parsed_pod_yaml_map = parsed_pod_files_map_from_file_names(pod_files) # Find the set of images in the pods for pod_name in self.parsed_pod_yaml_map: pod = self.parsed_pod_yaml_map[pod_name] diff --git a/app/deploy/k8s/deploy_k8s.py b/app/deploy/k8s/deploy_k8s.py index 25e0f485..16b5f0b4 100644 --- a/app/deploy/k8s/deploy_k8s.py +++ b/app/deploy/k8s/deploy_k8s.py 
@@ -13,11 +13,12 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . +from pathlib import Path from kubernetes import client, config -from app.deploy.deployer import Deployer +from app.deploy.deployer import Deployer, DeployerConfigGenerator from app.deploy.k8s.helpers import create_cluster, destroy_cluster, load_images_into_kind -from app.deploy.k8s.helpers import pods_in_deployment, log_stream_from_string +from app.deploy.k8s.helpers import pods_in_deployment, log_stream_from_string, generate_kind_config from app.deploy.k8s.cluster_info import ClusterInfo from app.opts import opts @@ -46,7 +47,8 @@ class K8sDeployer(Deployer): def up(self, detach, services): # Create the kind cluster - create_cluster(self.kind_cluster_name) + # HACK: pass in the config file path here + create_cluster(self.kind_cluster_name, "./test-deployment-dir/kind-config.yml") self.connect_api() # Ensure the referenced containers are copied into kind load_images_into_kind(self.kind_cluster_name, self.cluster_info.image_set) @@ -108,3 +110,19 @@ class K8sDeployer(Deployer): def run(self, image, command, user, volumes, entrypoint=None): # We need to figure out how to do this -- check why we're being called first pass + + +class K8sDeployerConfigGenerator(DeployerConfigGenerator): + config_file_name: str = "kind-config.yml" + + def __init__(self) -> None: + super().__init__() + + def generate(self, deployment_dir: Path): + # Check the file isn't already there + # Get the config file contents + content = generate_kind_config(deployment_dir) + config_file = deployment_dir.joinpath(self.config_file_name) + # Write the file + with open(config_file, "w") as output_file: + output_file.write(content) diff --git a/app/deploy/k8s/helpers.py b/app/deploy/k8s/helpers.py index 3ff5e2b7..6194ac5e 100644 --- a/app/deploy/k8s/helpers.py +++ b/app/deploy/k8s/helpers.py @@ -14,10 +14,12 @@ # along with this program. If not, see . 
from kubernetes import client +from pathlib import Path import subprocess -from typing import Set +from typing import Any, Set from app.opts import opts +from app.util import get_yaml def _run_command(command: str): @@ -28,8 +30,8 @@ def _run_command(command: str): print(f"Result: {result}") -def create_cluster(name: str): - _run_command(f"kind create cluster --name {name}") +def create_cluster(name: str, config_file: str): + _run_command(f"kind create cluster --name {name} --config {config_file}") def destroy_cluster(name: str): @@ -102,3 +104,109 @@ def volumes_for_pod_files(parsed_pod_files): volume = client.V1Volume(name=volume_name, persistent_volume_claim=claim) result.append(volume) return result + + +def _get_host_paths_for_volumes(parsed_pod_files): + result = {} + for pod in parsed_pod_files: + parsed_pod_file = parsed_pod_files[pod] + if "volumes" in parsed_pod_file: + volumes = parsed_pod_file["volumes"] + for volume_name in volumes.keys(): + volume_definition = volumes[volume_name] + host_path = volume_definition["driver_opts"]["device"] + result[volume_name] = host_path + return result + + +def parsed_pod_files_map_from_file_names(pod_files): + parsed_pod_yaml_map : Any = {} + for pod_file in pod_files: + with open(pod_file, "r") as pod_file_descriptor: + parsed_pod_file = get_yaml().load(pod_file_descriptor) + parsed_pod_yaml_map[pod_file] = parsed_pod_file + if opts.o.debug: + print(f"parsed_pod_yaml_map: {parsed_pod_yaml_map}") + return parsed_pod_yaml_map + + +def _generate_kind_mounts(parsed_pod_files): + volume_definitions = [] + volume_host_path_map = _get_host_paths_for_volumes(parsed_pod_files) + for pod in parsed_pod_files: + parsed_pod_file = parsed_pod_files[pod] + if "services" in parsed_pod_file: + services = parsed_pod_file["services"] + for service_name in services: + service_obj = services[service_name] + if "volumes" in service_obj: + volumes = service_obj["volumes"] + for mount_string in volumes: + # Looks like: test-data:/data + 
(volume_name, mount_path) = mount_string.split(":") + volume_definitions.append( + f" - hostPath: {volume_host_path_map[volume_name]}\n containerPath: /var/local-path-provisioner" + ) + return ( + "" if len(volume_definitions) == 0 else ( + " extraMounts:\n" + f"{''.join(volume_definitions)}" + ) + ) + + +def _generate_kind_port_mappings(parsed_pod_files): + port_definitions = [] + for pod in parsed_pod_files: + parsed_pod_file = parsed_pod_files[pod] + if "services" in parsed_pod_file: + services = parsed_pod_file["services"] + for service_name in services: + service_obj = services[service_name] + if "ports" in service_obj: + ports = service_obj["ports"] + for port_string in ports: + # TODO handle the complex cases + # Looks like: 80 or something more complicated + port_definitions.append(f" - containerPort: {port_string}\n hostPort: {port_string}") + return ( + "" if len(port_definitions) == 0 else ( + " extraPortMappings:\n" + f"{''.join(port_definitions)}" + ) + ) + + +# This needs to know: +# The service ports for the cluster +# The bind mounted volumes for the cluster +# +# Make ports like this: +# extraPortMappings: +# - containerPort: 80 +# hostPort: 80 +# # optional: set the bind address on the host +# # 0.0.0.0 is the current default +# listenAddress: "127.0.0.1" +# # optional: set the protocol to one of TCP, UDP, SCTP. 
+# # TCP is the default +# protocol: TCP +# Make bind mounts like this: +# extraMounts: +# - hostPath: /path/to/my/files +# containerPath: /files +def generate_kind_config(deployment_dir: Path): + compose_file_dir = deployment_dir.joinpath("compose") + # TODO: this should come from the stack file, not this way + pod_files = [p for p in compose_file_dir.iterdir() if p.is_file()] + parsed_pod_files_map = parsed_pod_files_map_from_file_names(pod_files) + port_mappings_yml = _generate_kind_port_mappings(parsed_pod_files_map) + mounts_yml = _generate_kind_mounts(parsed_pod_files_map) + return ( + "kind: Cluster\n" + "apiVersion: kind.x-k8s.io/v1alpha4\n" + "nodes:\n" + "- role: control-plane\n" + f"{port_mappings_yml}\n" + f"{mounts_yml}\n" + ) From 4456e70c936871b2fc416fa9ca6d61b6f4e17390 Mon Sep 17 00:00:00 2001 From: David Boreham Date: Tue, 7 Nov 2023 00:06:55 -0700 Subject: [PATCH 13/62] Rename app -> stack_orchestrator (#625) --- .gitignore | 2 +- README.md | 10 +++++----- docs/adding-a-new-stack.md | 12 ++++++------ scripts/create_build_tag_file.sh | 4 ++-- setup.py | 4 ++-- {app => stack_orchestrator}/__init__.py | 0 {app => stack_orchestrator}/__main__.py | 0 {app => stack_orchestrator}/base.py | 2 +- {app => stack_orchestrator}/build/__init__.py | 0 .../build/build_containers.py | 6 +++--- .../build/build_npms.py | 6 +++--- {app => stack_orchestrator}/command_types.py | 0 {app => stack_orchestrator}/data/__init__.py | 0 .../docker-compose-contract-sushiswap.yml | 0 .../data/compose/docker-compose-contract.yml | 0 .../data/compose/docker-compose-eth-probe.yml | 0 ...cker-compose-eth-statediff-fill-service.yml | 0 .../docker-compose-fixturenet-eth-metrics.yml | 0 .../compose/docker-compose-fixturenet-eth.yml | 0 ...cker-compose-fixturenet-laconic-console.yml | 0 .../docker-compose-fixturenet-laconicd.yml | 0 .../docker-compose-fixturenet-lotus.yml | 0 .../docker-compose-fixturenet-optimism.yml | 0 .../docker-compose-fixturenet-plugeth.yml | 0 
.../docker-compose-fixturenet-pocket.yml | 0 ...ompose-fixturenet-sushiswap-subgraph-v3.yml | 0 .../data/compose/docker-compose-foundry.yml | 0 .../docker-compose-go-ethereum-foundry.yml | 0 .../data/compose/docker-compose-go-nitro.yml | 0 .../data/compose/docker-compose-graph-node.yml | 0 .../docker-compose-ipld-eth-beacon-db.yml | 0 .../docker-compose-ipld-eth-beacon-indexer.yml | 0 .../compose/docker-compose-ipld-eth-db.yml | 0 ...docker-compose-ipld-eth-server-payments.yml | 0 .../compose/docker-compose-ipld-eth-server.yml | 0 .../data/compose/docker-compose-keycloak.yml | 0 .../data/compose/docker-compose-kubo.yml | 0 .../compose/docker-compose-laconic-dot-com.yml | 0 .../data/compose/docker-compose-laconicd.yml | 0 .../data/compose/docker-compose-lasso.yml | 0 .../docker-compose-mainnet-eth-api-proxy.yml | 0 .../docker-compose-mainnet-eth-ipld-eth-db.yml | 0 ...ker-compose-mainnet-eth-ipld-eth-server.yml | 0 .../docker-compose-mainnet-eth-keycloak.yml | 0 .../docker-compose-mainnet-eth-metrics.yml | 0 .../docker-compose-mainnet-eth-plugeth.yml | 0 .../compose/docker-compose-mainnet-eth.yml | 0 .../docker-compose-mainnet-go-opera.yml | 0 .../docker-compose-mainnet-laconicd.yml | 0 .../compose/docker-compose-mobymask-app-v3.yml | 0 .../compose/docker-compose-mobymask-app.yml | 0 .../compose/docker-compose-mobymask-snap.yml | 0 .../compose/docker-compose-nitro-contracts.yml | 0 .../docker-compose-nitro-rpc-client.yml | 0 .../compose/docker-compose-peer-test-app.yml | 0 .../compose/docker-compose-ponder-indexer.yml | 0 .../compose/docker-compose-ponder-watcher.yml | 0 .../data/compose/docker-compose-reth.yml | 0 .../docker-compose-sushiswap-subgraph-v3.yml | 0 .../data/compose/docker-compose-test.yml | 0 .../data/compose/docker-compose-tx-spammer.yml | 0 .../compose/docker-compose-watcher-azimuth.yml | 0 .../compose/docker-compose-watcher-erc20.yml | 0 .../compose/docker-compose-watcher-erc721.yml | 0 .../compose/docker-compose-watcher-gelato.yml | 0 
.../docker-compose-watcher-mobymask-v2.yml | 0 .../docker-compose-watcher-mobymask-v3.yml | 0 .../docker-compose-watcher-mobymask.yml | 0 .../docker-compose-watcher-sushiswap.yml | 0 .../docker-compose-watcher-uniswap-v3.yml | 0 .../deploy-core-contracts.sh | 0 .../deploy-periphery-contracts.sh | 0 .../contract-sushiswap/deployment-params.env | 0 .../etc/dashboards/fixturenet_dashboard.json | 0 .../etc/provisioning/dashboards/dashboards.yml | 0 .../provisioning/datasources/prometheus.yml | 0 .../prometheus/etc/prometheus.yml | 0 .../config/fixturenet-eth/fixturenet-eth.env | 0 .../fixturenet-laconicd/create-fixturenet.sh | 0 .../fixturenet-laconicd/export-myaddress.sh | 0 .../config/fixturenet-laconicd/export-mykey.sh | 0 .../registry-cli-config-template.yml | 0 .../config/fixturenet-lotus/fund-account.sh | 0 .../data/config/fixturenet-lotus/lotus-env.env | 0 .../config/fixturenet-lotus/setup-miner.sh | 0 .../data/config/fixturenet-lotus/setup-node.sh | 0 .../fixturenet-optimism/generate-l2-config.sh | 0 .../config/fixturenet-optimism/l1-params.env | 0 .../optimism-contracts/run.sh | 0 .../optimism-contracts/update-config.js | 0 .../fixturenet-optimism/run-op-batcher.sh | 0 .../config/fixturenet-optimism/run-op-geth.sh | 0 .../config/fixturenet-optimism/run-op-node.sh | 0 .../fixturenet-optimism/run-op-proposer.sh | 0 .../data/config/fixturenet-pocket/chains.json | 0 .../fixturenet-pocket/create-fixturenet.sh | 0 .../data/config/fixturenet-pocket/genesis.json | 0 .../lotus-fixturenet.js.template | 0 .../run-blocks.sh | 0 .../fixturenet-sushiswap-subgraph-v3/run-v3.sh | 0 .../data/config/foundry/foundry.toml | 0 .../data/config/go-nitro/run-nitro-node.sh | 0 .../config/ipld-eth-beacon-indexer/indexer.env | 0 .../data/config/ipld-eth-server/chain.json | 0 .../data/config/ipld-eth-server/entrypoint.sh | 0 .../config/keycloak/import/cerc-realm.json | 0 .../data/config/keycloak/keycloak.env | 0 .../config/keycloak/nginx/keycloak_proxy.conf | 0 
.../config/mainnet-eth-api-proxy/ethpxy.env | 0 .../data/config/mainnet-eth-ipld-eth-db/db.env | 0 .../mainnet-eth-ipld-eth-server/config.toml | 0 .../config/mainnet-eth-ipld-eth-server/srv.env | 0 .../import/cerc-realm.json | 0 .../config/mainnet-eth-keycloak/keycloak.env | 0 .../config/mainnet-eth-keycloak/nginx.example | 0 .../scripts/keycloak-mirror/keycloak-mirror.py | 0 .../scripts/keycloak-mirror/requirements.txt | 0 .../config/mainnet-eth-keycloak/ui/config.yml | 0 .../grafana/etc/dashboards/eth_dashboard.json | 0 .../etc/provisioning/dashboards/dashboards.yml | 0 .../provisioning/datasources/prometheus.yml | 0 .../config/mainnet-eth-metrics/metrics.env | 0 .../prometheus/etc/prometheus.yml | 0 .../data/config/mainnet-eth-plugeth/geth.env | 0 .../config/mainnet-eth-plugeth/lighthouse.env | 0 .../mainnet-eth-plugeth/scripts/run-geth.sh | 0 .../scripts/run-lighthouse.sh | 0 .../data/config/mainnet-eth/geth.env | 0 .../data/config/mainnet-eth/lighthouse.env | 0 .../config/mainnet-eth/scripts/run-geth.sh | 0 .../mainnet-eth/scripts/run-lighthouse.sh | 0 .../data/config/mainnet-go-opera/go-opera.env | 0 .../data/config/mainnet-go-opera/start-node.sh | 0 .../registry-cli-config-template.yml | 0 .../scripts/export-myaddress.sh | 0 .../mainnet-laconicd/scripts/export-mykey.sh | 0 .../mainnet-laconicd/scripts/run-laconicd.sh | 0 .../data/config/network/wait-for-it.sh | 0 .../data/config/nitro-contracts/deploy.sh | 0 .../hardhat-tasks/rekey-json.ts | 0 .../hardhat-tasks/send-balance.ts | 0 .../verify-contract-deployment.ts | 0 .../data/config/ponder/base-rates-config.json | 0 .../config/ponder/deploy-erc20-contract.sh | 0 .../data/config/ponder/ponder-start.sh | 0 .../config/ponder/ponder.indexer-1.config.ts | 0 .../config/ponder/ponder.indexer-2.config.ts | 0 .../config/ponder/ponder.watcher.config.ts | 0 .../postgresql/create-pg-stat-statements.sql | 0 .../multiple-postgressql-databases.sh | 0 .../data/config/reth/start-lighthouse.sh | 0 
.../data/config/reth/start-reth.sh | 0 .../config/sushiswap-subgraph-v3/filecoin.js | 0 .../config/sushiswap-subgraph-v3/run-blocks.sh | 0 .../config/sushiswap-subgraph-v3/run-v3.sh | 0 .../data/config/tx-spammer/tx-spammer.env | 0 .../watcher-azimuth/gateway-watchers.json | 0 .../data/config/watcher-azimuth/merge-toml.js | 0 .../config/watcher-azimuth/start-server.sh | 0 .../watcher-config-template.toml | 0 .../config/watcher-azimuth/watcher-params.env | 0 .../config/watcher-erc20/erc20-watcher.toml | 0 .../config/watcher-erc721/erc721-watcher.toml | 0 .../create-and-import-checkpoint.sh | 0 .../config/watcher-gelato/start-job-runner.sh | 0 .../data/config/watcher-gelato/start-server.sh | 0 .../watcher-config-template.toml | 0 .../config/watcher-gelato/watcher-params.env | 0 .../deploy-and-generate-invite.sh | 0 .../watcher-mobymask-v2/generate-peer-ids.sh | 0 .../mobymask-app-config.json | 0 .../watcher-mobymask-v2/mobymask-app-start.sh | 0 .../watcher-mobymask-v2/mobymask-params.env | 0 .../watcher-mobymask-v2/optimism-params.env | 0 .../watcher-mobymask-v2/secrets-template.json | 0 .../watcher-mobymask-v2/set-tests-env.sh | 0 .../config/watcher-mobymask-v2/start-server.sh | 0 .../watcher-mobymask-v2/test-app-config.json | 0 .../watcher-mobymask-v2/test-app-start.sh | 0 .../watcher-config-template.toml | 0 .../deploy-and-generate-invite.sh | 0 ...CT9DtCnSDcxftxJzSuTBvzVojabv64cnEvX4AZ.json | 0 ...QS4y23ngupDw9PDc4bvNvRJGVRejjV9EZLjux5.json | 0 ...gkAZsKZK7UX1Zr6Hx6YAsEepHqzopFszqfTxxi.json | 0 .../watcher-mobymask-v3/mobymask-app-start.sh | 0 .../watcher-mobymask-v3/mobymask-params.env | 0 .../config/watcher-mobymask-v3/start-server.sh | 0 .../watcher-config-rates.toml | 0 .../watcher-config-template.toml | 0 .../watcher-mobymask/mobymask-watcher-db.sql | 0 .../watcher-mobymask/mobymask-watcher.toml | 0 .../watcher-sushiswap/erc20-watcher.toml | 0 .../config/watcher-sushiswap/lotus-params.env | 0 .../sushi-info-watcher-test.toml | 0 
.../watcher-sushiswap/sushi-info-watcher.toml | 0 .../watcher-sushiswap/sushi-watcher-test.toml | 0 .../watcher-sushiswap/sushi-watcher.toml | 0 .../watcher-uniswap-v3/erc20-watcher.toml | 0 .../data/config/watcher-uniswap-v3/run.sh | 0 .../watcher-uniswap-v3/uni-info-watcher.toml | 0 .../config/watcher-uniswap-v3/uni-watcher.toml | 0 .../watcher-uniswap-v3/watch-contract.sh | 0 .../data/container-build/build-base.sh | 0 .../cerc-act-runner-task-executor/build.sh | 0 .../container-build/cerc-act-runner/build.sh | 0 .../cerc-builder-gerbil/Dockerfile | 0 .../cerc-builder-gerbil/README.md | 0 .../cerc-builder-gerbil/entrypoint.sh | 0 .../install-dependencies.sh | 0 .../container-build/cerc-builder-js/Dockerfile | 0 .../container-build/cerc-builder-js/README.md | 0 .../build-npm-package-local-dependencies.sh | 0 .../cerc-builder-js/build-npm-package.sh | 0 .../cerc-builder-js/check-uid.sh | 0 .../cerc-builder-js/entrypoint.sh | 0 .../yarn-local-registry-fixup.sh | 0 .../cerc-eth-api-proxy/build.sh | 0 .../container-build/cerc-eth-probe/build.sh | 0 .../cerc-eth-statediff-fill-service/build.sh | 0 .../cerc-eth-statediff-service/build.sh | 0 .../cerc-fixturenet-eth-genesis/Dockerfile | 0 .../cerc-fixturenet-eth-genesis/build.sh | 0 .../genesis/Makefile | 0 .../genesis/accounts/import_keys.sh | 0 .../genesis/accounts/mnemonic_to_csv.py | 0 .../genesis/el/build_el.sh | 0 .../genesis/el/el-config.yaml | 0 .../cerc-fixturenet-eth-geth/Dockerfile | 0 .../cerc-fixturenet-eth-geth/build.sh | 0 .../cerc-fixturenet-eth-geth/run-el.sh | 0 .../cerc-fixturenet-eth-lighthouse/Dockerfile | 0 .../cerc-fixturenet-eth-lighthouse/build.sh | 0 .../genesis/Makefile | 0 .../genesis/cl/beacon_node.sh | 0 .../genesis/cl/bootnode.sh | 0 .../genesis/cl/build_cl.sh | 0 .../genesis/cl/ready.sh | 0 .../genesis/cl/reset_genesis_time.sh | 0 .../genesis/cl/validator_client.sh | 0 .../genesis/cl/vars.env | 0 .../cerc-fixturenet-eth-lighthouse/run-cl.sh | 0 .../scripts/export-ethdb.sh | 0 
.../scripts/status-internal.sh | 0 .../scripts/status.sh | 0 .../cerc-fixturenet-plugeth-plugeth/Dockerfile | 0 .../cerc-fixturenet-plugeth-plugeth/build.sh | 0 .../data/container-build/cerc-foundry/build.sh | 0 .../cerc-go-ethereum-foundry/Dockerfile | 0 .../cerc-go-ethereum-foundry/build.sh | 0 .../deploy-local-network.sh | 0 .../genesis-automine.json | 0 .../cerc-go-ethereum-foundry/genesis.json | 0 .../start-private-network.sh | 0 .../stateful/foundry.toml | 0 .../stateful/lib/ds-test/LICENSE | 0 .../stateful/lib/ds-test/Makefile | 0 .../stateful/lib/ds-test/default.nix | 0 .../stateful/lib/ds-test/demo/demo.sol | 0 .../stateful/lib/ds-test/src/test.sol | 0 .../stateful/src/Stateful.sol | 0 .../stateful/src/test/Stateful.t.sol | 0 .../container-build/cerc-go-ethereum/build.sh | 0 .../container-build/cerc-go-nitro/Dockerfile | 0 .../container-build/cerc-go-nitro/build.sh | 0 .../container-build/cerc-go-opera/build.sh | 0 .../container-build/cerc-graph-node/build.sh | 0 .../cerc-ipld-eth-beacon-db/build.sh | 0 .../cerc-ipld-eth-beacon-indexer/build.sh | 0 .../container-build/cerc-ipld-eth-db/build.sh | 0 .../cerc-ipld-eth-server/build.sh | 0 .../cerc-keycloak-reg-api/build.sh | 0 .../cerc-keycloak-reg-ui/build.sh | 0 .../container-build/cerc-keycloak/Dockerfile | 0 .../container-build/cerc-keycloak/build.sh | 0 .../cerc-laconic-console-host/Dockerfile | 0 .../cerc-laconic-console-host/build.sh | 0 .../cerc-laconic-console-host/config.yml | 0 .../cerc-laconic-dot-com/build.sh | 0 .../cerc-laconic-registry-cli/Dockerfile | 0 .../cerc-laconic-registry-cli/build.sh | 0 .../create-demo-records.sh | 0 .../demo-records/demo-record-1.yml | 0 .../demo-records/demo-record-2.yml | 0 .../demo-records/demo-record-3.yml | 0 .../demo-records/demo-record-4.yml | 0 .../demo-records/demo-record-5.yml | 0 .../demo-records/demo-record-6.yml | 0 .../import-address.sh | 0 .../cerc-laconic-registry-cli/import-key.sh | 0 .../container-build/cerc-laconicd/build.sh | 0 
.../data/container-build/cerc-lasso/build.sh | 0 .../cerc-lighthouse-cli/build.sh | 0 .../container-build/cerc-lighthouse/Dockerfile | 0 .../container-build/cerc-lighthouse/build.sh | 0 .../cerc-lighthouse/start-lighthouse.sh | 0 .../data/container-build/cerc-lotus/Dockerfile | 0 .../data/container-build/cerc-lotus/build.sh | 0 .../cerc-mobymask-snap/Dockerfile | 0 .../cerc-mobymask-snap/build.sh | 0 .../cerc-mobymask-ui/Dockerfile | 0 .../container-build/cerc-mobymask-ui/build.sh | 0 .../container-build/cerc-mobymask/Dockerfile | 0 .../container-build/cerc-mobymask/build.sh | 0 .../cerc-nitro-contracts/Dockerfile | 0 .../cerc-nitro-contracts/build.sh | 0 .../cerc-nitro-rpc-client/Dockerfile | 0 .../cerc-nitro-rpc-client/build.sh | 0 .../cerc-optimism-contracts/Dockerfile | 0 .../cerc-optimism-contracts/build.sh | 0 .../cerc-optimism-l2geth/build.sh | 0 .../cerc-optimism-op-batcher/Dockerfile | 0 .../cerc-optimism-op-batcher/build.sh | 0 .../cerc-optimism-op-node/Dockerfile | 0 .../cerc-optimism-op-node/build.sh | 0 .../cerc-optimism-op-proposer/Dockerfile | 0 .../cerc-optimism-op-proposer/build.sh | 0 .../cerc-plugeth-statediff/build.sh | 0 .../cerc-plugeth-with-plugins/Dockerfile | 0 .../cerc-plugeth-with-plugins/build.sh | 0 .../data/container-build/cerc-plugeth/build.sh | 0 .../data/container-build/cerc-pocket/build.sh | 0 .../container-build/cerc-ponder/Dockerfile | 0 .../data/container-build/cerc-ponder/build.sh | 0 .../container-build/cerc-react-peer/Dockerfile | 0 .../cerc-react-peer/apply-webapp-config.sh | 0 .../container-build/cerc-react-peer/build.sh | 0 .../cerc-react-peer/start-serving-app.sh | 0 .../data/container-build/cerc-reth/build.sh | 0 .../cerc-sushiswap-subgraphs/Dockerfile | 0 .../cerc-sushiswap-subgraphs/build.sh | 0 .../cerc-sushiswap-v3-core/Dockerfile | 0 .../cerc-sushiswap-v3-core/build.sh | 0 .../cerc-sushiswap-v3-periphery/Dockerfile | 0 .../cerc-sushiswap-v3-periphery/build.sh | 0 .../cerc-test-container/Dockerfile | 0 
.../cerc-test-container/build.sh | 0 .../container-build/cerc-test-container/run.sh | 0 .../cerc-test-contract/build.sh | 0 .../container-build/cerc-tx-spammer/build.sh | 0 .../cerc-uniswap-v3-info/Dockerfile | 0 .../cerc-uniswap-v3-info/build.sh | 0 .../cerc-watcher-azimuth/Dockerfile | 0 .../cerc-watcher-azimuth/build.sh | 0 .../cerc-watcher-erc20/Dockerfile | 0 .../cerc-watcher-erc20/build.sh | 0 .../cerc-watcher-erc721/Dockerfile | 0 .../cerc-watcher-erc721/build.sh | 0 .../cerc-watcher-gelato/Dockerfile | 0 .../cerc-watcher-gelato/build.sh | 0 .../cerc-watcher-mobymask-v2/Dockerfile | 0 .../cerc-watcher-mobymask-v2/build.sh | 0 .../cerc-watcher-mobymask-v3/Dockerfile | 0 .../cerc-watcher-mobymask-v3/build.sh | 0 .../cerc-watcher-mobymask/Dockerfile | 0 .../cerc-watcher-mobymask/build.sh | 0 .../cerc-watcher-sushiswap/Dockerfile | 0 .../cerc-watcher-sushiswap/build.sh | 0 .../container-build/cerc-watcher-ts/Dockerfile | 0 .../container-build/cerc-watcher-ts/build.sh | 0 .../cerc-watcher-uniswap-v3/Dockerfile | 0 .../cerc-watcher-uniswap-v3/build.sh | 0 .../cerc-webapp-base/Dockerfile | 0 .../cerc-webapp-base/apply-webapp-config.sh | 0 .../container-build/cerc-webapp-base/build.sh | 0 .../cerc-webapp-base/config.yml | 0 .../cerc-webapp-base/start-serving-app.sh | 0 .../data/container-build/default-build.sh | 0 .../data/container-image-list.txt | 0 .../data/npm-package-list.txt | 0 {app => stack_orchestrator}/data/pod-list.txt | 0 .../data/repository-list.txt | 0 .../data/stacks/act-runner/README.md | 0 .../data/stacks/act-runner/stack.yml | 0 .../data/stacks/azimuth/README.md | 0 .../data/stacks/azimuth/stack.yml | 0 .../data/stacks/build-support/README.md | 0 .../data/stacks/build-support/stack.yml | 0 .../data/stacks/chain-chunker/README.md | 0 .../data/stacks/chain-chunker/stack.yml | 0 .../data/stacks/erc20/README.md | 0 .../data/stacks/erc20/stack.yml | 0 .../data/stacks/erc721/README.md | 0 .../data/stacks/erc721/stack.yml | 0 
.../stacks/fixturenet-eth-loaded/README.md | 0 .../stacks/fixturenet-eth-loaded/stack.yml | 0 .../data/stacks/fixturenet-eth-tx/README.md | 0 .../data/stacks/fixturenet-eth-tx/stack.yml | 0 .../data/stacks/fixturenet-eth/README.md | 0 .../data/stacks/fixturenet-eth/stack.yml | 0 .../stacks/fixturenet-laconic-loaded/README.md | 0 .../stacks/fixturenet-laconic-loaded/stack.yml | 0 .../data/stacks/fixturenet-laconicd/README.md | 0 .../data/stacks/fixturenet-laconicd/stack.yml | 0 .../data/stacks/fixturenet-lotus/README.md | 0 .../data/stacks/fixturenet-lotus/stack.yml | 0 .../data/stacks/fixturenet-optimism/README.md | 0 .../data/stacks/fixturenet-optimism/l2-only.md | 0 .../data/stacks/fixturenet-optimism/stack.yml | 0 .../data/stacks/fixturenet-payments/README.md | 0 .../fixturenet-payments/mobymask-demo.md | 0 .../stacks/fixturenet-payments/ponder-demo.md | 0 .../data/stacks/fixturenet-payments/stack.yml | 0 .../stacks/fixturenet-plugeth-tx/README.md | 0 .../stacks/fixturenet-plugeth-tx/stack.yml | 0 .../data/stacks/fixturenet-pocket/README.md | 0 .../data/stacks/fixturenet-pocket/stack.yml | 0 .../fixturenet-sushiswap-subgraph/README.md | 0 .../fixturenet-sushiswap-subgraph/stack.yml | 0 .../data/stacks/gelato/README.md | 0 .../data/stacks/gelato/stack.yml | 0 .../data/stacks/graph-node/README.md | 0 .../data/stacks/graph-node/deploy-subgraph.md | 0 .../data/stacks/graph-node/stack.yml | 0 .../data/stacks/kubo/README.md | 0 .../data/stacks/kubo/stack.yml | 0 .../data/stacks/laconic-dot-com/README.md | 0 .../data/stacks/laconic-dot-com/stack.yml | 0 .../data/stacks/lasso/README.md | 0 .../data/stacks/lasso/stack.yml | 0 .../data/stacks/mainnet-eth-plugeth/README.md | 0 .../mainnet-eth-plugeth/deploy/commands.py | 0 .../data/stacks/mainnet-eth-plugeth/stack.yml | 0 .../data/stacks/mainnet-eth/README.md | 0 .../data/stacks/mainnet-eth/deploy/commands.py | 0 .../data/stacks/mainnet-eth/stack.yml | 0 .../data/stacks/mainnet-go-opera/README.md | 0 
.../data/stacks/mainnet-go-opera/stack.yml | 0 .../data/stacks/mainnet-laconic/README.md | 0 .../stacks/mainnet-laconic/deploy/commands.py | 10 +++++----- .../data/stacks/mainnet-laconic/stack.yml | 0 .../test/run-mainnet-laconic-test.sh | 0 .../data/stacks/mobymask-v2/README.md | 0 .../data/stacks/mobymask-v2/demo.md | 0 .../data/stacks/mobymask-v2/mobymask-only.md | 0 .../data/stacks/mobymask-v2/stack.yml | 0 .../mobymask-v2/watcher-p2p-network/watcher.md | 0 .../mobymask-v2/watcher-p2p-network/web-app.md | 0 .../data/stacks/mobymask-v2/web-apps.md | 0 .../data/stacks/mobymask-v3/README.md | 0 .../data/stacks/mobymask-v3/stack.yml | 0 .../data/stacks/mobymask-v3/watcher.md | 0 .../data/stacks/mobymask-v3/web-app.md | 0 .../data/stacks/mobymask/README.md | 0 .../data/stacks/mobymask/stack.yml | 0 .../data/stacks/package-registry/README.md | 0 .../data/stacks/package-registry/stack.yml | 0 .../data/stacks/reth/README.md | 0 .../data/stacks/reth/stack.yml | 0 .../data/stacks/sushiswap-subgraph/README.md | 0 .../data/stacks/sushiswap-subgraph/stack.yml | 0 .../data/stacks/sushiswap/README.md | 0 .../data/stacks/sushiswap/smoke-tests.md | 0 .../data/stacks/sushiswap/stack.yml | 0 .../data/stacks/test/README.md | 0 .../data/stacks/test/deploy/commands.py | 8 ++++---- .../data/stacks/test/stack.yml | 0 .../data/stacks/uniswap-v3/README.md | 0 .../data/stacks/uniswap-v3/stack.yml | 0 {app => stack_orchestrator}/data/version.txt | 0 {app => stack_orchestrator}/deploy/__init__.py | 0 .../deploy/compose/__init__.py | 0 .../deploy/compose/deploy_docker.py | 2 +- {app => stack_orchestrator}/deploy/deploy.py | 16 ++++++++-------- .../deploy/deploy_types.py | 4 ++-- .../deploy/deploy_util.py | 4 ++-- {app => stack_orchestrator}/deploy/deployer.py | 0 .../deploy/deployer_factory.py | 4 ++-- .../deploy/deployment.py | 8 ++++---- .../deploy/deployment_create.py | 9 +++++---- .../deploy/k8s/__init__.py | 0 .../deploy/k8s/cluster_info.py | 6 +++--- .../deploy/k8s/deploy_k8s.py | 10 
+++++----- .../deploy/k8s/helpers.py | 4 ++-- {app => stack_orchestrator}/deploy/spec.py | 2 +- {app => stack_orchestrator}/deploy/stack.py | 2 +- .../deploy/stack_state.py | 0 cli.py => stack_orchestrator/main.py | 18 +++++++++--------- {app => stack_orchestrator}/opts.py | 2 +- {app => stack_orchestrator}/repos/__init__.py | 0 .../repos/setup_repositories.py | 4 ++-- {app => stack_orchestrator}/update.py | 2 +- {app => stack_orchestrator}/util.py | 0 {app => stack_orchestrator}/version.py | 2 +- 483 files changed, 82 insertions(+), 81 deletions(-) rename {app => stack_orchestrator}/__init__.py (100%) rename {app => stack_orchestrator}/__main__.py (100%) rename {app => stack_orchestrator}/base.py (98%) rename {app => stack_orchestrator}/build/__init__.py (100%) rename {app => stack_orchestrator}/build/build_containers.py (97%) rename {app => stack_orchestrator}/build/build_npms.py (97%) rename {app => stack_orchestrator}/command_types.py (100%) rename {app => stack_orchestrator}/data/__init__.py (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-contract-sushiswap.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-contract.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-eth-probe.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-eth-statediff-fill-service.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-fixturenet-eth-metrics.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-fixturenet-eth.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-fixturenet-laconic-console.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-fixturenet-laconicd.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-fixturenet-lotus.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-fixturenet-optimism.yml (100%) rename {app => 
stack_orchestrator}/data/compose/docker-compose-fixturenet-plugeth.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-fixturenet-pocket.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-fixturenet-sushiswap-subgraph-v3.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-foundry.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-go-ethereum-foundry.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-go-nitro.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-graph-node.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-ipld-eth-beacon-db.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-ipld-eth-beacon-indexer.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-ipld-eth-db.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-ipld-eth-server-payments.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-ipld-eth-server.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-keycloak.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-kubo.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-laconic-dot-com.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-laconicd.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-lasso.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-mainnet-eth-api-proxy.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-mainnet-eth-ipld-eth-db.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-mainnet-eth-ipld-eth-server.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-mainnet-eth-keycloak.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-mainnet-eth-metrics.yml (100%) rename {app 
=> stack_orchestrator}/data/compose/docker-compose-mainnet-eth-plugeth.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-mainnet-eth.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-mainnet-go-opera.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-mainnet-laconicd.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-mobymask-app-v3.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-mobymask-app.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-mobymask-snap.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-nitro-contracts.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-nitro-rpc-client.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-peer-test-app.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-ponder-indexer.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-ponder-watcher.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-reth.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-sushiswap-subgraph-v3.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-test.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-tx-spammer.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-watcher-azimuth.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-watcher-erc20.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-watcher-erc721.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-watcher-gelato.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-watcher-mobymask-v2.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-watcher-mobymask-v3.yml (100%) rename {app => 
stack_orchestrator}/data/compose/docker-compose-watcher-mobymask.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-watcher-sushiswap.yml (100%) rename {app => stack_orchestrator}/data/compose/docker-compose-watcher-uniswap-v3.yml (100%) rename {app => stack_orchestrator}/data/config/contract-sushiswap/deploy-core-contracts.sh (100%) rename {app => stack_orchestrator}/data/config/contract-sushiswap/deploy-periphery-contracts.sh (100%) rename {app => stack_orchestrator}/data/config/contract-sushiswap/deployment-params.env (100%) rename {app => stack_orchestrator}/data/config/fixturenet-eth-metrics/grafana/etc/dashboards/fixturenet_dashboard.json (100%) rename {app => stack_orchestrator}/data/config/fixturenet-eth-metrics/grafana/etc/provisioning/dashboards/dashboards.yml (100%) rename {app => stack_orchestrator}/data/config/fixturenet-eth-metrics/grafana/etc/provisioning/datasources/prometheus.yml (100%) rename {app => stack_orchestrator}/data/config/fixturenet-eth-metrics/prometheus/etc/prometheus.yml (100%) rename {app => stack_orchestrator}/data/config/fixturenet-eth/fixturenet-eth.env (100%) rename {app => stack_orchestrator}/data/config/fixturenet-laconicd/create-fixturenet.sh (100%) rename {app => stack_orchestrator}/data/config/fixturenet-laconicd/export-myaddress.sh (100%) rename {app => stack_orchestrator}/data/config/fixturenet-laconicd/export-mykey.sh (100%) rename {app => stack_orchestrator}/data/config/fixturenet-laconicd/registry-cli-config-template.yml (100%) rename {app => stack_orchestrator}/data/config/fixturenet-lotus/fund-account.sh (100%) rename {app => stack_orchestrator}/data/config/fixturenet-lotus/lotus-env.env (100%) rename {app => stack_orchestrator}/data/config/fixturenet-lotus/setup-miner.sh (100%) rename {app => stack_orchestrator}/data/config/fixturenet-lotus/setup-node.sh (100%) rename {app => stack_orchestrator}/data/config/fixturenet-optimism/generate-l2-config.sh (100%) rename {app => 
stack_orchestrator}/data/config/fixturenet-optimism/l1-params.env (100%) rename {app => stack_orchestrator}/data/config/fixturenet-optimism/optimism-contracts/run.sh (100%) rename {app => stack_orchestrator}/data/config/fixturenet-optimism/optimism-contracts/update-config.js (100%) rename {app => stack_orchestrator}/data/config/fixturenet-optimism/run-op-batcher.sh (100%) rename {app => stack_orchestrator}/data/config/fixturenet-optimism/run-op-geth.sh (100%) rename {app => stack_orchestrator}/data/config/fixturenet-optimism/run-op-node.sh (100%) rename {app => stack_orchestrator}/data/config/fixturenet-optimism/run-op-proposer.sh (100%) rename {app => stack_orchestrator}/data/config/fixturenet-pocket/chains.json (100%) rename {app => stack_orchestrator}/data/config/fixturenet-pocket/create-fixturenet.sh (100%) rename {app => stack_orchestrator}/data/config/fixturenet-pocket/genesis.json (100%) rename {app => stack_orchestrator}/data/config/fixturenet-sushiswap-subgraph-v3/lotus-fixturenet.js.template (100%) rename {app => stack_orchestrator}/data/config/fixturenet-sushiswap-subgraph-v3/run-blocks.sh (100%) rename {app => stack_orchestrator}/data/config/fixturenet-sushiswap-subgraph-v3/run-v3.sh (100%) rename {app => stack_orchestrator}/data/config/foundry/foundry.toml (100%) rename {app => stack_orchestrator}/data/config/go-nitro/run-nitro-node.sh (100%) rename {app => stack_orchestrator}/data/config/ipld-eth-beacon-indexer/indexer.env (100%) rename {app => stack_orchestrator}/data/config/ipld-eth-server/chain.json (100%) rename {app => stack_orchestrator}/data/config/ipld-eth-server/entrypoint.sh (100%) rename {app => stack_orchestrator}/data/config/keycloak/import/cerc-realm.json (100%) rename {app => stack_orchestrator}/data/config/keycloak/keycloak.env (100%) rename {app => stack_orchestrator}/data/config/keycloak/nginx/keycloak_proxy.conf (100%) rename {app => stack_orchestrator}/data/config/mainnet-eth-api-proxy/ethpxy.env (100%) rename {app => 
stack_orchestrator}/data/config/mainnet-eth-ipld-eth-db/db.env (100%) rename {app => stack_orchestrator}/data/config/mainnet-eth-ipld-eth-server/config.toml (100%) rename {app => stack_orchestrator}/data/config/mainnet-eth-ipld-eth-server/srv.env (100%) rename {app => stack_orchestrator}/data/config/mainnet-eth-keycloak/import/cerc-realm.json (100%) rename {app => stack_orchestrator}/data/config/mainnet-eth-keycloak/keycloak.env (100%) rename {app => stack_orchestrator}/data/config/mainnet-eth-keycloak/nginx.example (100%) rename {app => stack_orchestrator}/data/config/mainnet-eth-keycloak/scripts/keycloak-mirror/keycloak-mirror.py (100%) rename {app => stack_orchestrator}/data/config/mainnet-eth-keycloak/scripts/keycloak-mirror/requirements.txt (100%) rename {app => stack_orchestrator}/data/config/mainnet-eth-keycloak/ui/config.yml (100%) rename {app => stack_orchestrator}/data/config/mainnet-eth-metrics/grafana/etc/dashboards/eth_dashboard.json (100%) rename {app => stack_orchestrator}/data/config/mainnet-eth-metrics/grafana/etc/provisioning/dashboards/dashboards.yml (100%) rename {app => stack_orchestrator}/data/config/mainnet-eth-metrics/grafana/etc/provisioning/datasources/prometheus.yml (100%) rename {app => stack_orchestrator}/data/config/mainnet-eth-metrics/metrics.env (100%) rename {app => stack_orchestrator}/data/config/mainnet-eth-metrics/prometheus/etc/prometheus.yml (100%) rename {app => stack_orchestrator}/data/config/mainnet-eth-plugeth/geth.env (100%) rename {app => stack_orchestrator}/data/config/mainnet-eth-plugeth/lighthouse.env (100%) rename {app => stack_orchestrator}/data/config/mainnet-eth-plugeth/scripts/run-geth.sh (100%) rename {app => stack_orchestrator}/data/config/mainnet-eth-plugeth/scripts/run-lighthouse.sh (100%) rename {app => stack_orchestrator}/data/config/mainnet-eth/geth.env (100%) rename {app => stack_orchestrator}/data/config/mainnet-eth/lighthouse.env (100%) rename {app => 
stack_orchestrator}/data/config/mainnet-eth/scripts/run-geth.sh (100%) rename {app => stack_orchestrator}/data/config/mainnet-eth/scripts/run-lighthouse.sh (100%) rename {app => stack_orchestrator}/data/config/mainnet-go-opera/go-opera.env (100%) rename {app => stack_orchestrator}/data/config/mainnet-go-opera/start-node.sh (100%) rename {app => stack_orchestrator}/data/config/mainnet-laconicd/registry-cli-config-template.yml (100%) rename {app => stack_orchestrator}/data/config/mainnet-laconicd/scripts/export-myaddress.sh (100%) rename {app => stack_orchestrator}/data/config/mainnet-laconicd/scripts/export-mykey.sh (100%) rename {app => stack_orchestrator}/data/config/mainnet-laconicd/scripts/run-laconicd.sh (100%) rename {app => stack_orchestrator}/data/config/network/wait-for-it.sh (100%) rename {app => stack_orchestrator}/data/config/nitro-contracts/deploy.sh (100%) rename {app => stack_orchestrator}/data/config/optimism-contracts/hardhat-tasks/rekey-json.ts (100%) rename {app => stack_orchestrator}/data/config/optimism-contracts/hardhat-tasks/send-balance.ts (100%) rename {app => stack_orchestrator}/data/config/optimism-contracts/hardhat-tasks/verify-contract-deployment.ts (100%) rename {app => stack_orchestrator}/data/config/ponder/base-rates-config.json (100%) rename {app => stack_orchestrator}/data/config/ponder/deploy-erc20-contract.sh (100%) rename {app => stack_orchestrator}/data/config/ponder/ponder-start.sh (100%) rename {app => stack_orchestrator}/data/config/ponder/ponder.indexer-1.config.ts (100%) rename {app => stack_orchestrator}/data/config/ponder/ponder.indexer-2.config.ts (100%) rename {app => stack_orchestrator}/data/config/ponder/ponder.watcher.config.ts (100%) rename {app => stack_orchestrator}/data/config/postgresql/create-pg-stat-statements.sql (100%) rename {app => stack_orchestrator}/data/config/postgresql/multiple-postgressql-databases.sh (100%) rename {app => stack_orchestrator}/data/config/reth/start-lighthouse.sh (100%) rename {app => 
stack_orchestrator}/data/config/reth/start-reth.sh (100%) rename {app => stack_orchestrator}/data/config/sushiswap-subgraph-v3/filecoin.js (100%) rename {app => stack_orchestrator}/data/config/sushiswap-subgraph-v3/run-blocks.sh (100%) rename {app => stack_orchestrator}/data/config/sushiswap-subgraph-v3/run-v3.sh (100%) rename {app => stack_orchestrator}/data/config/tx-spammer/tx-spammer.env (100%) rename {app => stack_orchestrator}/data/config/watcher-azimuth/gateway-watchers.json (100%) rename {app => stack_orchestrator}/data/config/watcher-azimuth/merge-toml.js (100%) rename {app => stack_orchestrator}/data/config/watcher-azimuth/start-server.sh (100%) rename {app => stack_orchestrator}/data/config/watcher-azimuth/watcher-config-template.toml (100%) rename {app => stack_orchestrator}/data/config/watcher-azimuth/watcher-params.env (100%) rename {app => stack_orchestrator}/data/config/watcher-erc20/erc20-watcher.toml (100%) rename {app => stack_orchestrator}/data/config/watcher-erc721/erc721-watcher.toml (100%) rename {app => stack_orchestrator}/data/config/watcher-gelato/create-and-import-checkpoint.sh (100%) rename {app => stack_orchestrator}/data/config/watcher-gelato/start-job-runner.sh (100%) rename {app => stack_orchestrator}/data/config/watcher-gelato/start-server.sh (100%) rename {app => stack_orchestrator}/data/config/watcher-gelato/watcher-config-template.toml (100%) rename {app => stack_orchestrator}/data/config/watcher-gelato/watcher-params.env (100%) rename {app => stack_orchestrator}/data/config/watcher-mobymask-v2/deploy-and-generate-invite.sh (100%) rename {app => stack_orchestrator}/data/config/watcher-mobymask-v2/generate-peer-ids.sh (100%) rename {app => stack_orchestrator}/data/config/watcher-mobymask-v2/mobymask-app-config.json (100%) rename {app => stack_orchestrator}/data/config/watcher-mobymask-v2/mobymask-app-start.sh (100%) rename {app => stack_orchestrator}/data/config/watcher-mobymask-v2/mobymask-params.env (100%) rename {app => 
stack_orchestrator}/data/config/watcher-mobymask-v2/optimism-params.env (100%) rename {app => stack_orchestrator}/data/config/watcher-mobymask-v2/secrets-template.json (100%) rename {app => stack_orchestrator}/data/config/watcher-mobymask-v2/set-tests-env.sh (100%) rename {app => stack_orchestrator}/data/config/watcher-mobymask-v2/start-server.sh (100%) rename {app => stack_orchestrator}/data/config/watcher-mobymask-v2/test-app-config.json (100%) rename {app => stack_orchestrator}/data/config/watcher-mobymask-v2/test-app-start.sh (100%) rename {app => stack_orchestrator}/data/config/watcher-mobymask-v2/watcher-config-template.toml (100%) rename {app => stack_orchestrator}/data/config/watcher-mobymask-v3/deploy-and-generate-invite.sh (100%) rename {app => stack_orchestrator}/data/config/watcher-mobymask-v3/keys/12D3KooWAMjBkFCT9DtCnSDcxftxJzSuTBvzVojabv64cnEvX4AZ.json (100%) rename {app => stack_orchestrator}/data/config/watcher-mobymask-v3/keys/12D3KooWBNEbY3QS4y23ngupDw9PDc4bvNvRJGVRejjV9EZLjux5.json (100%) rename {app => stack_orchestrator}/data/config/watcher-mobymask-v3/keys/12D3KooWSRH6ftgkAZsKZK7UX1Zr6Hx6YAsEepHqzopFszqfTxxi.json (100%) rename {app => stack_orchestrator}/data/config/watcher-mobymask-v3/mobymask-app-start.sh (100%) rename {app => stack_orchestrator}/data/config/watcher-mobymask-v3/mobymask-params.env (100%) rename {app => stack_orchestrator}/data/config/watcher-mobymask-v3/start-server.sh (100%) rename {app => stack_orchestrator}/data/config/watcher-mobymask-v3/watcher-config-rates.toml (100%) rename {app => stack_orchestrator}/data/config/watcher-mobymask-v3/watcher-config-template.toml (100%) rename {app => stack_orchestrator}/data/config/watcher-mobymask/mobymask-watcher-db.sql (100%) rename {app => stack_orchestrator}/data/config/watcher-mobymask/mobymask-watcher.toml (100%) rename {app => stack_orchestrator}/data/config/watcher-sushiswap/erc20-watcher.toml (100%) rename {app => 
stack_orchestrator}/data/config/watcher-sushiswap/lotus-params.env (100%) rename {app => stack_orchestrator}/data/config/watcher-sushiswap/sushi-info-watcher-test.toml (100%) rename {app => stack_orchestrator}/data/config/watcher-sushiswap/sushi-info-watcher.toml (100%) rename {app => stack_orchestrator}/data/config/watcher-sushiswap/sushi-watcher-test.toml (100%) rename {app => stack_orchestrator}/data/config/watcher-sushiswap/sushi-watcher.toml (100%) rename {app => stack_orchestrator}/data/config/watcher-uniswap-v3/erc20-watcher.toml (100%) rename {app => stack_orchestrator}/data/config/watcher-uniswap-v3/run.sh (100%) rename {app => stack_orchestrator}/data/config/watcher-uniswap-v3/uni-info-watcher.toml (100%) rename {app => stack_orchestrator}/data/config/watcher-uniswap-v3/uni-watcher.toml (100%) rename {app => stack_orchestrator}/data/config/watcher-uniswap-v3/watch-contract.sh (100%) rename {app => stack_orchestrator}/data/container-build/build-base.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-act-runner-task-executor/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-act-runner/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-builder-gerbil/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-builder-gerbil/README.md (100%) rename {app => stack_orchestrator}/data/container-build/cerc-builder-gerbil/entrypoint.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-builder-gerbil/install-dependencies.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-builder-js/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-builder-js/README.md (100%) rename {app => stack_orchestrator}/data/container-build/cerc-builder-js/build-npm-package-local-dependencies.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-builder-js/build-npm-package.sh (100%) rename {app => 
stack_orchestrator}/data/container-build/cerc-builder-js/check-uid.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-builder-js/entrypoint.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-builder-js/yarn-local-registry-fixup.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-eth-api-proxy/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-eth-probe/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-eth-statediff-fill-service/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-eth-statediff-service/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-fixturenet-eth-genesis/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-fixturenet-eth-genesis/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-fixturenet-eth-genesis/genesis/Makefile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-fixturenet-eth-genesis/genesis/accounts/import_keys.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-fixturenet-eth-genesis/genesis/accounts/mnemonic_to_csv.py (100%) rename {app => stack_orchestrator}/data/container-build/cerc-fixturenet-eth-genesis/genesis/el/build_el.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-fixturenet-eth-genesis/genesis/el/el-config.yaml (100%) rename {app => stack_orchestrator}/data/container-build/cerc-fixturenet-eth-geth/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-fixturenet-eth-geth/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-fixturenet-eth-geth/run-el.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-fixturenet-eth-lighthouse/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-fixturenet-eth-lighthouse/build.sh (100%) rename {app => 
stack_orchestrator}/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/Makefile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/beacon_node.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/bootnode.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/build_cl.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/ready.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/reset_genesis_time.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/validator_client.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/vars.env (100%) rename {app => stack_orchestrator}/data/container-build/cerc-fixturenet-eth-lighthouse/run-cl.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/export-ethdb.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/status-internal.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/status.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-fixturenet-plugeth-plugeth/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-fixturenet-plugeth-plugeth/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-foundry/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-go-ethereum-foundry/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-go-ethereum-foundry/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-go-ethereum-foundry/deploy-local-network.sh (100%) rename {app 
=> stack_orchestrator}/data/container-build/cerc-go-ethereum-foundry/genesis-automine.json (100%) rename {app => stack_orchestrator}/data/container-build/cerc-go-ethereum-foundry/genesis.json (100%) rename {app => stack_orchestrator}/data/container-build/cerc-go-ethereum-foundry/start-private-network.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-go-ethereum-foundry/stateful/foundry.toml (100%) rename {app => stack_orchestrator}/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/LICENSE (100%) rename {app => stack_orchestrator}/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/Makefile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/default.nix (100%) rename {app => stack_orchestrator}/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/demo/demo.sol (100%) rename {app => stack_orchestrator}/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/src/test.sol (100%) rename {app => stack_orchestrator}/data/container-build/cerc-go-ethereum-foundry/stateful/src/Stateful.sol (100%) rename {app => stack_orchestrator}/data/container-build/cerc-go-ethereum-foundry/stateful/src/test/Stateful.t.sol (100%) rename {app => stack_orchestrator}/data/container-build/cerc-go-ethereum/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-go-nitro/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-go-nitro/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-go-opera/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-graph-node/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-ipld-eth-beacon-db/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-ipld-eth-beacon-indexer/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-ipld-eth-db/build.sh (100%) rename {app => 
stack_orchestrator}/data/container-build/cerc-ipld-eth-server/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-keycloak-reg-api/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-keycloak-reg-ui/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-keycloak/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-keycloak/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-laconic-console-host/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-laconic-console-host/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-laconic-console-host/config.yml (100%) rename {app => stack_orchestrator}/data/container-build/cerc-laconic-dot-com/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-laconic-registry-cli/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-laconic-registry-cli/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-laconic-registry-cli/create-demo-records.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-1.yml (100%) rename {app => stack_orchestrator}/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-2.yml (100%) rename {app => stack_orchestrator}/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-3.yml (100%) rename {app => stack_orchestrator}/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-4.yml (100%) rename {app => stack_orchestrator}/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-5.yml (100%) rename {app => stack_orchestrator}/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-6.yml (100%) rename {app => stack_orchestrator}/data/container-build/cerc-laconic-registry-cli/import-address.sh (100%) rename {app => 
stack_orchestrator}/data/container-build/cerc-laconic-registry-cli/import-key.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-laconicd/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-lasso/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-lighthouse-cli/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-lighthouse/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-lighthouse/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-lighthouse/start-lighthouse.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-lotus/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-lotus/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-mobymask-snap/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-mobymask-snap/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-mobymask-ui/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-mobymask-ui/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-mobymask/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-mobymask/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-nitro-contracts/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-nitro-contracts/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-nitro-rpc-client/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-nitro-rpc-client/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-optimism-contracts/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-optimism-contracts/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-optimism-l2geth/build.sh (100%) 
rename {app => stack_orchestrator}/data/container-build/cerc-optimism-op-batcher/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-optimism-op-batcher/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-optimism-op-node/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-optimism-op-node/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-optimism-op-proposer/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-optimism-op-proposer/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-plugeth-statediff/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-plugeth-with-plugins/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-plugeth-with-plugins/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-plugeth/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-pocket/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-ponder/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-ponder/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-react-peer/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-react-peer/apply-webapp-config.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-react-peer/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-react-peer/start-serving-app.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-reth/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-sushiswap-subgraphs/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-sushiswap-subgraphs/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-sushiswap-v3-core/Dockerfile (100%) rename {app => 
stack_orchestrator}/data/container-build/cerc-sushiswap-v3-core/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-sushiswap-v3-periphery/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-sushiswap-v3-periphery/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-test-container/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-test-container/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-test-container/run.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-test-contract/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-tx-spammer/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-uniswap-v3-info/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-uniswap-v3-info/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-watcher-azimuth/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-watcher-azimuth/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-watcher-erc20/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-watcher-erc20/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-watcher-erc721/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-watcher-erc721/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-watcher-gelato/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-watcher-gelato/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-watcher-mobymask-v2/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-watcher-mobymask-v2/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-watcher-mobymask-v3/Dockerfile (100%) rename {app => 
stack_orchestrator}/data/container-build/cerc-watcher-mobymask-v3/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-watcher-mobymask/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-watcher-mobymask/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-watcher-sushiswap/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-watcher-sushiswap/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-watcher-ts/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-watcher-ts/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-watcher-uniswap-v3/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-watcher-uniswap-v3/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-webapp-base/Dockerfile (100%) rename {app => stack_orchestrator}/data/container-build/cerc-webapp-base/apply-webapp-config.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-webapp-base/build.sh (100%) rename {app => stack_orchestrator}/data/container-build/cerc-webapp-base/config.yml (100%) rename {app => stack_orchestrator}/data/container-build/cerc-webapp-base/start-serving-app.sh (100%) rename {app => stack_orchestrator}/data/container-build/default-build.sh (100%) rename {app => stack_orchestrator}/data/container-image-list.txt (100%) rename {app => stack_orchestrator}/data/npm-package-list.txt (100%) rename {app => stack_orchestrator}/data/pod-list.txt (100%) rename {app => stack_orchestrator}/data/repository-list.txt (100%) rename {app => stack_orchestrator}/data/stacks/act-runner/README.md (100%) rename {app => stack_orchestrator}/data/stacks/act-runner/stack.yml (100%) rename {app => stack_orchestrator}/data/stacks/azimuth/README.md (100%) rename {app => stack_orchestrator}/data/stacks/azimuth/stack.yml (100%) rename {app => 
stack_orchestrator}/data/stacks/build-support/README.md (100%) rename {app => stack_orchestrator}/data/stacks/build-support/stack.yml (100%) rename {app => stack_orchestrator}/data/stacks/chain-chunker/README.md (100%) rename {app => stack_orchestrator}/data/stacks/chain-chunker/stack.yml (100%) rename {app => stack_orchestrator}/data/stacks/erc20/README.md (100%) rename {app => stack_orchestrator}/data/stacks/erc20/stack.yml (100%) rename {app => stack_orchestrator}/data/stacks/erc721/README.md (100%) rename {app => stack_orchestrator}/data/stacks/erc721/stack.yml (100%) rename {app => stack_orchestrator}/data/stacks/fixturenet-eth-loaded/README.md (100%) rename {app => stack_orchestrator}/data/stacks/fixturenet-eth-loaded/stack.yml (100%) rename {app => stack_orchestrator}/data/stacks/fixturenet-eth-tx/README.md (100%) rename {app => stack_orchestrator}/data/stacks/fixturenet-eth-tx/stack.yml (100%) rename {app => stack_orchestrator}/data/stacks/fixturenet-eth/README.md (100%) rename {app => stack_orchestrator}/data/stacks/fixturenet-eth/stack.yml (100%) rename {app => stack_orchestrator}/data/stacks/fixturenet-laconic-loaded/README.md (100%) rename {app => stack_orchestrator}/data/stacks/fixturenet-laconic-loaded/stack.yml (100%) rename {app => stack_orchestrator}/data/stacks/fixturenet-laconicd/README.md (100%) rename {app => stack_orchestrator}/data/stacks/fixturenet-laconicd/stack.yml (100%) rename {app => stack_orchestrator}/data/stacks/fixturenet-lotus/README.md (100%) rename {app => stack_orchestrator}/data/stacks/fixturenet-lotus/stack.yml (100%) rename {app => stack_orchestrator}/data/stacks/fixturenet-optimism/README.md (100%) rename {app => stack_orchestrator}/data/stacks/fixturenet-optimism/l2-only.md (100%) rename {app => stack_orchestrator}/data/stacks/fixturenet-optimism/stack.yml (100%) rename {app => stack_orchestrator}/data/stacks/fixturenet-payments/README.md (100%) rename {app => 
stack_orchestrator}/data/stacks/fixturenet-payments/mobymask-demo.md (100%) rename {app => stack_orchestrator}/data/stacks/fixturenet-payments/ponder-demo.md (100%) rename {app => stack_orchestrator}/data/stacks/fixturenet-payments/stack.yml (100%) rename {app => stack_orchestrator}/data/stacks/fixturenet-plugeth-tx/README.md (100%) rename {app => stack_orchestrator}/data/stacks/fixturenet-plugeth-tx/stack.yml (100%) rename {app => stack_orchestrator}/data/stacks/fixturenet-pocket/README.md (100%) rename {app => stack_orchestrator}/data/stacks/fixturenet-pocket/stack.yml (100%) rename {app => stack_orchestrator}/data/stacks/fixturenet-sushiswap-subgraph/README.md (100%) rename {app => stack_orchestrator}/data/stacks/fixturenet-sushiswap-subgraph/stack.yml (100%) rename {app => stack_orchestrator}/data/stacks/gelato/README.md (100%) rename {app => stack_orchestrator}/data/stacks/gelato/stack.yml (100%) rename {app => stack_orchestrator}/data/stacks/graph-node/README.md (100%) rename {app => stack_orchestrator}/data/stacks/graph-node/deploy-subgraph.md (100%) rename {app => stack_orchestrator}/data/stacks/graph-node/stack.yml (100%) rename {app => stack_orchestrator}/data/stacks/kubo/README.md (100%) rename {app => stack_orchestrator}/data/stacks/kubo/stack.yml (100%) rename {app => stack_orchestrator}/data/stacks/laconic-dot-com/README.md (100%) rename {app => stack_orchestrator}/data/stacks/laconic-dot-com/stack.yml (100%) rename {app => stack_orchestrator}/data/stacks/lasso/README.md (100%) rename {app => stack_orchestrator}/data/stacks/lasso/stack.yml (100%) rename {app => stack_orchestrator}/data/stacks/mainnet-eth-plugeth/README.md (100%) rename {app => stack_orchestrator}/data/stacks/mainnet-eth-plugeth/deploy/commands.py (100%) rename {app => stack_orchestrator}/data/stacks/mainnet-eth-plugeth/stack.yml (100%) rename {app => stack_orchestrator}/data/stacks/mainnet-eth/README.md (100%) rename {app => 
stack_orchestrator}/data/stacks/mainnet-eth/deploy/commands.py (100%) rename {app => stack_orchestrator}/data/stacks/mainnet-eth/stack.yml (100%) rename {app => stack_orchestrator}/data/stacks/mainnet-go-opera/README.md (100%) rename {app => stack_orchestrator}/data/stacks/mainnet-go-opera/stack.yml (100%) rename {app => stack_orchestrator}/data/stacks/mainnet-laconic/README.md (100%) rename {app => stack_orchestrator}/data/stacks/mainnet-laconic/deploy/commands.py (97%) rename {app => stack_orchestrator}/data/stacks/mainnet-laconic/stack.yml (100%) rename {app => stack_orchestrator}/data/stacks/mainnet-laconic/test/run-mainnet-laconic-test.sh (100%) rename {app => stack_orchestrator}/data/stacks/mobymask-v2/README.md (100%) rename {app => stack_orchestrator}/data/stacks/mobymask-v2/demo.md (100%) rename {app => stack_orchestrator}/data/stacks/mobymask-v2/mobymask-only.md (100%) rename {app => stack_orchestrator}/data/stacks/mobymask-v2/stack.yml (100%) rename {app => stack_orchestrator}/data/stacks/mobymask-v2/watcher-p2p-network/watcher.md (100%) rename {app => stack_orchestrator}/data/stacks/mobymask-v2/watcher-p2p-network/web-app.md (100%) rename {app => stack_orchestrator}/data/stacks/mobymask-v2/web-apps.md (100%) rename {app => stack_orchestrator}/data/stacks/mobymask-v3/README.md (100%) rename {app => stack_orchestrator}/data/stacks/mobymask-v3/stack.yml (100%) rename {app => stack_orchestrator}/data/stacks/mobymask-v3/watcher.md (100%) rename {app => stack_orchestrator}/data/stacks/mobymask-v3/web-app.md (100%) rename {app => stack_orchestrator}/data/stacks/mobymask/README.md (100%) rename {app => stack_orchestrator}/data/stacks/mobymask/stack.yml (100%) rename {app => stack_orchestrator}/data/stacks/package-registry/README.md (100%) rename {app => stack_orchestrator}/data/stacks/package-registry/stack.yml (100%) rename {app => stack_orchestrator}/data/stacks/reth/README.md (100%) rename {app => stack_orchestrator}/data/stacks/reth/stack.yml (100%) rename 
{app => stack_orchestrator}/data/stacks/sushiswap-subgraph/README.md (100%) rename {app => stack_orchestrator}/data/stacks/sushiswap-subgraph/stack.yml (100%) rename {app => stack_orchestrator}/data/stacks/sushiswap/README.md (100%) rename {app => stack_orchestrator}/data/stacks/sushiswap/smoke-tests.md (100%) rename {app => stack_orchestrator}/data/stacks/sushiswap/stack.yml (100%) rename {app => stack_orchestrator}/data/stacks/test/README.md (100%) rename {app => stack_orchestrator}/data/stacks/test/deploy/commands.py (88%) rename {app => stack_orchestrator}/data/stacks/test/stack.yml (100%) rename {app => stack_orchestrator}/data/stacks/uniswap-v3/README.md (100%) rename {app => stack_orchestrator}/data/stacks/uniswap-v3/stack.yml (100%) rename {app => stack_orchestrator}/data/version.txt (100%) rename {app => stack_orchestrator}/deploy/__init__.py (100%) rename {app => stack_orchestrator}/deploy/compose/__init__.py (100%) rename {app => stack_orchestrator}/deploy/compose/deploy_docker.py (96%) rename {app => stack_orchestrator}/deploy/deploy.py (96%) rename {app => stack_orchestrator}/deploy/deploy_types.py (93%) rename {app => stack_orchestrator}/deploy/deploy_util.py (92%) rename {app => stack_orchestrator}/deploy/deployer.py (100%) rename {app => stack_orchestrator}/deploy/deployer_factory.py (87%) rename {app => stack_orchestrator}/deploy/deployment.py (94%) rename {app => stack_orchestrator}/deploy/deployment_create.py (97%) rename {app => stack_orchestrator}/deploy/k8s/__init__.py (100%) rename {app => stack_orchestrator}/deploy/k8s/cluster_info.py (94%) rename {app => stack_orchestrator}/deploy/k8s/deploy_k8s.py (92%) rename {app => stack_orchestrator}/deploy/k8s/helpers.py (98%) rename {app => stack_orchestrator}/deploy/spec.py (95%) rename {app => stack_orchestrator}/deploy/stack.py (95%) rename {app => stack_orchestrator}/deploy/stack_state.py (100%) rename cli.py => stack_orchestrator/main.py (82%) rename {app => stack_orchestrator}/opts.py (92%) 
rename {app => stack_orchestrator}/repos/__init__.py (100%) rename {app => stack_orchestrator}/repos/setup_repositories.py (99%) rename {app => stack_orchestrator}/update.py (98%) rename {app => stack_orchestrator}/util.py (100%) rename {app => stack_orchestrator}/version.py (96%) diff --git a/.gitignore b/.gitignore index 35a9c9ec..3aaa220b 100644 --- a/.gitignore +++ b/.gitignore @@ -6,5 +6,5 @@ laconic_stack_orchestrator.egg-info __pycache__ *~ package -app/data/build_tag.txt +stack_orchestrator/data/build_tag.txt /build diff --git a/README.md b/README.md index 52c06830..aa979e3a 100644 --- a/README.md +++ b/README.md @@ -64,12 +64,12 @@ laconic-so update ## Usage -The various [stacks](/app/data/stacks) each contain instructions for running different stacks based on your use case. For example: +The various [stacks](/stack_orchestrator/data/stacks) each contain instructions for running different stacks based on your use case. For example: -- [self-hosted Gitea](/app/data/stacks/build-support) -- [an Optimism Fixturenet](/app/data/stacks/fixturenet-optimism) -- [laconicd with console and CLI](app/data/stacks/fixturenet-laconic-loaded) -- [kubo (IPFS)](app/data/stacks/kubo) +- [self-hosted Gitea](/stack_orchestrator/data/stacks/build-support) +- [an Optimism Fixturenet](/stack_orchestrator/data/stacks/fixturenet-optimism) +- [laconicd with console and CLI](stack_orchestrator/data/stacks/fixturenet-laconic-loaded) +- [kubo (IPFS)](stack_orchestrator/data/stacks/kubo) ## Contributing diff --git a/docs/adding-a-new-stack.md b/docs/adding-a-new-stack.md index 4fbf27b2..2b2d1a65 100644 --- a/docs/adding-a-new-stack.md +++ b/docs/adding-a-new-stack.md @@ -8,7 +8,7 @@ Core to the feature completeness of stack orchestrator is to [decouple the tool ## Example -- in `app/data/stacks/my-new-stack/stack.yml` add: +- in `stack_orchestrator/data/stacks/my-new-stack/stack.yml` add: ```yaml version: "0.1" @@ -21,7 +21,7 @@ pods: - my-new-stack ``` -- in 
`app/data/container-build/cerc-my-new-stack/build.sh` add: +- in `stack_orchestrator/data/container-build/cerc-my-new-stack/build.sh` add: ```yaml #!/usr/bin/env bash @@ -30,7 +30,7 @@ source ${CERC_CONTAINER_BASE_DIR}/build-base.sh docker build -t cerc/my-new-stack:local -f ${CERC_REPO_BASE_DIR}/my-new-stack/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/my-new-stack ``` -- in `app/data/compose/docker-compose-my-new-stack.yml` add: +- in `stack_orchestrator/data/compose/docker-compose-my-new-stack.yml` add: ```yaml version: "3.2" @@ -43,20 +43,20 @@ services: - "0.0.0.0:3000:3000" ``` -- in `app/data/repository-list.txt` add: +- in `stack_orchestrator/data/repository-list.txt` add: ```bash github.com/my-org/my-new-stack ``` whereby that repository contains your source code and a `Dockerfile`, and matches the `repos:` field in the `stack.yml`. -- in `app/data/container-image-list.txt` add: +- in `stack_orchestrator/data/container-image-list.txt` add: ```bash cerc/my-new-stack ``` -- in `app/data/pod-list.txt` add: +- in `stack_orchestrator/data/pod-list.txt` add: ```bash my-new-stack diff --git a/scripts/create_build_tag_file.sh b/scripts/create_build_tag_file.sh index c814a420..077abf31 100755 --- a/scripts/create_build_tag_file.sh +++ b/scripts/create_build_tag_file.sh @@ -1,6 +1,6 @@ -build_tag_file_name=./app/data/build_tag.txt +build_tag_file_name=./stack_orchestrator/data/build_tag.txt echo "# This file should be re-generated running: scripts/create_build_tag_file.sh script" > $build_tag_file_name -product_version_string=$( tail -1 ./app/data/version.txt ) +product_version_string=$( tail -1 ./stack_orchestrator/data/version.txt ) commit_string=$( git rev-parse --short HEAD ) timestamp_string=$(date +'%Y%m%d%H%M') build_tag_string=${product_version_string}-${commit_string}-${timestamp_string} diff --git a/setup.py b/setup.py index 86050fbc..d89dfc4d 100644 --- a/setup.py +++ b/setup.py @@ -14,7 +14,7 @@ setup( long_description=long_description, 
long_description_content_type="text/markdown", url='https://github.com/cerc-io/stack-orchestrator', - py_modules=['cli', 'app'], + py_modules=['stack_orchestrator'], packages=find_packages(), install_requires=[requirements], python_requires='>=3.7', @@ -25,6 +25,6 @@ setup( "Operating System :: OS Independent", ], entry_points={ - 'console_scripts': ['laconic-so=cli:cli'], + 'console_scripts': ['laconic-so=stack_orchestrator.main:cli'], } ) diff --git a/app/__init__.py b/stack_orchestrator/__init__.py similarity index 100% rename from app/__init__.py rename to stack_orchestrator/__init__.py diff --git a/app/__main__.py b/stack_orchestrator/__main__.py similarity index 100% rename from app/__main__.py rename to stack_orchestrator/__main__.py diff --git a/app/base.py b/stack_orchestrator/base.py similarity index 98% rename from app/base.py rename to stack_orchestrator/base.py index ba3504ba..811d085d 100644 --- a/app/base.py +++ b/stack_orchestrator/base.py @@ -15,7 +15,7 @@ import os from abc import ABC, abstractmethod -from app.deploy.deploy import get_stack_status +from stack_orchestrator.deploy.deploy import get_stack_status from decouple import config diff --git a/app/build/__init__.py b/stack_orchestrator/build/__init__.py similarity index 100% rename from app/build/__init__.py rename to stack_orchestrator/build/__init__.py diff --git a/app/build/build_containers.py b/stack_orchestrator/build/build_containers.py similarity index 97% rename from app/build/build_containers.py rename to stack_orchestrator/build/build_containers.py index ee74b807..c97a974f 100644 --- a/app/build/build_containers.py +++ b/stack_orchestrator/build/build_containers.py @@ -27,8 +27,8 @@ import subprocess import click import importlib.resources from pathlib import Path -from app.util import include_exclude_check, get_parsed_stack_config -from app.base import get_npm_registry_url +from stack_orchestrator.util import include_exclude_check, get_parsed_stack_config +from 
stack_orchestrator.base import get_npm_registry_url # TODO: find a place for this # epilog="Config provided either in .env or settings.ini or env vars: CERC_REPO_BASE_DIR (defaults to ~/cerc)" @@ -67,7 +67,7 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args): print('Dev root directory doesn\'t exist, creating') # See: https://stackoverflow.com/a/20885799/1701505 - from app import data + from stack_orchestrator import data with importlib.resources.open_text(data, "container-image-list.txt") as container_list_file: all_containers = container_list_file.read().splitlines() diff --git a/app/build/build_npms.py b/stack_orchestrator/build/build_npms.py similarity index 97% rename from app/build/build_npms.py rename to stack_orchestrator/build/build_npms.py index 2ffbea1b..c8e3af43 100644 --- a/app/build/build_npms.py +++ b/stack_orchestrator/build/build_npms.py @@ -25,8 +25,8 @@ from decouple import config import click import importlib.resources from python_on_whales import docker, DockerException -from app.base import get_stack -from app.util import include_exclude_check, get_parsed_stack_config +from stack_orchestrator.base import get_stack +from stack_orchestrator.util import include_exclude_check, get_parsed_stack_config builder_js_image_name = "cerc/builder-js:local" @@ -83,7 +83,7 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args): os.makedirs(build_root_path) # See: https://stackoverflow.com/a/20885799/1701505 - from app import data + from stack_orchestrator import data with importlib.resources.open_text(data, "npm-package-list.txt") as package_list_file: all_packages = package_list_file.read().splitlines() diff --git a/app/command_types.py b/stack_orchestrator/command_types.py similarity index 100% rename from app/command_types.py rename to stack_orchestrator/command_types.py diff --git a/app/data/__init__.py b/stack_orchestrator/data/__init__.py similarity index 100% rename from app/data/__init__.py rename to 
stack_orchestrator/data/__init__.py diff --git a/app/data/compose/docker-compose-contract-sushiswap.yml b/stack_orchestrator/data/compose/docker-compose-contract-sushiswap.yml similarity index 100% rename from app/data/compose/docker-compose-contract-sushiswap.yml rename to stack_orchestrator/data/compose/docker-compose-contract-sushiswap.yml diff --git a/app/data/compose/docker-compose-contract.yml b/stack_orchestrator/data/compose/docker-compose-contract.yml similarity index 100% rename from app/data/compose/docker-compose-contract.yml rename to stack_orchestrator/data/compose/docker-compose-contract.yml diff --git a/app/data/compose/docker-compose-eth-probe.yml b/stack_orchestrator/data/compose/docker-compose-eth-probe.yml similarity index 100% rename from app/data/compose/docker-compose-eth-probe.yml rename to stack_orchestrator/data/compose/docker-compose-eth-probe.yml diff --git a/app/data/compose/docker-compose-eth-statediff-fill-service.yml b/stack_orchestrator/data/compose/docker-compose-eth-statediff-fill-service.yml similarity index 100% rename from app/data/compose/docker-compose-eth-statediff-fill-service.yml rename to stack_orchestrator/data/compose/docker-compose-eth-statediff-fill-service.yml diff --git a/app/data/compose/docker-compose-fixturenet-eth-metrics.yml b/stack_orchestrator/data/compose/docker-compose-fixturenet-eth-metrics.yml similarity index 100% rename from app/data/compose/docker-compose-fixturenet-eth-metrics.yml rename to stack_orchestrator/data/compose/docker-compose-fixturenet-eth-metrics.yml diff --git a/app/data/compose/docker-compose-fixturenet-eth.yml b/stack_orchestrator/data/compose/docker-compose-fixturenet-eth.yml similarity index 100% rename from app/data/compose/docker-compose-fixturenet-eth.yml rename to stack_orchestrator/data/compose/docker-compose-fixturenet-eth.yml diff --git a/app/data/compose/docker-compose-fixturenet-laconic-console.yml 
b/stack_orchestrator/data/compose/docker-compose-fixturenet-laconic-console.yml similarity index 100% rename from app/data/compose/docker-compose-fixturenet-laconic-console.yml rename to stack_orchestrator/data/compose/docker-compose-fixturenet-laconic-console.yml diff --git a/app/data/compose/docker-compose-fixturenet-laconicd.yml b/stack_orchestrator/data/compose/docker-compose-fixturenet-laconicd.yml similarity index 100% rename from app/data/compose/docker-compose-fixturenet-laconicd.yml rename to stack_orchestrator/data/compose/docker-compose-fixturenet-laconicd.yml diff --git a/app/data/compose/docker-compose-fixturenet-lotus.yml b/stack_orchestrator/data/compose/docker-compose-fixturenet-lotus.yml similarity index 100% rename from app/data/compose/docker-compose-fixturenet-lotus.yml rename to stack_orchestrator/data/compose/docker-compose-fixturenet-lotus.yml diff --git a/app/data/compose/docker-compose-fixturenet-optimism.yml b/stack_orchestrator/data/compose/docker-compose-fixturenet-optimism.yml similarity index 100% rename from app/data/compose/docker-compose-fixturenet-optimism.yml rename to stack_orchestrator/data/compose/docker-compose-fixturenet-optimism.yml diff --git a/app/data/compose/docker-compose-fixturenet-plugeth.yml b/stack_orchestrator/data/compose/docker-compose-fixturenet-plugeth.yml similarity index 100% rename from app/data/compose/docker-compose-fixturenet-plugeth.yml rename to stack_orchestrator/data/compose/docker-compose-fixturenet-plugeth.yml diff --git a/app/data/compose/docker-compose-fixturenet-pocket.yml b/stack_orchestrator/data/compose/docker-compose-fixturenet-pocket.yml similarity index 100% rename from app/data/compose/docker-compose-fixturenet-pocket.yml rename to stack_orchestrator/data/compose/docker-compose-fixturenet-pocket.yml diff --git a/app/data/compose/docker-compose-fixturenet-sushiswap-subgraph-v3.yml b/stack_orchestrator/data/compose/docker-compose-fixturenet-sushiswap-subgraph-v3.yml similarity index 100% 
rename from app/data/compose/docker-compose-fixturenet-sushiswap-subgraph-v3.yml rename to stack_orchestrator/data/compose/docker-compose-fixturenet-sushiswap-subgraph-v3.yml diff --git a/app/data/compose/docker-compose-foundry.yml b/stack_orchestrator/data/compose/docker-compose-foundry.yml similarity index 100% rename from app/data/compose/docker-compose-foundry.yml rename to stack_orchestrator/data/compose/docker-compose-foundry.yml diff --git a/app/data/compose/docker-compose-go-ethereum-foundry.yml b/stack_orchestrator/data/compose/docker-compose-go-ethereum-foundry.yml similarity index 100% rename from app/data/compose/docker-compose-go-ethereum-foundry.yml rename to stack_orchestrator/data/compose/docker-compose-go-ethereum-foundry.yml diff --git a/app/data/compose/docker-compose-go-nitro.yml b/stack_orchestrator/data/compose/docker-compose-go-nitro.yml similarity index 100% rename from app/data/compose/docker-compose-go-nitro.yml rename to stack_orchestrator/data/compose/docker-compose-go-nitro.yml diff --git a/app/data/compose/docker-compose-graph-node.yml b/stack_orchestrator/data/compose/docker-compose-graph-node.yml similarity index 100% rename from app/data/compose/docker-compose-graph-node.yml rename to stack_orchestrator/data/compose/docker-compose-graph-node.yml diff --git a/app/data/compose/docker-compose-ipld-eth-beacon-db.yml b/stack_orchestrator/data/compose/docker-compose-ipld-eth-beacon-db.yml similarity index 100% rename from app/data/compose/docker-compose-ipld-eth-beacon-db.yml rename to stack_orchestrator/data/compose/docker-compose-ipld-eth-beacon-db.yml diff --git a/app/data/compose/docker-compose-ipld-eth-beacon-indexer.yml b/stack_orchestrator/data/compose/docker-compose-ipld-eth-beacon-indexer.yml similarity index 100% rename from app/data/compose/docker-compose-ipld-eth-beacon-indexer.yml rename to stack_orchestrator/data/compose/docker-compose-ipld-eth-beacon-indexer.yml diff --git a/app/data/compose/docker-compose-ipld-eth-db.yml 
b/stack_orchestrator/data/compose/docker-compose-ipld-eth-db.yml similarity index 100% rename from app/data/compose/docker-compose-ipld-eth-db.yml rename to stack_orchestrator/data/compose/docker-compose-ipld-eth-db.yml diff --git a/app/data/compose/docker-compose-ipld-eth-server-payments.yml b/stack_orchestrator/data/compose/docker-compose-ipld-eth-server-payments.yml similarity index 100% rename from app/data/compose/docker-compose-ipld-eth-server-payments.yml rename to stack_orchestrator/data/compose/docker-compose-ipld-eth-server-payments.yml diff --git a/app/data/compose/docker-compose-ipld-eth-server.yml b/stack_orchestrator/data/compose/docker-compose-ipld-eth-server.yml similarity index 100% rename from app/data/compose/docker-compose-ipld-eth-server.yml rename to stack_orchestrator/data/compose/docker-compose-ipld-eth-server.yml diff --git a/app/data/compose/docker-compose-keycloak.yml b/stack_orchestrator/data/compose/docker-compose-keycloak.yml similarity index 100% rename from app/data/compose/docker-compose-keycloak.yml rename to stack_orchestrator/data/compose/docker-compose-keycloak.yml diff --git a/app/data/compose/docker-compose-kubo.yml b/stack_orchestrator/data/compose/docker-compose-kubo.yml similarity index 100% rename from app/data/compose/docker-compose-kubo.yml rename to stack_orchestrator/data/compose/docker-compose-kubo.yml diff --git a/app/data/compose/docker-compose-laconic-dot-com.yml b/stack_orchestrator/data/compose/docker-compose-laconic-dot-com.yml similarity index 100% rename from app/data/compose/docker-compose-laconic-dot-com.yml rename to stack_orchestrator/data/compose/docker-compose-laconic-dot-com.yml diff --git a/app/data/compose/docker-compose-laconicd.yml b/stack_orchestrator/data/compose/docker-compose-laconicd.yml similarity index 100% rename from app/data/compose/docker-compose-laconicd.yml rename to stack_orchestrator/data/compose/docker-compose-laconicd.yml diff --git a/app/data/compose/docker-compose-lasso.yml 
b/stack_orchestrator/data/compose/docker-compose-lasso.yml similarity index 100% rename from app/data/compose/docker-compose-lasso.yml rename to stack_orchestrator/data/compose/docker-compose-lasso.yml diff --git a/app/data/compose/docker-compose-mainnet-eth-api-proxy.yml b/stack_orchestrator/data/compose/docker-compose-mainnet-eth-api-proxy.yml similarity index 100% rename from app/data/compose/docker-compose-mainnet-eth-api-proxy.yml rename to stack_orchestrator/data/compose/docker-compose-mainnet-eth-api-proxy.yml diff --git a/app/data/compose/docker-compose-mainnet-eth-ipld-eth-db.yml b/stack_orchestrator/data/compose/docker-compose-mainnet-eth-ipld-eth-db.yml similarity index 100% rename from app/data/compose/docker-compose-mainnet-eth-ipld-eth-db.yml rename to stack_orchestrator/data/compose/docker-compose-mainnet-eth-ipld-eth-db.yml diff --git a/app/data/compose/docker-compose-mainnet-eth-ipld-eth-server.yml b/stack_orchestrator/data/compose/docker-compose-mainnet-eth-ipld-eth-server.yml similarity index 100% rename from app/data/compose/docker-compose-mainnet-eth-ipld-eth-server.yml rename to stack_orchestrator/data/compose/docker-compose-mainnet-eth-ipld-eth-server.yml diff --git a/app/data/compose/docker-compose-mainnet-eth-keycloak.yml b/stack_orchestrator/data/compose/docker-compose-mainnet-eth-keycloak.yml similarity index 100% rename from app/data/compose/docker-compose-mainnet-eth-keycloak.yml rename to stack_orchestrator/data/compose/docker-compose-mainnet-eth-keycloak.yml diff --git a/app/data/compose/docker-compose-mainnet-eth-metrics.yml b/stack_orchestrator/data/compose/docker-compose-mainnet-eth-metrics.yml similarity index 100% rename from app/data/compose/docker-compose-mainnet-eth-metrics.yml rename to stack_orchestrator/data/compose/docker-compose-mainnet-eth-metrics.yml diff --git a/app/data/compose/docker-compose-mainnet-eth-plugeth.yml b/stack_orchestrator/data/compose/docker-compose-mainnet-eth-plugeth.yml similarity index 100% rename 
from app/data/compose/docker-compose-mainnet-eth-plugeth.yml rename to stack_orchestrator/data/compose/docker-compose-mainnet-eth-plugeth.yml diff --git a/app/data/compose/docker-compose-mainnet-eth.yml b/stack_orchestrator/data/compose/docker-compose-mainnet-eth.yml similarity index 100% rename from app/data/compose/docker-compose-mainnet-eth.yml rename to stack_orchestrator/data/compose/docker-compose-mainnet-eth.yml diff --git a/app/data/compose/docker-compose-mainnet-go-opera.yml b/stack_orchestrator/data/compose/docker-compose-mainnet-go-opera.yml similarity index 100% rename from app/data/compose/docker-compose-mainnet-go-opera.yml rename to stack_orchestrator/data/compose/docker-compose-mainnet-go-opera.yml diff --git a/app/data/compose/docker-compose-mainnet-laconicd.yml b/stack_orchestrator/data/compose/docker-compose-mainnet-laconicd.yml similarity index 100% rename from app/data/compose/docker-compose-mainnet-laconicd.yml rename to stack_orchestrator/data/compose/docker-compose-mainnet-laconicd.yml diff --git a/app/data/compose/docker-compose-mobymask-app-v3.yml b/stack_orchestrator/data/compose/docker-compose-mobymask-app-v3.yml similarity index 100% rename from app/data/compose/docker-compose-mobymask-app-v3.yml rename to stack_orchestrator/data/compose/docker-compose-mobymask-app-v3.yml diff --git a/app/data/compose/docker-compose-mobymask-app.yml b/stack_orchestrator/data/compose/docker-compose-mobymask-app.yml similarity index 100% rename from app/data/compose/docker-compose-mobymask-app.yml rename to stack_orchestrator/data/compose/docker-compose-mobymask-app.yml diff --git a/app/data/compose/docker-compose-mobymask-snap.yml b/stack_orchestrator/data/compose/docker-compose-mobymask-snap.yml similarity index 100% rename from app/data/compose/docker-compose-mobymask-snap.yml rename to stack_orchestrator/data/compose/docker-compose-mobymask-snap.yml diff --git a/app/data/compose/docker-compose-nitro-contracts.yml 
b/stack_orchestrator/data/compose/docker-compose-nitro-contracts.yml similarity index 100% rename from app/data/compose/docker-compose-nitro-contracts.yml rename to stack_orchestrator/data/compose/docker-compose-nitro-contracts.yml diff --git a/app/data/compose/docker-compose-nitro-rpc-client.yml b/stack_orchestrator/data/compose/docker-compose-nitro-rpc-client.yml similarity index 100% rename from app/data/compose/docker-compose-nitro-rpc-client.yml rename to stack_orchestrator/data/compose/docker-compose-nitro-rpc-client.yml diff --git a/app/data/compose/docker-compose-peer-test-app.yml b/stack_orchestrator/data/compose/docker-compose-peer-test-app.yml similarity index 100% rename from app/data/compose/docker-compose-peer-test-app.yml rename to stack_orchestrator/data/compose/docker-compose-peer-test-app.yml diff --git a/app/data/compose/docker-compose-ponder-indexer.yml b/stack_orchestrator/data/compose/docker-compose-ponder-indexer.yml similarity index 100% rename from app/data/compose/docker-compose-ponder-indexer.yml rename to stack_orchestrator/data/compose/docker-compose-ponder-indexer.yml diff --git a/app/data/compose/docker-compose-ponder-watcher.yml b/stack_orchestrator/data/compose/docker-compose-ponder-watcher.yml similarity index 100% rename from app/data/compose/docker-compose-ponder-watcher.yml rename to stack_orchestrator/data/compose/docker-compose-ponder-watcher.yml diff --git a/app/data/compose/docker-compose-reth.yml b/stack_orchestrator/data/compose/docker-compose-reth.yml similarity index 100% rename from app/data/compose/docker-compose-reth.yml rename to stack_orchestrator/data/compose/docker-compose-reth.yml diff --git a/app/data/compose/docker-compose-sushiswap-subgraph-v3.yml b/stack_orchestrator/data/compose/docker-compose-sushiswap-subgraph-v3.yml similarity index 100% rename from app/data/compose/docker-compose-sushiswap-subgraph-v3.yml rename to stack_orchestrator/data/compose/docker-compose-sushiswap-subgraph-v3.yml diff --git 
a/app/data/compose/docker-compose-test.yml b/stack_orchestrator/data/compose/docker-compose-test.yml similarity index 100% rename from app/data/compose/docker-compose-test.yml rename to stack_orchestrator/data/compose/docker-compose-test.yml diff --git a/app/data/compose/docker-compose-tx-spammer.yml b/stack_orchestrator/data/compose/docker-compose-tx-spammer.yml similarity index 100% rename from app/data/compose/docker-compose-tx-spammer.yml rename to stack_orchestrator/data/compose/docker-compose-tx-spammer.yml diff --git a/app/data/compose/docker-compose-watcher-azimuth.yml b/stack_orchestrator/data/compose/docker-compose-watcher-azimuth.yml similarity index 100% rename from app/data/compose/docker-compose-watcher-azimuth.yml rename to stack_orchestrator/data/compose/docker-compose-watcher-azimuth.yml diff --git a/app/data/compose/docker-compose-watcher-erc20.yml b/stack_orchestrator/data/compose/docker-compose-watcher-erc20.yml similarity index 100% rename from app/data/compose/docker-compose-watcher-erc20.yml rename to stack_orchestrator/data/compose/docker-compose-watcher-erc20.yml diff --git a/app/data/compose/docker-compose-watcher-erc721.yml b/stack_orchestrator/data/compose/docker-compose-watcher-erc721.yml similarity index 100% rename from app/data/compose/docker-compose-watcher-erc721.yml rename to stack_orchestrator/data/compose/docker-compose-watcher-erc721.yml diff --git a/app/data/compose/docker-compose-watcher-gelato.yml b/stack_orchestrator/data/compose/docker-compose-watcher-gelato.yml similarity index 100% rename from app/data/compose/docker-compose-watcher-gelato.yml rename to stack_orchestrator/data/compose/docker-compose-watcher-gelato.yml diff --git a/app/data/compose/docker-compose-watcher-mobymask-v2.yml b/stack_orchestrator/data/compose/docker-compose-watcher-mobymask-v2.yml similarity index 100% rename from app/data/compose/docker-compose-watcher-mobymask-v2.yml rename to 
stack_orchestrator/data/compose/docker-compose-watcher-mobymask-v2.yml diff --git a/app/data/compose/docker-compose-watcher-mobymask-v3.yml b/stack_orchestrator/data/compose/docker-compose-watcher-mobymask-v3.yml similarity index 100% rename from app/data/compose/docker-compose-watcher-mobymask-v3.yml rename to stack_orchestrator/data/compose/docker-compose-watcher-mobymask-v3.yml diff --git a/app/data/compose/docker-compose-watcher-mobymask.yml b/stack_orchestrator/data/compose/docker-compose-watcher-mobymask.yml similarity index 100% rename from app/data/compose/docker-compose-watcher-mobymask.yml rename to stack_orchestrator/data/compose/docker-compose-watcher-mobymask.yml diff --git a/app/data/compose/docker-compose-watcher-sushiswap.yml b/stack_orchestrator/data/compose/docker-compose-watcher-sushiswap.yml similarity index 100% rename from app/data/compose/docker-compose-watcher-sushiswap.yml rename to stack_orchestrator/data/compose/docker-compose-watcher-sushiswap.yml diff --git a/app/data/compose/docker-compose-watcher-uniswap-v3.yml b/stack_orchestrator/data/compose/docker-compose-watcher-uniswap-v3.yml similarity index 100% rename from app/data/compose/docker-compose-watcher-uniswap-v3.yml rename to stack_orchestrator/data/compose/docker-compose-watcher-uniswap-v3.yml diff --git a/app/data/config/contract-sushiswap/deploy-core-contracts.sh b/stack_orchestrator/data/config/contract-sushiswap/deploy-core-contracts.sh similarity index 100% rename from app/data/config/contract-sushiswap/deploy-core-contracts.sh rename to stack_orchestrator/data/config/contract-sushiswap/deploy-core-contracts.sh diff --git a/app/data/config/contract-sushiswap/deploy-periphery-contracts.sh b/stack_orchestrator/data/config/contract-sushiswap/deploy-periphery-contracts.sh similarity index 100% rename from app/data/config/contract-sushiswap/deploy-periphery-contracts.sh rename to stack_orchestrator/data/config/contract-sushiswap/deploy-periphery-contracts.sh diff --git 
a/app/data/config/contract-sushiswap/deployment-params.env b/stack_orchestrator/data/config/contract-sushiswap/deployment-params.env similarity index 100% rename from app/data/config/contract-sushiswap/deployment-params.env rename to stack_orchestrator/data/config/contract-sushiswap/deployment-params.env diff --git a/app/data/config/fixturenet-eth-metrics/grafana/etc/dashboards/fixturenet_dashboard.json b/stack_orchestrator/data/config/fixturenet-eth-metrics/grafana/etc/dashboards/fixturenet_dashboard.json similarity index 100% rename from app/data/config/fixturenet-eth-metrics/grafana/etc/dashboards/fixturenet_dashboard.json rename to stack_orchestrator/data/config/fixturenet-eth-metrics/grafana/etc/dashboards/fixturenet_dashboard.json diff --git a/app/data/config/fixturenet-eth-metrics/grafana/etc/provisioning/dashboards/dashboards.yml b/stack_orchestrator/data/config/fixturenet-eth-metrics/grafana/etc/provisioning/dashboards/dashboards.yml similarity index 100% rename from app/data/config/fixturenet-eth-metrics/grafana/etc/provisioning/dashboards/dashboards.yml rename to stack_orchestrator/data/config/fixturenet-eth-metrics/grafana/etc/provisioning/dashboards/dashboards.yml diff --git a/app/data/config/fixturenet-eth-metrics/grafana/etc/provisioning/datasources/prometheus.yml b/stack_orchestrator/data/config/fixturenet-eth-metrics/grafana/etc/provisioning/datasources/prometheus.yml similarity index 100% rename from app/data/config/fixturenet-eth-metrics/grafana/etc/provisioning/datasources/prometheus.yml rename to stack_orchestrator/data/config/fixturenet-eth-metrics/grafana/etc/provisioning/datasources/prometheus.yml diff --git a/app/data/config/fixturenet-eth-metrics/prometheus/etc/prometheus.yml b/stack_orchestrator/data/config/fixturenet-eth-metrics/prometheus/etc/prometheus.yml similarity index 100% rename from app/data/config/fixturenet-eth-metrics/prometheus/etc/prometheus.yml rename to 
stack_orchestrator/data/config/fixturenet-eth-metrics/prometheus/etc/prometheus.yml diff --git a/app/data/config/fixturenet-eth/fixturenet-eth.env b/stack_orchestrator/data/config/fixturenet-eth/fixturenet-eth.env similarity index 100% rename from app/data/config/fixturenet-eth/fixturenet-eth.env rename to stack_orchestrator/data/config/fixturenet-eth/fixturenet-eth.env diff --git a/app/data/config/fixturenet-laconicd/create-fixturenet.sh b/stack_orchestrator/data/config/fixturenet-laconicd/create-fixturenet.sh similarity index 100% rename from app/data/config/fixturenet-laconicd/create-fixturenet.sh rename to stack_orchestrator/data/config/fixturenet-laconicd/create-fixturenet.sh diff --git a/app/data/config/fixturenet-laconicd/export-myaddress.sh b/stack_orchestrator/data/config/fixturenet-laconicd/export-myaddress.sh similarity index 100% rename from app/data/config/fixturenet-laconicd/export-myaddress.sh rename to stack_orchestrator/data/config/fixturenet-laconicd/export-myaddress.sh diff --git a/app/data/config/fixturenet-laconicd/export-mykey.sh b/stack_orchestrator/data/config/fixturenet-laconicd/export-mykey.sh similarity index 100% rename from app/data/config/fixturenet-laconicd/export-mykey.sh rename to stack_orchestrator/data/config/fixturenet-laconicd/export-mykey.sh diff --git a/app/data/config/fixturenet-laconicd/registry-cli-config-template.yml b/stack_orchestrator/data/config/fixturenet-laconicd/registry-cli-config-template.yml similarity index 100% rename from app/data/config/fixturenet-laconicd/registry-cli-config-template.yml rename to stack_orchestrator/data/config/fixturenet-laconicd/registry-cli-config-template.yml diff --git a/app/data/config/fixturenet-lotus/fund-account.sh b/stack_orchestrator/data/config/fixturenet-lotus/fund-account.sh similarity index 100% rename from app/data/config/fixturenet-lotus/fund-account.sh rename to stack_orchestrator/data/config/fixturenet-lotus/fund-account.sh diff --git 
a/app/data/config/fixturenet-lotus/lotus-env.env b/stack_orchestrator/data/config/fixturenet-lotus/lotus-env.env similarity index 100% rename from app/data/config/fixturenet-lotus/lotus-env.env rename to stack_orchestrator/data/config/fixturenet-lotus/lotus-env.env diff --git a/app/data/config/fixturenet-lotus/setup-miner.sh b/stack_orchestrator/data/config/fixturenet-lotus/setup-miner.sh similarity index 100% rename from app/data/config/fixturenet-lotus/setup-miner.sh rename to stack_orchestrator/data/config/fixturenet-lotus/setup-miner.sh diff --git a/app/data/config/fixturenet-lotus/setup-node.sh b/stack_orchestrator/data/config/fixturenet-lotus/setup-node.sh similarity index 100% rename from app/data/config/fixturenet-lotus/setup-node.sh rename to stack_orchestrator/data/config/fixturenet-lotus/setup-node.sh diff --git a/app/data/config/fixturenet-optimism/generate-l2-config.sh b/stack_orchestrator/data/config/fixturenet-optimism/generate-l2-config.sh similarity index 100% rename from app/data/config/fixturenet-optimism/generate-l2-config.sh rename to stack_orchestrator/data/config/fixturenet-optimism/generate-l2-config.sh diff --git a/app/data/config/fixturenet-optimism/l1-params.env b/stack_orchestrator/data/config/fixturenet-optimism/l1-params.env similarity index 100% rename from app/data/config/fixturenet-optimism/l1-params.env rename to stack_orchestrator/data/config/fixturenet-optimism/l1-params.env diff --git a/app/data/config/fixturenet-optimism/optimism-contracts/run.sh b/stack_orchestrator/data/config/fixturenet-optimism/optimism-contracts/run.sh similarity index 100% rename from app/data/config/fixturenet-optimism/optimism-contracts/run.sh rename to stack_orchestrator/data/config/fixturenet-optimism/optimism-contracts/run.sh diff --git a/app/data/config/fixturenet-optimism/optimism-contracts/update-config.js b/stack_orchestrator/data/config/fixturenet-optimism/optimism-contracts/update-config.js similarity index 100% rename from 
app/data/config/fixturenet-optimism/optimism-contracts/update-config.js rename to stack_orchestrator/data/config/fixturenet-optimism/optimism-contracts/update-config.js diff --git a/app/data/config/fixturenet-optimism/run-op-batcher.sh b/stack_orchestrator/data/config/fixturenet-optimism/run-op-batcher.sh similarity index 100% rename from app/data/config/fixturenet-optimism/run-op-batcher.sh rename to stack_orchestrator/data/config/fixturenet-optimism/run-op-batcher.sh diff --git a/app/data/config/fixturenet-optimism/run-op-geth.sh b/stack_orchestrator/data/config/fixturenet-optimism/run-op-geth.sh similarity index 100% rename from app/data/config/fixturenet-optimism/run-op-geth.sh rename to stack_orchestrator/data/config/fixturenet-optimism/run-op-geth.sh diff --git a/app/data/config/fixturenet-optimism/run-op-node.sh b/stack_orchestrator/data/config/fixturenet-optimism/run-op-node.sh similarity index 100% rename from app/data/config/fixturenet-optimism/run-op-node.sh rename to stack_orchestrator/data/config/fixturenet-optimism/run-op-node.sh diff --git a/app/data/config/fixturenet-optimism/run-op-proposer.sh b/stack_orchestrator/data/config/fixturenet-optimism/run-op-proposer.sh similarity index 100% rename from app/data/config/fixturenet-optimism/run-op-proposer.sh rename to stack_orchestrator/data/config/fixturenet-optimism/run-op-proposer.sh diff --git a/app/data/config/fixturenet-pocket/chains.json b/stack_orchestrator/data/config/fixturenet-pocket/chains.json similarity index 100% rename from app/data/config/fixturenet-pocket/chains.json rename to stack_orchestrator/data/config/fixturenet-pocket/chains.json diff --git a/app/data/config/fixturenet-pocket/create-fixturenet.sh b/stack_orchestrator/data/config/fixturenet-pocket/create-fixturenet.sh similarity index 100% rename from app/data/config/fixturenet-pocket/create-fixturenet.sh rename to stack_orchestrator/data/config/fixturenet-pocket/create-fixturenet.sh diff --git 
a/app/data/config/fixturenet-pocket/genesis.json b/stack_orchestrator/data/config/fixturenet-pocket/genesis.json similarity index 100% rename from app/data/config/fixturenet-pocket/genesis.json rename to stack_orchestrator/data/config/fixturenet-pocket/genesis.json diff --git a/app/data/config/fixturenet-sushiswap-subgraph-v3/lotus-fixturenet.js.template b/stack_orchestrator/data/config/fixturenet-sushiswap-subgraph-v3/lotus-fixturenet.js.template similarity index 100% rename from app/data/config/fixturenet-sushiswap-subgraph-v3/lotus-fixturenet.js.template rename to stack_orchestrator/data/config/fixturenet-sushiswap-subgraph-v3/lotus-fixturenet.js.template diff --git a/app/data/config/fixturenet-sushiswap-subgraph-v3/run-blocks.sh b/stack_orchestrator/data/config/fixturenet-sushiswap-subgraph-v3/run-blocks.sh similarity index 100% rename from app/data/config/fixturenet-sushiswap-subgraph-v3/run-blocks.sh rename to stack_orchestrator/data/config/fixturenet-sushiswap-subgraph-v3/run-blocks.sh diff --git a/app/data/config/fixturenet-sushiswap-subgraph-v3/run-v3.sh b/stack_orchestrator/data/config/fixturenet-sushiswap-subgraph-v3/run-v3.sh similarity index 100% rename from app/data/config/fixturenet-sushiswap-subgraph-v3/run-v3.sh rename to stack_orchestrator/data/config/fixturenet-sushiswap-subgraph-v3/run-v3.sh diff --git a/app/data/config/foundry/foundry.toml b/stack_orchestrator/data/config/foundry/foundry.toml similarity index 100% rename from app/data/config/foundry/foundry.toml rename to stack_orchestrator/data/config/foundry/foundry.toml diff --git a/app/data/config/go-nitro/run-nitro-node.sh b/stack_orchestrator/data/config/go-nitro/run-nitro-node.sh similarity index 100% rename from app/data/config/go-nitro/run-nitro-node.sh rename to stack_orchestrator/data/config/go-nitro/run-nitro-node.sh diff --git a/app/data/config/ipld-eth-beacon-indexer/indexer.env b/stack_orchestrator/data/config/ipld-eth-beacon-indexer/indexer.env similarity index 100% rename from 
app/data/config/ipld-eth-beacon-indexer/indexer.env rename to stack_orchestrator/data/config/ipld-eth-beacon-indexer/indexer.env diff --git a/app/data/config/ipld-eth-server/chain.json b/stack_orchestrator/data/config/ipld-eth-server/chain.json similarity index 100% rename from app/data/config/ipld-eth-server/chain.json rename to stack_orchestrator/data/config/ipld-eth-server/chain.json diff --git a/app/data/config/ipld-eth-server/entrypoint.sh b/stack_orchestrator/data/config/ipld-eth-server/entrypoint.sh similarity index 100% rename from app/data/config/ipld-eth-server/entrypoint.sh rename to stack_orchestrator/data/config/ipld-eth-server/entrypoint.sh diff --git a/app/data/config/keycloak/import/cerc-realm.json b/stack_orchestrator/data/config/keycloak/import/cerc-realm.json similarity index 100% rename from app/data/config/keycloak/import/cerc-realm.json rename to stack_orchestrator/data/config/keycloak/import/cerc-realm.json diff --git a/app/data/config/keycloak/keycloak.env b/stack_orchestrator/data/config/keycloak/keycloak.env similarity index 100% rename from app/data/config/keycloak/keycloak.env rename to stack_orchestrator/data/config/keycloak/keycloak.env diff --git a/app/data/config/keycloak/nginx/keycloak_proxy.conf b/stack_orchestrator/data/config/keycloak/nginx/keycloak_proxy.conf similarity index 100% rename from app/data/config/keycloak/nginx/keycloak_proxy.conf rename to stack_orchestrator/data/config/keycloak/nginx/keycloak_proxy.conf diff --git a/app/data/config/mainnet-eth-api-proxy/ethpxy.env b/stack_orchestrator/data/config/mainnet-eth-api-proxy/ethpxy.env similarity index 100% rename from app/data/config/mainnet-eth-api-proxy/ethpxy.env rename to stack_orchestrator/data/config/mainnet-eth-api-proxy/ethpxy.env diff --git a/app/data/config/mainnet-eth-ipld-eth-db/db.env b/stack_orchestrator/data/config/mainnet-eth-ipld-eth-db/db.env similarity index 100% rename from app/data/config/mainnet-eth-ipld-eth-db/db.env rename to 
stack_orchestrator/data/config/mainnet-eth-ipld-eth-db/db.env diff --git a/app/data/config/mainnet-eth-ipld-eth-server/config.toml b/stack_orchestrator/data/config/mainnet-eth-ipld-eth-server/config.toml similarity index 100% rename from app/data/config/mainnet-eth-ipld-eth-server/config.toml rename to stack_orchestrator/data/config/mainnet-eth-ipld-eth-server/config.toml diff --git a/app/data/config/mainnet-eth-ipld-eth-server/srv.env b/stack_orchestrator/data/config/mainnet-eth-ipld-eth-server/srv.env similarity index 100% rename from app/data/config/mainnet-eth-ipld-eth-server/srv.env rename to stack_orchestrator/data/config/mainnet-eth-ipld-eth-server/srv.env diff --git a/app/data/config/mainnet-eth-keycloak/import/cerc-realm.json b/stack_orchestrator/data/config/mainnet-eth-keycloak/import/cerc-realm.json similarity index 100% rename from app/data/config/mainnet-eth-keycloak/import/cerc-realm.json rename to stack_orchestrator/data/config/mainnet-eth-keycloak/import/cerc-realm.json diff --git a/app/data/config/mainnet-eth-keycloak/keycloak.env b/stack_orchestrator/data/config/mainnet-eth-keycloak/keycloak.env similarity index 100% rename from app/data/config/mainnet-eth-keycloak/keycloak.env rename to stack_orchestrator/data/config/mainnet-eth-keycloak/keycloak.env diff --git a/app/data/config/mainnet-eth-keycloak/nginx.example b/stack_orchestrator/data/config/mainnet-eth-keycloak/nginx.example similarity index 100% rename from app/data/config/mainnet-eth-keycloak/nginx.example rename to stack_orchestrator/data/config/mainnet-eth-keycloak/nginx.example diff --git a/app/data/config/mainnet-eth-keycloak/scripts/keycloak-mirror/keycloak-mirror.py b/stack_orchestrator/data/config/mainnet-eth-keycloak/scripts/keycloak-mirror/keycloak-mirror.py similarity index 100% rename from app/data/config/mainnet-eth-keycloak/scripts/keycloak-mirror/keycloak-mirror.py rename to stack_orchestrator/data/config/mainnet-eth-keycloak/scripts/keycloak-mirror/keycloak-mirror.py diff 
--git a/app/data/config/mainnet-eth-keycloak/scripts/keycloak-mirror/requirements.txt b/stack_orchestrator/data/config/mainnet-eth-keycloak/scripts/keycloak-mirror/requirements.txt similarity index 100% rename from app/data/config/mainnet-eth-keycloak/scripts/keycloak-mirror/requirements.txt rename to stack_orchestrator/data/config/mainnet-eth-keycloak/scripts/keycloak-mirror/requirements.txt diff --git a/app/data/config/mainnet-eth-keycloak/ui/config.yml b/stack_orchestrator/data/config/mainnet-eth-keycloak/ui/config.yml similarity index 100% rename from app/data/config/mainnet-eth-keycloak/ui/config.yml rename to stack_orchestrator/data/config/mainnet-eth-keycloak/ui/config.yml diff --git a/app/data/config/mainnet-eth-metrics/grafana/etc/dashboards/eth_dashboard.json b/stack_orchestrator/data/config/mainnet-eth-metrics/grafana/etc/dashboards/eth_dashboard.json similarity index 100% rename from app/data/config/mainnet-eth-metrics/grafana/etc/dashboards/eth_dashboard.json rename to stack_orchestrator/data/config/mainnet-eth-metrics/grafana/etc/dashboards/eth_dashboard.json diff --git a/app/data/config/mainnet-eth-metrics/grafana/etc/provisioning/dashboards/dashboards.yml b/stack_orchestrator/data/config/mainnet-eth-metrics/grafana/etc/provisioning/dashboards/dashboards.yml similarity index 100% rename from app/data/config/mainnet-eth-metrics/grafana/etc/provisioning/dashboards/dashboards.yml rename to stack_orchestrator/data/config/mainnet-eth-metrics/grafana/etc/provisioning/dashboards/dashboards.yml diff --git a/app/data/config/mainnet-eth-metrics/grafana/etc/provisioning/datasources/prometheus.yml b/stack_orchestrator/data/config/mainnet-eth-metrics/grafana/etc/provisioning/datasources/prometheus.yml similarity index 100% rename from app/data/config/mainnet-eth-metrics/grafana/etc/provisioning/datasources/prometheus.yml rename to stack_orchestrator/data/config/mainnet-eth-metrics/grafana/etc/provisioning/datasources/prometheus.yml diff --git 
a/app/data/config/mainnet-eth-metrics/metrics.env b/stack_orchestrator/data/config/mainnet-eth-metrics/metrics.env similarity index 100% rename from app/data/config/mainnet-eth-metrics/metrics.env rename to stack_orchestrator/data/config/mainnet-eth-metrics/metrics.env diff --git a/app/data/config/mainnet-eth-metrics/prometheus/etc/prometheus.yml b/stack_orchestrator/data/config/mainnet-eth-metrics/prometheus/etc/prometheus.yml similarity index 100% rename from app/data/config/mainnet-eth-metrics/prometheus/etc/prometheus.yml rename to stack_orchestrator/data/config/mainnet-eth-metrics/prometheus/etc/prometheus.yml diff --git a/app/data/config/mainnet-eth-plugeth/geth.env b/stack_orchestrator/data/config/mainnet-eth-plugeth/geth.env similarity index 100% rename from app/data/config/mainnet-eth-plugeth/geth.env rename to stack_orchestrator/data/config/mainnet-eth-plugeth/geth.env diff --git a/app/data/config/mainnet-eth-plugeth/lighthouse.env b/stack_orchestrator/data/config/mainnet-eth-plugeth/lighthouse.env similarity index 100% rename from app/data/config/mainnet-eth-plugeth/lighthouse.env rename to stack_orchestrator/data/config/mainnet-eth-plugeth/lighthouse.env diff --git a/app/data/config/mainnet-eth-plugeth/scripts/run-geth.sh b/stack_orchestrator/data/config/mainnet-eth-plugeth/scripts/run-geth.sh similarity index 100% rename from app/data/config/mainnet-eth-plugeth/scripts/run-geth.sh rename to stack_orchestrator/data/config/mainnet-eth-plugeth/scripts/run-geth.sh diff --git a/app/data/config/mainnet-eth-plugeth/scripts/run-lighthouse.sh b/stack_orchestrator/data/config/mainnet-eth-plugeth/scripts/run-lighthouse.sh similarity index 100% rename from app/data/config/mainnet-eth-plugeth/scripts/run-lighthouse.sh rename to stack_orchestrator/data/config/mainnet-eth-plugeth/scripts/run-lighthouse.sh diff --git a/app/data/config/mainnet-eth/geth.env b/stack_orchestrator/data/config/mainnet-eth/geth.env similarity index 100% rename from 
app/data/config/mainnet-eth/geth.env rename to stack_orchestrator/data/config/mainnet-eth/geth.env diff --git a/app/data/config/mainnet-eth/lighthouse.env b/stack_orchestrator/data/config/mainnet-eth/lighthouse.env similarity index 100% rename from app/data/config/mainnet-eth/lighthouse.env rename to stack_orchestrator/data/config/mainnet-eth/lighthouse.env diff --git a/app/data/config/mainnet-eth/scripts/run-geth.sh b/stack_orchestrator/data/config/mainnet-eth/scripts/run-geth.sh similarity index 100% rename from app/data/config/mainnet-eth/scripts/run-geth.sh rename to stack_orchestrator/data/config/mainnet-eth/scripts/run-geth.sh diff --git a/app/data/config/mainnet-eth/scripts/run-lighthouse.sh b/stack_orchestrator/data/config/mainnet-eth/scripts/run-lighthouse.sh similarity index 100% rename from app/data/config/mainnet-eth/scripts/run-lighthouse.sh rename to stack_orchestrator/data/config/mainnet-eth/scripts/run-lighthouse.sh diff --git a/app/data/config/mainnet-go-opera/go-opera.env b/stack_orchestrator/data/config/mainnet-go-opera/go-opera.env similarity index 100% rename from app/data/config/mainnet-go-opera/go-opera.env rename to stack_orchestrator/data/config/mainnet-go-opera/go-opera.env diff --git a/app/data/config/mainnet-go-opera/start-node.sh b/stack_orchestrator/data/config/mainnet-go-opera/start-node.sh similarity index 100% rename from app/data/config/mainnet-go-opera/start-node.sh rename to stack_orchestrator/data/config/mainnet-go-opera/start-node.sh diff --git a/app/data/config/mainnet-laconicd/registry-cli-config-template.yml b/stack_orchestrator/data/config/mainnet-laconicd/registry-cli-config-template.yml similarity index 100% rename from app/data/config/mainnet-laconicd/registry-cli-config-template.yml rename to stack_orchestrator/data/config/mainnet-laconicd/registry-cli-config-template.yml diff --git a/app/data/config/mainnet-laconicd/scripts/export-myaddress.sh 
b/stack_orchestrator/data/config/mainnet-laconicd/scripts/export-myaddress.sh similarity index 100% rename from app/data/config/mainnet-laconicd/scripts/export-myaddress.sh rename to stack_orchestrator/data/config/mainnet-laconicd/scripts/export-myaddress.sh diff --git a/app/data/config/mainnet-laconicd/scripts/export-mykey.sh b/stack_orchestrator/data/config/mainnet-laconicd/scripts/export-mykey.sh similarity index 100% rename from app/data/config/mainnet-laconicd/scripts/export-mykey.sh rename to stack_orchestrator/data/config/mainnet-laconicd/scripts/export-mykey.sh diff --git a/app/data/config/mainnet-laconicd/scripts/run-laconicd.sh b/stack_orchestrator/data/config/mainnet-laconicd/scripts/run-laconicd.sh similarity index 100% rename from app/data/config/mainnet-laconicd/scripts/run-laconicd.sh rename to stack_orchestrator/data/config/mainnet-laconicd/scripts/run-laconicd.sh diff --git a/app/data/config/network/wait-for-it.sh b/stack_orchestrator/data/config/network/wait-for-it.sh similarity index 100% rename from app/data/config/network/wait-for-it.sh rename to stack_orchestrator/data/config/network/wait-for-it.sh diff --git a/app/data/config/nitro-contracts/deploy.sh b/stack_orchestrator/data/config/nitro-contracts/deploy.sh similarity index 100% rename from app/data/config/nitro-contracts/deploy.sh rename to stack_orchestrator/data/config/nitro-contracts/deploy.sh diff --git a/app/data/config/optimism-contracts/hardhat-tasks/rekey-json.ts b/stack_orchestrator/data/config/optimism-contracts/hardhat-tasks/rekey-json.ts similarity index 100% rename from app/data/config/optimism-contracts/hardhat-tasks/rekey-json.ts rename to stack_orchestrator/data/config/optimism-contracts/hardhat-tasks/rekey-json.ts diff --git a/app/data/config/optimism-contracts/hardhat-tasks/send-balance.ts b/stack_orchestrator/data/config/optimism-contracts/hardhat-tasks/send-balance.ts similarity index 100% rename from app/data/config/optimism-contracts/hardhat-tasks/send-balance.ts 
rename to stack_orchestrator/data/config/optimism-contracts/hardhat-tasks/send-balance.ts diff --git a/app/data/config/optimism-contracts/hardhat-tasks/verify-contract-deployment.ts b/stack_orchestrator/data/config/optimism-contracts/hardhat-tasks/verify-contract-deployment.ts similarity index 100% rename from app/data/config/optimism-contracts/hardhat-tasks/verify-contract-deployment.ts rename to stack_orchestrator/data/config/optimism-contracts/hardhat-tasks/verify-contract-deployment.ts diff --git a/app/data/config/ponder/base-rates-config.json b/stack_orchestrator/data/config/ponder/base-rates-config.json similarity index 100% rename from app/data/config/ponder/base-rates-config.json rename to stack_orchestrator/data/config/ponder/base-rates-config.json diff --git a/app/data/config/ponder/deploy-erc20-contract.sh b/stack_orchestrator/data/config/ponder/deploy-erc20-contract.sh similarity index 100% rename from app/data/config/ponder/deploy-erc20-contract.sh rename to stack_orchestrator/data/config/ponder/deploy-erc20-contract.sh diff --git a/app/data/config/ponder/ponder-start.sh b/stack_orchestrator/data/config/ponder/ponder-start.sh similarity index 100% rename from app/data/config/ponder/ponder-start.sh rename to stack_orchestrator/data/config/ponder/ponder-start.sh diff --git a/app/data/config/ponder/ponder.indexer-1.config.ts b/stack_orchestrator/data/config/ponder/ponder.indexer-1.config.ts similarity index 100% rename from app/data/config/ponder/ponder.indexer-1.config.ts rename to stack_orchestrator/data/config/ponder/ponder.indexer-1.config.ts diff --git a/app/data/config/ponder/ponder.indexer-2.config.ts b/stack_orchestrator/data/config/ponder/ponder.indexer-2.config.ts similarity index 100% rename from app/data/config/ponder/ponder.indexer-2.config.ts rename to stack_orchestrator/data/config/ponder/ponder.indexer-2.config.ts diff --git a/app/data/config/ponder/ponder.watcher.config.ts b/stack_orchestrator/data/config/ponder/ponder.watcher.config.ts 
similarity index 100% rename from app/data/config/ponder/ponder.watcher.config.ts rename to stack_orchestrator/data/config/ponder/ponder.watcher.config.ts diff --git a/app/data/config/postgresql/create-pg-stat-statements.sql b/stack_orchestrator/data/config/postgresql/create-pg-stat-statements.sql similarity index 100% rename from app/data/config/postgresql/create-pg-stat-statements.sql rename to stack_orchestrator/data/config/postgresql/create-pg-stat-statements.sql diff --git a/app/data/config/postgresql/multiple-postgressql-databases.sh b/stack_orchestrator/data/config/postgresql/multiple-postgressql-databases.sh similarity index 100% rename from app/data/config/postgresql/multiple-postgressql-databases.sh rename to stack_orchestrator/data/config/postgresql/multiple-postgressql-databases.sh diff --git a/app/data/config/reth/start-lighthouse.sh b/stack_orchestrator/data/config/reth/start-lighthouse.sh similarity index 100% rename from app/data/config/reth/start-lighthouse.sh rename to stack_orchestrator/data/config/reth/start-lighthouse.sh diff --git a/app/data/config/reth/start-reth.sh b/stack_orchestrator/data/config/reth/start-reth.sh similarity index 100% rename from app/data/config/reth/start-reth.sh rename to stack_orchestrator/data/config/reth/start-reth.sh diff --git a/app/data/config/sushiswap-subgraph-v3/filecoin.js b/stack_orchestrator/data/config/sushiswap-subgraph-v3/filecoin.js similarity index 100% rename from app/data/config/sushiswap-subgraph-v3/filecoin.js rename to stack_orchestrator/data/config/sushiswap-subgraph-v3/filecoin.js diff --git a/app/data/config/sushiswap-subgraph-v3/run-blocks.sh b/stack_orchestrator/data/config/sushiswap-subgraph-v3/run-blocks.sh similarity index 100% rename from app/data/config/sushiswap-subgraph-v3/run-blocks.sh rename to stack_orchestrator/data/config/sushiswap-subgraph-v3/run-blocks.sh diff --git a/app/data/config/sushiswap-subgraph-v3/run-v3.sh b/stack_orchestrator/data/config/sushiswap-subgraph-v3/run-v3.sh 
similarity index 100% rename from app/data/config/sushiswap-subgraph-v3/run-v3.sh rename to stack_orchestrator/data/config/sushiswap-subgraph-v3/run-v3.sh diff --git a/app/data/config/tx-spammer/tx-spammer.env b/stack_orchestrator/data/config/tx-spammer/tx-spammer.env similarity index 100% rename from app/data/config/tx-spammer/tx-spammer.env rename to stack_orchestrator/data/config/tx-spammer/tx-spammer.env diff --git a/app/data/config/watcher-azimuth/gateway-watchers.json b/stack_orchestrator/data/config/watcher-azimuth/gateway-watchers.json similarity index 100% rename from app/data/config/watcher-azimuth/gateway-watchers.json rename to stack_orchestrator/data/config/watcher-azimuth/gateway-watchers.json diff --git a/app/data/config/watcher-azimuth/merge-toml.js b/stack_orchestrator/data/config/watcher-azimuth/merge-toml.js similarity index 100% rename from app/data/config/watcher-azimuth/merge-toml.js rename to stack_orchestrator/data/config/watcher-azimuth/merge-toml.js diff --git a/app/data/config/watcher-azimuth/start-server.sh b/stack_orchestrator/data/config/watcher-azimuth/start-server.sh similarity index 100% rename from app/data/config/watcher-azimuth/start-server.sh rename to stack_orchestrator/data/config/watcher-azimuth/start-server.sh diff --git a/app/data/config/watcher-azimuth/watcher-config-template.toml b/stack_orchestrator/data/config/watcher-azimuth/watcher-config-template.toml similarity index 100% rename from app/data/config/watcher-azimuth/watcher-config-template.toml rename to stack_orchestrator/data/config/watcher-azimuth/watcher-config-template.toml diff --git a/app/data/config/watcher-azimuth/watcher-params.env b/stack_orchestrator/data/config/watcher-azimuth/watcher-params.env similarity index 100% rename from app/data/config/watcher-azimuth/watcher-params.env rename to stack_orchestrator/data/config/watcher-azimuth/watcher-params.env diff --git a/app/data/config/watcher-erc20/erc20-watcher.toml 
b/stack_orchestrator/data/config/watcher-erc20/erc20-watcher.toml similarity index 100% rename from app/data/config/watcher-erc20/erc20-watcher.toml rename to stack_orchestrator/data/config/watcher-erc20/erc20-watcher.toml diff --git a/app/data/config/watcher-erc721/erc721-watcher.toml b/stack_orchestrator/data/config/watcher-erc721/erc721-watcher.toml similarity index 100% rename from app/data/config/watcher-erc721/erc721-watcher.toml rename to stack_orchestrator/data/config/watcher-erc721/erc721-watcher.toml diff --git a/app/data/config/watcher-gelato/create-and-import-checkpoint.sh b/stack_orchestrator/data/config/watcher-gelato/create-and-import-checkpoint.sh similarity index 100% rename from app/data/config/watcher-gelato/create-and-import-checkpoint.sh rename to stack_orchestrator/data/config/watcher-gelato/create-and-import-checkpoint.sh diff --git a/app/data/config/watcher-gelato/start-job-runner.sh b/stack_orchestrator/data/config/watcher-gelato/start-job-runner.sh similarity index 100% rename from app/data/config/watcher-gelato/start-job-runner.sh rename to stack_orchestrator/data/config/watcher-gelato/start-job-runner.sh diff --git a/app/data/config/watcher-gelato/start-server.sh b/stack_orchestrator/data/config/watcher-gelato/start-server.sh similarity index 100% rename from app/data/config/watcher-gelato/start-server.sh rename to stack_orchestrator/data/config/watcher-gelato/start-server.sh diff --git a/app/data/config/watcher-gelato/watcher-config-template.toml b/stack_orchestrator/data/config/watcher-gelato/watcher-config-template.toml similarity index 100% rename from app/data/config/watcher-gelato/watcher-config-template.toml rename to stack_orchestrator/data/config/watcher-gelato/watcher-config-template.toml diff --git a/app/data/config/watcher-gelato/watcher-params.env b/stack_orchestrator/data/config/watcher-gelato/watcher-params.env similarity index 100% rename from app/data/config/watcher-gelato/watcher-params.env rename to 
stack_orchestrator/data/config/watcher-gelato/watcher-params.env diff --git a/app/data/config/watcher-mobymask-v2/deploy-and-generate-invite.sh b/stack_orchestrator/data/config/watcher-mobymask-v2/deploy-and-generate-invite.sh similarity index 100% rename from app/data/config/watcher-mobymask-v2/deploy-and-generate-invite.sh rename to stack_orchestrator/data/config/watcher-mobymask-v2/deploy-and-generate-invite.sh diff --git a/app/data/config/watcher-mobymask-v2/generate-peer-ids.sh b/stack_orchestrator/data/config/watcher-mobymask-v2/generate-peer-ids.sh similarity index 100% rename from app/data/config/watcher-mobymask-v2/generate-peer-ids.sh rename to stack_orchestrator/data/config/watcher-mobymask-v2/generate-peer-ids.sh diff --git a/app/data/config/watcher-mobymask-v2/mobymask-app-config.json b/stack_orchestrator/data/config/watcher-mobymask-v2/mobymask-app-config.json similarity index 100% rename from app/data/config/watcher-mobymask-v2/mobymask-app-config.json rename to stack_orchestrator/data/config/watcher-mobymask-v2/mobymask-app-config.json diff --git a/app/data/config/watcher-mobymask-v2/mobymask-app-start.sh b/stack_orchestrator/data/config/watcher-mobymask-v2/mobymask-app-start.sh similarity index 100% rename from app/data/config/watcher-mobymask-v2/mobymask-app-start.sh rename to stack_orchestrator/data/config/watcher-mobymask-v2/mobymask-app-start.sh diff --git a/app/data/config/watcher-mobymask-v2/mobymask-params.env b/stack_orchestrator/data/config/watcher-mobymask-v2/mobymask-params.env similarity index 100% rename from app/data/config/watcher-mobymask-v2/mobymask-params.env rename to stack_orchestrator/data/config/watcher-mobymask-v2/mobymask-params.env diff --git a/app/data/config/watcher-mobymask-v2/optimism-params.env b/stack_orchestrator/data/config/watcher-mobymask-v2/optimism-params.env similarity index 100% rename from app/data/config/watcher-mobymask-v2/optimism-params.env rename to 
stack_orchestrator/data/config/watcher-mobymask-v2/optimism-params.env diff --git a/app/data/config/watcher-mobymask-v2/secrets-template.json b/stack_orchestrator/data/config/watcher-mobymask-v2/secrets-template.json similarity index 100% rename from app/data/config/watcher-mobymask-v2/secrets-template.json rename to stack_orchestrator/data/config/watcher-mobymask-v2/secrets-template.json diff --git a/app/data/config/watcher-mobymask-v2/set-tests-env.sh b/stack_orchestrator/data/config/watcher-mobymask-v2/set-tests-env.sh similarity index 100% rename from app/data/config/watcher-mobymask-v2/set-tests-env.sh rename to stack_orchestrator/data/config/watcher-mobymask-v2/set-tests-env.sh diff --git a/app/data/config/watcher-mobymask-v2/start-server.sh b/stack_orchestrator/data/config/watcher-mobymask-v2/start-server.sh similarity index 100% rename from app/data/config/watcher-mobymask-v2/start-server.sh rename to stack_orchestrator/data/config/watcher-mobymask-v2/start-server.sh diff --git a/app/data/config/watcher-mobymask-v2/test-app-config.json b/stack_orchestrator/data/config/watcher-mobymask-v2/test-app-config.json similarity index 100% rename from app/data/config/watcher-mobymask-v2/test-app-config.json rename to stack_orchestrator/data/config/watcher-mobymask-v2/test-app-config.json diff --git a/app/data/config/watcher-mobymask-v2/test-app-start.sh b/stack_orchestrator/data/config/watcher-mobymask-v2/test-app-start.sh similarity index 100% rename from app/data/config/watcher-mobymask-v2/test-app-start.sh rename to stack_orchestrator/data/config/watcher-mobymask-v2/test-app-start.sh diff --git a/app/data/config/watcher-mobymask-v2/watcher-config-template.toml b/stack_orchestrator/data/config/watcher-mobymask-v2/watcher-config-template.toml similarity index 100% rename from app/data/config/watcher-mobymask-v2/watcher-config-template.toml rename to stack_orchestrator/data/config/watcher-mobymask-v2/watcher-config-template.toml diff --git 
a/app/data/config/watcher-mobymask-v3/deploy-and-generate-invite.sh b/stack_orchestrator/data/config/watcher-mobymask-v3/deploy-and-generate-invite.sh similarity index 100% rename from app/data/config/watcher-mobymask-v3/deploy-and-generate-invite.sh rename to stack_orchestrator/data/config/watcher-mobymask-v3/deploy-and-generate-invite.sh diff --git a/app/data/config/watcher-mobymask-v3/keys/12D3KooWAMjBkFCT9DtCnSDcxftxJzSuTBvzVojabv64cnEvX4AZ.json b/stack_orchestrator/data/config/watcher-mobymask-v3/keys/12D3KooWAMjBkFCT9DtCnSDcxftxJzSuTBvzVojabv64cnEvX4AZ.json similarity index 100% rename from app/data/config/watcher-mobymask-v3/keys/12D3KooWAMjBkFCT9DtCnSDcxftxJzSuTBvzVojabv64cnEvX4AZ.json rename to stack_orchestrator/data/config/watcher-mobymask-v3/keys/12D3KooWAMjBkFCT9DtCnSDcxftxJzSuTBvzVojabv64cnEvX4AZ.json diff --git a/app/data/config/watcher-mobymask-v3/keys/12D3KooWBNEbY3QS4y23ngupDw9PDc4bvNvRJGVRejjV9EZLjux5.json b/stack_orchestrator/data/config/watcher-mobymask-v3/keys/12D3KooWBNEbY3QS4y23ngupDw9PDc4bvNvRJGVRejjV9EZLjux5.json similarity index 100% rename from app/data/config/watcher-mobymask-v3/keys/12D3KooWBNEbY3QS4y23ngupDw9PDc4bvNvRJGVRejjV9EZLjux5.json rename to stack_orchestrator/data/config/watcher-mobymask-v3/keys/12D3KooWBNEbY3QS4y23ngupDw9PDc4bvNvRJGVRejjV9EZLjux5.json diff --git a/app/data/config/watcher-mobymask-v3/keys/12D3KooWSRH6ftgkAZsKZK7UX1Zr6Hx6YAsEepHqzopFszqfTxxi.json b/stack_orchestrator/data/config/watcher-mobymask-v3/keys/12D3KooWSRH6ftgkAZsKZK7UX1Zr6Hx6YAsEepHqzopFszqfTxxi.json similarity index 100% rename from app/data/config/watcher-mobymask-v3/keys/12D3KooWSRH6ftgkAZsKZK7UX1Zr6Hx6YAsEepHqzopFszqfTxxi.json rename to stack_orchestrator/data/config/watcher-mobymask-v3/keys/12D3KooWSRH6ftgkAZsKZK7UX1Zr6Hx6YAsEepHqzopFszqfTxxi.json diff --git a/app/data/config/watcher-mobymask-v3/mobymask-app-start.sh b/stack_orchestrator/data/config/watcher-mobymask-v3/mobymask-app-start.sh similarity index 100% rename from 
app/data/config/watcher-mobymask-v3/mobymask-app-start.sh rename to stack_orchestrator/data/config/watcher-mobymask-v3/mobymask-app-start.sh diff --git a/app/data/config/watcher-mobymask-v3/mobymask-params.env b/stack_orchestrator/data/config/watcher-mobymask-v3/mobymask-params.env similarity index 100% rename from app/data/config/watcher-mobymask-v3/mobymask-params.env rename to stack_orchestrator/data/config/watcher-mobymask-v3/mobymask-params.env diff --git a/app/data/config/watcher-mobymask-v3/start-server.sh b/stack_orchestrator/data/config/watcher-mobymask-v3/start-server.sh similarity index 100% rename from app/data/config/watcher-mobymask-v3/start-server.sh rename to stack_orchestrator/data/config/watcher-mobymask-v3/start-server.sh diff --git a/app/data/config/watcher-mobymask-v3/watcher-config-rates.toml b/stack_orchestrator/data/config/watcher-mobymask-v3/watcher-config-rates.toml similarity index 100% rename from app/data/config/watcher-mobymask-v3/watcher-config-rates.toml rename to stack_orchestrator/data/config/watcher-mobymask-v3/watcher-config-rates.toml diff --git a/app/data/config/watcher-mobymask-v3/watcher-config-template.toml b/stack_orchestrator/data/config/watcher-mobymask-v3/watcher-config-template.toml similarity index 100% rename from app/data/config/watcher-mobymask-v3/watcher-config-template.toml rename to stack_orchestrator/data/config/watcher-mobymask-v3/watcher-config-template.toml diff --git a/app/data/config/watcher-mobymask/mobymask-watcher-db.sql b/stack_orchestrator/data/config/watcher-mobymask/mobymask-watcher-db.sql similarity index 100% rename from app/data/config/watcher-mobymask/mobymask-watcher-db.sql rename to stack_orchestrator/data/config/watcher-mobymask/mobymask-watcher-db.sql diff --git a/app/data/config/watcher-mobymask/mobymask-watcher.toml b/stack_orchestrator/data/config/watcher-mobymask/mobymask-watcher.toml similarity index 100% rename from app/data/config/watcher-mobymask/mobymask-watcher.toml rename to 
stack_orchestrator/data/config/watcher-mobymask/mobymask-watcher.toml diff --git a/app/data/config/watcher-sushiswap/erc20-watcher.toml b/stack_orchestrator/data/config/watcher-sushiswap/erc20-watcher.toml similarity index 100% rename from app/data/config/watcher-sushiswap/erc20-watcher.toml rename to stack_orchestrator/data/config/watcher-sushiswap/erc20-watcher.toml diff --git a/app/data/config/watcher-sushiswap/lotus-params.env b/stack_orchestrator/data/config/watcher-sushiswap/lotus-params.env similarity index 100% rename from app/data/config/watcher-sushiswap/lotus-params.env rename to stack_orchestrator/data/config/watcher-sushiswap/lotus-params.env diff --git a/app/data/config/watcher-sushiswap/sushi-info-watcher-test.toml b/stack_orchestrator/data/config/watcher-sushiswap/sushi-info-watcher-test.toml similarity index 100% rename from app/data/config/watcher-sushiswap/sushi-info-watcher-test.toml rename to stack_orchestrator/data/config/watcher-sushiswap/sushi-info-watcher-test.toml diff --git a/app/data/config/watcher-sushiswap/sushi-info-watcher.toml b/stack_orchestrator/data/config/watcher-sushiswap/sushi-info-watcher.toml similarity index 100% rename from app/data/config/watcher-sushiswap/sushi-info-watcher.toml rename to stack_orchestrator/data/config/watcher-sushiswap/sushi-info-watcher.toml diff --git a/app/data/config/watcher-sushiswap/sushi-watcher-test.toml b/stack_orchestrator/data/config/watcher-sushiswap/sushi-watcher-test.toml similarity index 100% rename from app/data/config/watcher-sushiswap/sushi-watcher-test.toml rename to stack_orchestrator/data/config/watcher-sushiswap/sushi-watcher-test.toml diff --git a/app/data/config/watcher-sushiswap/sushi-watcher.toml b/stack_orchestrator/data/config/watcher-sushiswap/sushi-watcher.toml similarity index 100% rename from app/data/config/watcher-sushiswap/sushi-watcher.toml rename to stack_orchestrator/data/config/watcher-sushiswap/sushi-watcher.toml diff --git 
a/app/data/config/watcher-uniswap-v3/erc20-watcher.toml b/stack_orchestrator/data/config/watcher-uniswap-v3/erc20-watcher.toml similarity index 100% rename from app/data/config/watcher-uniswap-v3/erc20-watcher.toml rename to stack_orchestrator/data/config/watcher-uniswap-v3/erc20-watcher.toml diff --git a/app/data/config/watcher-uniswap-v3/run.sh b/stack_orchestrator/data/config/watcher-uniswap-v3/run.sh similarity index 100% rename from app/data/config/watcher-uniswap-v3/run.sh rename to stack_orchestrator/data/config/watcher-uniswap-v3/run.sh diff --git a/app/data/config/watcher-uniswap-v3/uni-info-watcher.toml b/stack_orchestrator/data/config/watcher-uniswap-v3/uni-info-watcher.toml similarity index 100% rename from app/data/config/watcher-uniswap-v3/uni-info-watcher.toml rename to stack_orchestrator/data/config/watcher-uniswap-v3/uni-info-watcher.toml diff --git a/app/data/config/watcher-uniswap-v3/uni-watcher.toml b/stack_orchestrator/data/config/watcher-uniswap-v3/uni-watcher.toml similarity index 100% rename from app/data/config/watcher-uniswap-v3/uni-watcher.toml rename to stack_orchestrator/data/config/watcher-uniswap-v3/uni-watcher.toml diff --git a/app/data/config/watcher-uniswap-v3/watch-contract.sh b/stack_orchestrator/data/config/watcher-uniswap-v3/watch-contract.sh similarity index 100% rename from app/data/config/watcher-uniswap-v3/watch-contract.sh rename to stack_orchestrator/data/config/watcher-uniswap-v3/watch-contract.sh diff --git a/app/data/container-build/build-base.sh b/stack_orchestrator/data/container-build/build-base.sh similarity index 100% rename from app/data/container-build/build-base.sh rename to stack_orchestrator/data/container-build/build-base.sh diff --git a/app/data/container-build/cerc-act-runner-task-executor/build.sh b/stack_orchestrator/data/container-build/cerc-act-runner-task-executor/build.sh similarity index 100% rename from app/data/container-build/cerc-act-runner-task-executor/build.sh rename to 
stack_orchestrator/data/container-build/cerc-act-runner-task-executor/build.sh diff --git a/app/data/container-build/cerc-act-runner/build.sh b/stack_orchestrator/data/container-build/cerc-act-runner/build.sh similarity index 100% rename from app/data/container-build/cerc-act-runner/build.sh rename to stack_orchestrator/data/container-build/cerc-act-runner/build.sh diff --git a/app/data/container-build/cerc-builder-gerbil/Dockerfile b/stack_orchestrator/data/container-build/cerc-builder-gerbil/Dockerfile similarity index 100% rename from app/data/container-build/cerc-builder-gerbil/Dockerfile rename to stack_orchestrator/data/container-build/cerc-builder-gerbil/Dockerfile diff --git a/app/data/container-build/cerc-builder-gerbil/README.md b/stack_orchestrator/data/container-build/cerc-builder-gerbil/README.md similarity index 100% rename from app/data/container-build/cerc-builder-gerbil/README.md rename to stack_orchestrator/data/container-build/cerc-builder-gerbil/README.md diff --git a/app/data/container-build/cerc-builder-gerbil/entrypoint.sh b/stack_orchestrator/data/container-build/cerc-builder-gerbil/entrypoint.sh similarity index 100% rename from app/data/container-build/cerc-builder-gerbil/entrypoint.sh rename to stack_orchestrator/data/container-build/cerc-builder-gerbil/entrypoint.sh diff --git a/app/data/container-build/cerc-builder-gerbil/install-dependencies.sh b/stack_orchestrator/data/container-build/cerc-builder-gerbil/install-dependencies.sh similarity index 100% rename from app/data/container-build/cerc-builder-gerbil/install-dependencies.sh rename to stack_orchestrator/data/container-build/cerc-builder-gerbil/install-dependencies.sh diff --git a/app/data/container-build/cerc-builder-js/Dockerfile b/stack_orchestrator/data/container-build/cerc-builder-js/Dockerfile similarity index 100% rename from app/data/container-build/cerc-builder-js/Dockerfile rename to stack_orchestrator/data/container-build/cerc-builder-js/Dockerfile diff --git 
a/app/data/container-build/cerc-builder-js/README.md b/stack_orchestrator/data/container-build/cerc-builder-js/README.md similarity index 100% rename from app/data/container-build/cerc-builder-js/README.md rename to stack_orchestrator/data/container-build/cerc-builder-js/README.md diff --git a/app/data/container-build/cerc-builder-js/build-npm-package-local-dependencies.sh b/stack_orchestrator/data/container-build/cerc-builder-js/build-npm-package-local-dependencies.sh similarity index 100% rename from app/data/container-build/cerc-builder-js/build-npm-package-local-dependencies.sh rename to stack_orchestrator/data/container-build/cerc-builder-js/build-npm-package-local-dependencies.sh diff --git a/app/data/container-build/cerc-builder-js/build-npm-package.sh b/stack_orchestrator/data/container-build/cerc-builder-js/build-npm-package.sh similarity index 100% rename from app/data/container-build/cerc-builder-js/build-npm-package.sh rename to stack_orchestrator/data/container-build/cerc-builder-js/build-npm-package.sh diff --git a/app/data/container-build/cerc-builder-js/check-uid.sh b/stack_orchestrator/data/container-build/cerc-builder-js/check-uid.sh similarity index 100% rename from app/data/container-build/cerc-builder-js/check-uid.sh rename to stack_orchestrator/data/container-build/cerc-builder-js/check-uid.sh diff --git a/app/data/container-build/cerc-builder-js/entrypoint.sh b/stack_orchestrator/data/container-build/cerc-builder-js/entrypoint.sh similarity index 100% rename from app/data/container-build/cerc-builder-js/entrypoint.sh rename to stack_orchestrator/data/container-build/cerc-builder-js/entrypoint.sh diff --git a/app/data/container-build/cerc-builder-js/yarn-local-registry-fixup.sh b/stack_orchestrator/data/container-build/cerc-builder-js/yarn-local-registry-fixup.sh similarity index 100% rename from app/data/container-build/cerc-builder-js/yarn-local-registry-fixup.sh rename to 
stack_orchestrator/data/container-build/cerc-builder-js/yarn-local-registry-fixup.sh diff --git a/app/data/container-build/cerc-eth-api-proxy/build.sh b/stack_orchestrator/data/container-build/cerc-eth-api-proxy/build.sh similarity index 100% rename from app/data/container-build/cerc-eth-api-proxy/build.sh rename to stack_orchestrator/data/container-build/cerc-eth-api-proxy/build.sh diff --git a/app/data/container-build/cerc-eth-probe/build.sh b/stack_orchestrator/data/container-build/cerc-eth-probe/build.sh similarity index 100% rename from app/data/container-build/cerc-eth-probe/build.sh rename to stack_orchestrator/data/container-build/cerc-eth-probe/build.sh diff --git a/app/data/container-build/cerc-eth-statediff-fill-service/build.sh b/stack_orchestrator/data/container-build/cerc-eth-statediff-fill-service/build.sh similarity index 100% rename from app/data/container-build/cerc-eth-statediff-fill-service/build.sh rename to stack_orchestrator/data/container-build/cerc-eth-statediff-fill-service/build.sh diff --git a/app/data/container-build/cerc-eth-statediff-service/build.sh b/stack_orchestrator/data/container-build/cerc-eth-statediff-service/build.sh similarity index 100% rename from app/data/container-build/cerc-eth-statediff-service/build.sh rename to stack_orchestrator/data/container-build/cerc-eth-statediff-service/build.sh diff --git a/app/data/container-build/cerc-fixturenet-eth-genesis/Dockerfile b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-genesis/Dockerfile similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-genesis/Dockerfile rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-genesis/Dockerfile diff --git a/app/data/container-build/cerc-fixturenet-eth-genesis/build.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-genesis/build.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-genesis/build.sh rename to 
stack_orchestrator/data/container-build/cerc-fixturenet-eth-genesis/build.sh diff --git a/app/data/container-build/cerc-fixturenet-eth-genesis/genesis/Makefile b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-genesis/genesis/Makefile similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-genesis/genesis/Makefile rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-genesis/genesis/Makefile diff --git a/app/data/container-build/cerc-fixturenet-eth-genesis/genesis/accounts/import_keys.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-genesis/genesis/accounts/import_keys.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-genesis/genesis/accounts/import_keys.sh rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-genesis/genesis/accounts/import_keys.sh diff --git a/app/data/container-build/cerc-fixturenet-eth-genesis/genesis/accounts/mnemonic_to_csv.py b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-genesis/genesis/accounts/mnemonic_to_csv.py similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-genesis/genesis/accounts/mnemonic_to_csv.py rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-genesis/genesis/accounts/mnemonic_to_csv.py diff --git a/app/data/container-build/cerc-fixturenet-eth-genesis/genesis/el/build_el.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-genesis/genesis/el/build_el.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-genesis/genesis/el/build_el.sh rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-genesis/genesis/el/build_el.sh diff --git a/app/data/container-build/cerc-fixturenet-eth-genesis/genesis/el/el-config.yaml b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-genesis/genesis/el/el-config.yaml similarity index 100% rename from 
app/data/container-build/cerc-fixturenet-eth-genesis/genesis/el/el-config.yaml rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-genesis/genesis/el/el-config.yaml diff --git a/app/data/container-build/cerc-fixturenet-eth-geth/Dockerfile b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-geth/Dockerfile similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-geth/Dockerfile rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-geth/Dockerfile diff --git a/app/data/container-build/cerc-fixturenet-eth-geth/build.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-geth/build.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-geth/build.sh rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-geth/build.sh diff --git a/app/data/container-build/cerc-fixturenet-eth-geth/run-el.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-geth/run-el.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-geth/run-el.sh rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-geth/run-el.sh diff --git a/app/data/container-build/cerc-fixturenet-eth-lighthouse/Dockerfile b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/Dockerfile similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-lighthouse/Dockerfile rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/Dockerfile diff --git a/app/data/container-build/cerc-fixturenet-eth-lighthouse/build.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/build.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-lighthouse/build.sh rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/build.sh diff --git a/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/Makefile 
b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/Makefile similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/Makefile rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/Makefile diff --git a/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/beacon_node.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/beacon_node.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/beacon_node.sh rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/beacon_node.sh diff --git a/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/bootnode.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/bootnode.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/bootnode.sh rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/bootnode.sh diff --git a/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/build_cl.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/build_cl.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/build_cl.sh rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/build_cl.sh diff --git a/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/ready.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/ready.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/ready.sh rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/ready.sh diff --git 
a/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/reset_genesis_time.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/reset_genesis_time.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/reset_genesis_time.sh rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/reset_genesis_time.sh diff --git a/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/validator_client.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/validator_client.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/validator_client.sh rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/validator_client.sh diff --git a/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/vars.env b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/vars.env similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/vars.env rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/vars.env diff --git a/app/data/container-build/cerc-fixturenet-eth-lighthouse/run-cl.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/run-cl.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-lighthouse/run-cl.sh rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/run-cl.sh diff --git a/app/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/export-ethdb.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/export-ethdb.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/export-ethdb.sh rename to 
stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/export-ethdb.sh diff --git a/app/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/status-internal.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/status-internal.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/status-internal.sh rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/status-internal.sh diff --git a/app/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/status.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/status.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/status.sh rename to stack_orchestrator/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/status.sh diff --git a/app/data/container-build/cerc-fixturenet-plugeth-plugeth/Dockerfile b/stack_orchestrator/data/container-build/cerc-fixturenet-plugeth-plugeth/Dockerfile similarity index 100% rename from app/data/container-build/cerc-fixturenet-plugeth-plugeth/Dockerfile rename to stack_orchestrator/data/container-build/cerc-fixturenet-plugeth-plugeth/Dockerfile diff --git a/app/data/container-build/cerc-fixturenet-plugeth-plugeth/build.sh b/stack_orchestrator/data/container-build/cerc-fixturenet-plugeth-plugeth/build.sh similarity index 100% rename from app/data/container-build/cerc-fixturenet-plugeth-plugeth/build.sh rename to stack_orchestrator/data/container-build/cerc-fixturenet-plugeth-plugeth/build.sh diff --git a/app/data/container-build/cerc-foundry/build.sh b/stack_orchestrator/data/container-build/cerc-foundry/build.sh similarity index 100% rename from app/data/container-build/cerc-foundry/build.sh rename to stack_orchestrator/data/container-build/cerc-foundry/build.sh diff --git a/app/data/container-build/cerc-go-ethereum-foundry/Dockerfile 
b/stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/Dockerfile similarity index 100% rename from app/data/container-build/cerc-go-ethereum-foundry/Dockerfile rename to stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/Dockerfile diff --git a/app/data/container-build/cerc-go-ethereum-foundry/build.sh b/stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/build.sh similarity index 100% rename from app/data/container-build/cerc-go-ethereum-foundry/build.sh rename to stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/build.sh diff --git a/app/data/container-build/cerc-go-ethereum-foundry/deploy-local-network.sh b/stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/deploy-local-network.sh similarity index 100% rename from app/data/container-build/cerc-go-ethereum-foundry/deploy-local-network.sh rename to stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/deploy-local-network.sh diff --git a/app/data/container-build/cerc-go-ethereum-foundry/genesis-automine.json b/stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/genesis-automine.json similarity index 100% rename from app/data/container-build/cerc-go-ethereum-foundry/genesis-automine.json rename to stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/genesis-automine.json diff --git a/app/data/container-build/cerc-go-ethereum-foundry/genesis.json b/stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/genesis.json similarity index 100% rename from app/data/container-build/cerc-go-ethereum-foundry/genesis.json rename to stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/genesis.json diff --git a/app/data/container-build/cerc-go-ethereum-foundry/start-private-network.sh b/stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/start-private-network.sh similarity index 100% rename from app/data/container-build/cerc-go-ethereum-foundry/start-private-network.sh rename to 
stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/start-private-network.sh diff --git a/app/data/container-build/cerc-go-ethereum-foundry/stateful/foundry.toml b/stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/stateful/foundry.toml similarity index 100% rename from app/data/container-build/cerc-go-ethereum-foundry/stateful/foundry.toml rename to stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/stateful/foundry.toml diff --git a/app/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/LICENSE b/stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/LICENSE similarity index 100% rename from app/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/LICENSE rename to stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/LICENSE diff --git a/app/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/Makefile b/stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/Makefile similarity index 100% rename from app/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/Makefile rename to stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/Makefile diff --git a/app/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/default.nix b/stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/default.nix similarity index 100% rename from app/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/default.nix rename to stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/default.nix diff --git a/app/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/demo/demo.sol b/stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/demo/demo.sol similarity index 100% rename from 
app/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/demo/demo.sol rename to stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/demo/demo.sol diff --git a/app/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/src/test.sol b/stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/src/test.sol similarity index 100% rename from app/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/src/test.sol rename to stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/stateful/lib/ds-test/src/test.sol diff --git a/app/data/container-build/cerc-go-ethereum-foundry/stateful/src/Stateful.sol b/stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/stateful/src/Stateful.sol similarity index 100% rename from app/data/container-build/cerc-go-ethereum-foundry/stateful/src/Stateful.sol rename to stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/stateful/src/Stateful.sol diff --git a/app/data/container-build/cerc-go-ethereum-foundry/stateful/src/test/Stateful.t.sol b/stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/stateful/src/test/Stateful.t.sol similarity index 100% rename from app/data/container-build/cerc-go-ethereum-foundry/stateful/src/test/Stateful.t.sol rename to stack_orchestrator/data/container-build/cerc-go-ethereum-foundry/stateful/src/test/Stateful.t.sol diff --git a/app/data/container-build/cerc-go-ethereum/build.sh b/stack_orchestrator/data/container-build/cerc-go-ethereum/build.sh similarity index 100% rename from app/data/container-build/cerc-go-ethereum/build.sh rename to stack_orchestrator/data/container-build/cerc-go-ethereum/build.sh diff --git a/app/data/container-build/cerc-go-nitro/Dockerfile b/stack_orchestrator/data/container-build/cerc-go-nitro/Dockerfile similarity index 100% rename from app/data/container-build/cerc-go-nitro/Dockerfile rename to 
stack_orchestrator/data/container-build/cerc-go-nitro/Dockerfile diff --git a/app/data/container-build/cerc-go-nitro/build.sh b/stack_orchestrator/data/container-build/cerc-go-nitro/build.sh similarity index 100% rename from app/data/container-build/cerc-go-nitro/build.sh rename to stack_orchestrator/data/container-build/cerc-go-nitro/build.sh diff --git a/app/data/container-build/cerc-go-opera/build.sh b/stack_orchestrator/data/container-build/cerc-go-opera/build.sh similarity index 100% rename from app/data/container-build/cerc-go-opera/build.sh rename to stack_orchestrator/data/container-build/cerc-go-opera/build.sh diff --git a/app/data/container-build/cerc-graph-node/build.sh b/stack_orchestrator/data/container-build/cerc-graph-node/build.sh similarity index 100% rename from app/data/container-build/cerc-graph-node/build.sh rename to stack_orchestrator/data/container-build/cerc-graph-node/build.sh diff --git a/app/data/container-build/cerc-ipld-eth-beacon-db/build.sh b/stack_orchestrator/data/container-build/cerc-ipld-eth-beacon-db/build.sh similarity index 100% rename from app/data/container-build/cerc-ipld-eth-beacon-db/build.sh rename to stack_orchestrator/data/container-build/cerc-ipld-eth-beacon-db/build.sh diff --git a/app/data/container-build/cerc-ipld-eth-beacon-indexer/build.sh b/stack_orchestrator/data/container-build/cerc-ipld-eth-beacon-indexer/build.sh similarity index 100% rename from app/data/container-build/cerc-ipld-eth-beacon-indexer/build.sh rename to stack_orchestrator/data/container-build/cerc-ipld-eth-beacon-indexer/build.sh diff --git a/app/data/container-build/cerc-ipld-eth-db/build.sh b/stack_orchestrator/data/container-build/cerc-ipld-eth-db/build.sh similarity index 100% rename from app/data/container-build/cerc-ipld-eth-db/build.sh rename to stack_orchestrator/data/container-build/cerc-ipld-eth-db/build.sh diff --git a/app/data/container-build/cerc-ipld-eth-server/build.sh 
b/stack_orchestrator/data/container-build/cerc-ipld-eth-server/build.sh similarity index 100% rename from app/data/container-build/cerc-ipld-eth-server/build.sh rename to stack_orchestrator/data/container-build/cerc-ipld-eth-server/build.sh diff --git a/app/data/container-build/cerc-keycloak-reg-api/build.sh b/stack_orchestrator/data/container-build/cerc-keycloak-reg-api/build.sh similarity index 100% rename from app/data/container-build/cerc-keycloak-reg-api/build.sh rename to stack_orchestrator/data/container-build/cerc-keycloak-reg-api/build.sh diff --git a/app/data/container-build/cerc-keycloak-reg-ui/build.sh b/stack_orchestrator/data/container-build/cerc-keycloak-reg-ui/build.sh similarity index 100% rename from app/data/container-build/cerc-keycloak-reg-ui/build.sh rename to stack_orchestrator/data/container-build/cerc-keycloak-reg-ui/build.sh diff --git a/app/data/container-build/cerc-keycloak/Dockerfile b/stack_orchestrator/data/container-build/cerc-keycloak/Dockerfile similarity index 100% rename from app/data/container-build/cerc-keycloak/Dockerfile rename to stack_orchestrator/data/container-build/cerc-keycloak/Dockerfile diff --git a/app/data/container-build/cerc-keycloak/build.sh b/stack_orchestrator/data/container-build/cerc-keycloak/build.sh similarity index 100% rename from app/data/container-build/cerc-keycloak/build.sh rename to stack_orchestrator/data/container-build/cerc-keycloak/build.sh diff --git a/app/data/container-build/cerc-laconic-console-host/Dockerfile b/stack_orchestrator/data/container-build/cerc-laconic-console-host/Dockerfile similarity index 100% rename from app/data/container-build/cerc-laconic-console-host/Dockerfile rename to stack_orchestrator/data/container-build/cerc-laconic-console-host/Dockerfile diff --git a/app/data/container-build/cerc-laconic-console-host/build.sh b/stack_orchestrator/data/container-build/cerc-laconic-console-host/build.sh similarity index 100% rename from 
app/data/container-build/cerc-laconic-console-host/build.sh rename to stack_orchestrator/data/container-build/cerc-laconic-console-host/build.sh diff --git a/app/data/container-build/cerc-laconic-console-host/config.yml b/stack_orchestrator/data/container-build/cerc-laconic-console-host/config.yml similarity index 100% rename from app/data/container-build/cerc-laconic-console-host/config.yml rename to stack_orchestrator/data/container-build/cerc-laconic-console-host/config.yml diff --git a/app/data/container-build/cerc-laconic-dot-com/build.sh b/stack_orchestrator/data/container-build/cerc-laconic-dot-com/build.sh similarity index 100% rename from app/data/container-build/cerc-laconic-dot-com/build.sh rename to stack_orchestrator/data/container-build/cerc-laconic-dot-com/build.sh diff --git a/app/data/container-build/cerc-laconic-registry-cli/Dockerfile b/stack_orchestrator/data/container-build/cerc-laconic-registry-cli/Dockerfile similarity index 100% rename from app/data/container-build/cerc-laconic-registry-cli/Dockerfile rename to stack_orchestrator/data/container-build/cerc-laconic-registry-cli/Dockerfile diff --git a/app/data/container-build/cerc-laconic-registry-cli/build.sh b/stack_orchestrator/data/container-build/cerc-laconic-registry-cli/build.sh similarity index 100% rename from app/data/container-build/cerc-laconic-registry-cli/build.sh rename to stack_orchestrator/data/container-build/cerc-laconic-registry-cli/build.sh diff --git a/app/data/container-build/cerc-laconic-registry-cli/create-demo-records.sh b/stack_orchestrator/data/container-build/cerc-laconic-registry-cli/create-demo-records.sh similarity index 100% rename from app/data/container-build/cerc-laconic-registry-cli/create-demo-records.sh rename to stack_orchestrator/data/container-build/cerc-laconic-registry-cli/create-demo-records.sh diff --git a/app/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-1.yml 
b/stack_orchestrator/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-1.yml similarity index 100% rename from app/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-1.yml rename to stack_orchestrator/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-1.yml diff --git a/app/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-2.yml b/stack_orchestrator/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-2.yml similarity index 100% rename from app/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-2.yml rename to stack_orchestrator/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-2.yml diff --git a/app/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-3.yml b/stack_orchestrator/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-3.yml similarity index 100% rename from app/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-3.yml rename to stack_orchestrator/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-3.yml diff --git a/app/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-4.yml b/stack_orchestrator/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-4.yml similarity index 100% rename from app/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-4.yml rename to stack_orchestrator/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-4.yml diff --git a/app/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-5.yml b/stack_orchestrator/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-5.yml similarity index 100% rename from app/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-5.yml rename to stack_orchestrator/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-5.yml diff 
--git a/app/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-6.yml b/stack_orchestrator/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-6.yml similarity index 100% rename from app/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-6.yml rename to stack_orchestrator/data/container-build/cerc-laconic-registry-cli/demo-records/demo-record-6.yml diff --git a/app/data/container-build/cerc-laconic-registry-cli/import-address.sh b/stack_orchestrator/data/container-build/cerc-laconic-registry-cli/import-address.sh similarity index 100% rename from app/data/container-build/cerc-laconic-registry-cli/import-address.sh rename to stack_orchestrator/data/container-build/cerc-laconic-registry-cli/import-address.sh diff --git a/app/data/container-build/cerc-laconic-registry-cli/import-key.sh b/stack_orchestrator/data/container-build/cerc-laconic-registry-cli/import-key.sh similarity index 100% rename from app/data/container-build/cerc-laconic-registry-cli/import-key.sh rename to stack_orchestrator/data/container-build/cerc-laconic-registry-cli/import-key.sh diff --git a/app/data/container-build/cerc-laconicd/build.sh b/stack_orchestrator/data/container-build/cerc-laconicd/build.sh similarity index 100% rename from app/data/container-build/cerc-laconicd/build.sh rename to stack_orchestrator/data/container-build/cerc-laconicd/build.sh diff --git a/app/data/container-build/cerc-lasso/build.sh b/stack_orchestrator/data/container-build/cerc-lasso/build.sh similarity index 100% rename from app/data/container-build/cerc-lasso/build.sh rename to stack_orchestrator/data/container-build/cerc-lasso/build.sh diff --git a/app/data/container-build/cerc-lighthouse-cli/build.sh b/stack_orchestrator/data/container-build/cerc-lighthouse-cli/build.sh similarity index 100% rename from app/data/container-build/cerc-lighthouse-cli/build.sh rename to stack_orchestrator/data/container-build/cerc-lighthouse-cli/build.sh diff --git 
a/app/data/container-build/cerc-lighthouse/Dockerfile b/stack_orchestrator/data/container-build/cerc-lighthouse/Dockerfile similarity index 100% rename from app/data/container-build/cerc-lighthouse/Dockerfile rename to stack_orchestrator/data/container-build/cerc-lighthouse/Dockerfile diff --git a/app/data/container-build/cerc-lighthouse/build.sh b/stack_orchestrator/data/container-build/cerc-lighthouse/build.sh similarity index 100% rename from app/data/container-build/cerc-lighthouse/build.sh rename to stack_orchestrator/data/container-build/cerc-lighthouse/build.sh diff --git a/app/data/container-build/cerc-lighthouse/start-lighthouse.sh b/stack_orchestrator/data/container-build/cerc-lighthouse/start-lighthouse.sh similarity index 100% rename from app/data/container-build/cerc-lighthouse/start-lighthouse.sh rename to stack_orchestrator/data/container-build/cerc-lighthouse/start-lighthouse.sh diff --git a/app/data/container-build/cerc-lotus/Dockerfile b/stack_orchestrator/data/container-build/cerc-lotus/Dockerfile similarity index 100% rename from app/data/container-build/cerc-lotus/Dockerfile rename to stack_orchestrator/data/container-build/cerc-lotus/Dockerfile diff --git a/app/data/container-build/cerc-lotus/build.sh b/stack_orchestrator/data/container-build/cerc-lotus/build.sh similarity index 100% rename from app/data/container-build/cerc-lotus/build.sh rename to stack_orchestrator/data/container-build/cerc-lotus/build.sh diff --git a/app/data/container-build/cerc-mobymask-snap/Dockerfile b/stack_orchestrator/data/container-build/cerc-mobymask-snap/Dockerfile similarity index 100% rename from app/data/container-build/cerc-mobymask-snap/Dockerfile rename to stack_orchestrator/data/container-build/cerc-mobymask-snap/Dockerfile diff --git a/app/data/container-build/cerc-mobymask-snap/build.sh b/stack_orchestrator/data/container-build/cerc-mobymask-snap/build.sh similarity index 100% rename from app/data/container-build/cerc-mobymask-snap/build.sh rename to 
stack_orchestrator/data/container-build/cerc-mobymask-snap/build.sh diff --git a/app/data/container-build/cerc-mobymask-ui/Dockerfile b/stack_orchestrator/data/container-build/cerc-mobymask-ui/Dockerfile similarity index 100% rename from app/data/container-build/cerc-mobymask-ui/Dockerfile rename to stack_orchestrator/data/container-build/cerc-mobymask-ui/Dockerfile diff --git a/app/data/container-build/cerc-mobymask-ui/build.sh b/stack_orchestrator/data/container-build/cerc-mobymask-ui/build.sh similarity index 100% rename from app/data/container-build/cerc-mobymask-ui/build.sh rename to stack_orchestrator/data/container-build/cerc-mobymask-ui/build.sh diff --git a/app/data/container-build/cerc-mobymask/Dockerfile b/stack_orchestrator/data/container-build/cerc-mobymask/Dockerfile similarity index 100% rename from app/data/container-build/cerc-mobymask/Dockerfile rename to stack_orchestrator/data/container-build/cerc-mobymask/Dockerfile diff --git a/app/data/container-build/cerc-mobymask/build.sh b/stack_orchestrator/data/container-build/cerc-mobymask/build.sh similarity index 100% rename from app/data/container-build/cerc-mobymask/build.sh rename to stack_orchestrator/data/container-build/cerc-mobymask/build.sh diff --git a/app/data/container-build/cerc-nitro-contracts/Dockerfile b/stack_orchestrator/data/container-build/cerc-nitro-contracts/Dockerfile similarity index 100% rename from app/data/container-build/cerc-nitro-contracts/Dockerfile rename to stack_orchestrator/data/container-build/cerc-nitro-contracts/Dockerfile diff --git a/app/data/container-build/cerc-nitro-contracts/build.sh b/stack_orchestrator/data/container-build/cerc-nitro-contracts/build.sh similarity index 100% rename from app/data/container-build/cerc-nitro-contracts/build.sh rename to stack_orchestrator/data/container-build/cerc-nitro-contracts/build.sh diff --git a/app/data/container-build/cerc-nitro-rpc-client/Dockerfile 
b/stack_orchestrator/data/container-build/cerc-nitro-rpc-client/Dockerfile similarity index 100% rename from app/data/container-build/cerc-nitro-rpc-client/Dockerfile rename to stack_orchestrator/data/container-build/cerc-nitro-rpc-client/Dockerfile diff --git a/app/data/container-build/cerc-nitro-rpc-client/build.sh b/stack_orchestrator/data/container-build/cerc-nitro-rpc-client/build.sh similarity index 100% rename from app/data/container-build/cerc-nitro-rpc-client/build.sh rename to stack_orchestrator/data/container-build/cerc-nitro-rpc-client/build.sh diff --git a/app/data/container-build/cerc-optimism-contracts/Dockerfile b/stack_orchestrator/data/container-build/cerc-optimism-contracts/Dockerfile similarity index 100% rename from app/data/container-build/cerc-optimism-contracts/Dockerfile rename to stack_orchestrator/data/container-build/cerc-optimism-contracts/Dockerfile diff --git a/app/data/container-build/cerc-optimism-contracts/build.sh b/stack_orchestrator/data/container-build/cerc-optimism-contracts/build.sh similarity index 100% rename from app/data/container-build/cerc-optimism-contracts/build.sh rename to stack_orchestrator/data/container-build/cerc-optimism-contracts/build.sh diff --git a/app/data/container-build/cerc-optimism-l2geth/build.sh b/stack_orchestrator/data/container-build/cerc-optimism-l2geth/build.sh similarity index 100% rename from app/data/container-build/cerc-optimism-l2geth/build.sh rename to stack_orchestrator/data/container-build/cerc-optimism-l2geth/build.sh diff --git a/app/data/container-build/cerc-optimism-op-batcher/Dockerfile b/stack_orchestrator/data/container-build/cerc-optimism-op-batcher/Dockerfile similarity index 100% rename from app/data/container-build/cerc-optimism-op-batcher/Dockerfile rename to stack_orchestrator/data/container-build/cerc-optimism-op-batcher/Dockerfile diff --git a/app/data/container-build/cerc-optimism-op-batcher/build.sh 
b/stack_orchestrator/data/container-build/cerc-optimism-op-batcher/build.sh similarity index 100% rename from app/data/container-build/cerc-optimism-op-batcher/build.sh rename to stack_orchestrator/data/container-build/cerc-optimism-op-batcher/build.sh diff --git a/app/data/container-build/cerc-optimism-op-node/Dockerfile b/stack_orchestrator/data/container-build/cerc-optimism-op-node/Dockerfile similarity index 100% rename from app/data/container-build/cerc-optimism-op-node/Dockerfile rename to stack_orchestrator/data/container-build/cerc-optimism-op-node/Dockerfile diff --git a/app/data/container-build/cerc-optimism-op-node/build.sh b/stack_orchestrator/data/container-build/cerc-optimism-op-node/build.sh similarity index 100% rename from app/data/container-build/cerc-optimism-op-node/build.sh rename to stack_orchestrator/data/container-build/cerc-optimism-op-node/build.sh diff --git a/app/data/container-build/cerc-optimism-op-proposer/Dockerfile b/stack_orchestrator/data/container-build/cerc-optimism-op-proposer/Dockerfile similarity index 100% rename from app/data/container-build/cerc-optimism-op-proposer/Dockerfile rename to stack_orchestrator/data/container-build/cerc-optimism-op-proposer/Dockerfile diff --git a/app/data/container-build/cerc-optimism-op-proposer/build.sh b/stack_orchestrator/data/container-build/cerc-optimism-op-proposer/build.sh similarity index 100% rename from app/data/container-build/cerc-optimism-op-proposer/build.sh rename to stack_orchestrator/data/container-build/cerc-optimism-op-proposer/build.sh diff --git a/app/data/container-build/cerc-plugeth-statediff/build.sh b/stack_orchestrator/data/container-build/cerc-plugeth-statediff/build.sh similarity index 100% rename from app/data/container-build/cerc-plugeth-statediff/build.sh rename to stack_orchestrator/data/container-build/cerc-plugeth-statediff/build.sh diff --git a/app/data/container-build/cerc-plugeth-with-plugins/Dockerfile 
b/stack_orchestrator/data/container-build/cerc-plugeth-with-plugins/Dockerfile similarity index 100% rename from app/data/container-build/cerc-plugeth-with-plugins/Dockerfile rename to stack_orchestrator/data/container-build/cerc-plugeth-with-plugins/Dockerfile diff --git a/app/data/container-build/cerc-plugeth-with-plugins/build.sh b/stack_orchestrator/data/container-build/cerc-plugeth-with-plugins/build.sh similarity index 100% rename from app/data/container-build/cerc-plugeth-with-plugins/build.sh rename to stack_orchestrator/data/container-build/cerc-plugeth-with-plugins/build.sh diff --git a/app/data/container-build/cerc-plugeth/build.sh b/stack_orchestrator/data/container-build/cerc-plugeth/build.sh similarity index 100% rename from app/data/container-build/cerc-plugeth/build.sh rename to stack_orchestrator/data/container-build/cerc-plugeth/build.sh diff --git a/app/data/container-build/cerc-pocket/build.sh b/stack_orchestrator/data/container-build/cerc-pocket/build.sh similarity index 100% rename from app/data/container-build/cerc-pocket/build.sh rename to stack_orchestrator/data/container-build/cerc-pocket/build.sh diff --git a/app/data/container-build/cerc-ponder/Dockerfile b/stack_orchestrator/data/container-build/cerc-ponder/Dockerfile similarity index 100% rename from app/data/container-build/cerc-ponder/Dockerfile rename to stack_orchestrator/data/container-build/cerc-ponder/Dockerfile diff --git a/app/data/container-build/cerc-ponder/build.sh b/stack_orchestrator/data/container-build/cerc-ponder/build.sh similarity index 100% rename from app/data/container-build/cerc-ponder/build.sh rename to stack_orchestrator/data/container-build/cerc-ponder/build.sh diff --git a/app/data/container-build/cerc-react-peer/Dockerfile b/stack_orchestrator/data/container-build/cerc-react-peer/Dockerfile similarity index 100% rename from app/data/container-build/cerc-react-peer/Dockerfile rename to stack_orchestrator/data/container-build/cerc-react-peer/Dockerfile diff 
--git a/app/data/container-build/cerc-react-peer/apply-webapp-config.sh b/stack_orchestrator/data/container-build/cerc-react-peer/apply-webapp-config.sh similarity index 100% rename from app/data/container-build/cerc-react-peer/apply-webapp-config.sh rename to stack_orchestrator/data/container-build/cerc-react-peer/apply-webapp-config.sh diff --git a/app/data/container-build/cerc-react-peer/build.sh b/stack_orchestrator/data/container-build/cerc-react-peer/build.sh similarity index 100% rename from app/data/container-build/cerc-react-peer/build.sh rename to stack_orchestrator/data/container-build/cerc-react-peer/build.sh diff --git a/app/data/container-build/cerc-react-peer/start-serving-app.sh b/stack_orchestrator/data/container-build/cerc-react-peer/start-serving-app.sh similarity index 100% rename from app/data/container-build/cerc-react-peer/start-serving-app.sh rename to stack_orchestrator/data/container-build/cerc-react-peer/start-serving-app.sh diff --git a/app/data/container-build/cerc-reth/build.sh b/stack_orchestrator/data/container-build/cerc-reth/build.sh similarity index 100% rename from app/data/container-build/cerc-reth/build.sh rename to stack_orchestrator/data/container-build/cerc-reth/build.sh diff --git a/app/data/container-build/cerc-sushiswap-subgraphs/Dockerfile b/stack_orchestrator/data/container-build/cerc-sushiswap-subgraphs/Dockerfile similarity index 100% rename from app/data/container-build/cerc-sushiswap-subgraphs/Dockerfile rename to stack_orchestrator/data/container-build/cerc-sushiswap-subgraphs/Dockerfile diff --git a/app/data/container-build/cerc-sushiswap-subgraphs/build.sh b/stack_orchestrator/data/container-build/cerc-sushiswap-subgraphs/build.sh similarity index 100% rename from app/data/container-build/cerc-sushiswap-subgraphs/build.sh rename to stack_orchestrator/data/container-build/cerc-sushiswap-subgraphs/build.sh diff --git a/app/data/container-build/cerc-sushiswap-v3-core/Dockerfile 
b/stack_orchestrator/data/container-build/cerc-sushiswap-v3-core/Dockerfile similarity index 100% rename from app/data/container-build/cerc-sushiswap-v3-core/Dockerfile rename to stack_orchestrator/data/container-build/cerc-sushiswap-v3-core/Dockerfile diff --git a/app/data/container-build/cerc-sushiswap-v3-core/build.sh b/stack_orchestrator/data/container-build/cerc-sushiswap-v3-core/build.sh similarity index 100% rename from app/data/container-build/cerc-sushiswap-v3-core/build.sh rename to stack_orchestrator/data/container-build/cerc-sushiswap-v3-core/build.sh diff --git a/app/data/container-build/cerc-sushiswap-v3-periphery/Dockerfile b/stack_orchestrator/data/container-build/cerc-sushiswap-v3-periphery/Dockerfile similarity index 100% rename from app/data/container-build/cerc-sushiswap-v3-periphery/Dockerfile rename to stack_orchestrator/data/container-build/cerc-sushiswap-v3-periphery/Dockerfile diff --git a/app/data/container-build/cerc-sushiswap-v3-periphery/build.sh b/stack_orchestrator/data/container-build/cerc-sushiswap-v3-periphery/build.sh similarity index 100% rename from app/data/container-build/cerc-sushiswap-v3-periphery/build.sh rename to stack_orchestrator/data/container-build/cerc-sushiswap-v3-periphery/build.sh diff --git a/app/data/container-build/cerc-test-container/Dockerfile b/stack_orchestrator/data/container-build/cerc-test-container/Dockerfile similarity index 100% rename from app/data/container-build/cerc-test-container/Dockerfile rename to stack_orchestrator/data/container-build/cerc-test-container/Dockerfile diff --git a/app/data/container-build/cerc-test-container/build.sh b/stack_orchestrator/data/container-build/cerc-test-container/build.sh similarity index 100% rename from app/data/container-build/cerc-test-container/build.sh rename to stack_orchestrator/data/container-build/cerc-test-container/build.sh diff --git a/app/data/container-build/cerc-test-container/run.sh 
b/stack_orchestrator/data/container-build/cerc-test-container/run.sh similarity index 100% rename from app/data/container-build/cerc-test-container/run.sh rename to stack_orchestrator/data/container-build/cerc-test-container/run.sh diff --git a/app/data/container-build/cerc-test-contract/build.sh b/stack_orchestrator/data/container-build/cerc-test-contract/build.sh similarity index 100% rename from app/data/container-build/cerc-test-contract/build.sh rename to stack_orchestrator/data/container-build/cerc-test-contract/build.sh diff --git a/app/data/container-build/cerc-tx-spammer/build.sh b/stack_orchestrator/data/container-build/cerc-tx-spammer/build.sh similarity index 100% rename from app/data/container-build/cerc-tx-spammer/build.sh rename to stack_orchestrator/data/container-build/cerc-tx-spammer/build.sh diff --git a/app/data/container-build/cerc-uniswap-v3-info/Dockerfile b/stack_orchestrator/data/container-build/cerc-uniswap-v3-info/Dockerfile similarity index 100% rename from app/data/container-build/cerc-uniswap-v3-info/Dockerfile rename to stack_orchestrator/data/container-build/cerc-uniswap-v3-info/Dockerfile diff --git a/app/data/container-build/cerc-uniswap-v3-info/build.sh b/stack_orchestrator/data/container-build/cerc-uniswap-v3-info/build.sh similarity index 100% rename from app/data/container-build/cerc-uniswap-v3-info/build.sh rename to stack_orchestrator/data/container-build/cerc-uniswap-v3-info/build.sh diff --git a/app/data/container-build/cerc-watcher-azimuth/Dockerfile b/stack_orchestrator/data/container-build/cerc-watcher-azimuth/Dockerfile similarity index 100% rename from app/data/container-build/cerc-watcher-azimuth/Dockerfile rename to stack_orchestrator/data/container-build/cerc-watcher-azimuth/Dockerfile diff --git a/app/data/container-build/cerc-watcher-azimuth/build.sh b/stack_orchestrator/data/container-build/cerc-watcher-azimuth/build.sh similarity index 100% rename from app/data/container-build/cerc-watcher-azimuth/build.sh 
rename to stack_orchestrator/data/container-build/cerc-watcher-azimuth/build.sh diff --git a/app/data/container-build/cerc-watcher-erc20/Dockerfile b/stack_orchestrator/data/container-build/cerc-watcher-erc20/Dockerfile similarity index 100% rename from app/data/container-build/cerc-watcher-erc20/Dockerfile rename to stack_orchestrator/data/container-build/cerc-watcher-erc20/Dockerfile diff --git a/app/data/container-build/cerc-watcher-erc20/build.sh b/stack_orchestrator/data/container-build/cerc-watcher-erc20/build.sh similarity index 100% rename from app/data/container-build/cerc-watcher-erc20/build.sh rename to stack_orchestrator/data/container-build/cerc-watcher-erc20/build.sh diff --git a/app/data/container-build/cerc-watcher-erc721/Dockerfile b/stack_orchestrator/data/container-build/cerc-watcher-erc721/Dockerfile similarity index 100% rename from app/data/container-build/cerc-watcher-erc721/Dockerfile rename to stack_orchestrator/data/container-build/cerc-watcher-erc721/Dockerfile diff --git a/app/data/container-build/cerc-watcher-erc721/build.sh b/stack_orchestrator/data/container-build/cerc-watcher-erc721/build.sh similarity index 100% rename from app/data/container-build/cerc-watcher-erc721/build.sh rename to stack_orchestrator/data/container-build/cerc-watcher-erc721/build.sh diff --git a/app/data/container-build/cerc-watcher-gelato/Dockerfile b/stack_orchestrator/data/container-build/cerc-watcher-gelato/Dockerfile similarity index 100% rename from app/data/container-build/cerc-watcher-gelato/Dockerfile rename to stack_orchestrator/data/container-build/cerc-watcher-gelato/Dockerfile diff --git a/app/data/container-build/cerc-watcher-gelato/build.sh b/stack_orchestrator/data/container-build/cerc-watcher-gelato/build.sh similarity index 100% rename from app/data/container-build/cerc-watcher-gelato/build.sh rename to stack_orchestrator/data/container-build/cerc-watcher-gelato/build.sh diff --git a/app/data/container-build/cerc-watcher-mobymask-v2/Dockerfile 
b/stack_orchestrator/data/container-build/cerc-watcher-mobymask-v2/Dockerfile similarity index 100% rename from app/data/container-build/cerc-watcher-mobymask-v2/Dockerfile rename to stack_orchestrator/data/container-build/cerc-watcher-mobymask-v2/Dockerfile diff --git a/app/data/container-build/cerc-watcher-mobymask-v2/build.sh b/stack_orchestrator/data/container-build/cerc-watcher-mobymask-v2/build.sh similarity index 100% rename from app/data/container-build/cerc-watcher-mobymask-v2/build.sh rename to stack_orchestrator/data/container-build/cerc-watcher-mobymask-v2/build.sh diff --git a/app/data/container-build/cerc-watcher-mobymask-v3/Dockerfile b/stack_orchestrator/data/container-build/cerc-watcher-mobymask-v3/Dockerfile similarity index 100% rename from app/data/container-build/cerc-watcher-mobymask-v3/Dockerfile rename to stack_orchestrator/data/container-build/cerc-watcher-mobymask-v3/Dockerfile diff --git a/app/data/container-build/cerc-watcher-mobymask-v3/build.sh b/stack_orchestrator/data/container-build/cerc-watcher-mobymask-v3/build.sh similarity index 100% rename from app/data/container-build/cerc-watcher-mobymask-v3/build.sh rename to stack_orchestrator/data/container-build/cerc-watcher-mobymask-v3/build.sh diff --git a/app/data/container-build/cerc-watcher-mobymask/Dockerfile b/stack_orchestrator/data/container-build/cerc-watcher-mobymask/Dockerfile similarity index 100% rename from app/data/container-build/cerc-watcher-mobymask/Dockerfile rename to stack_orchestrator/data/container-build/cerc-watcher-mobymask/Dockerfile diff --git a/app/data/container-build/cerc-watcher-mobymask/build.sh b/stack_orchestrator/data/container-build/cerc-watcher-mobymask/build.sh similarity index 100% rename from app/data/container-build/cerc-watcher-mobymask/build.sh rename to stack_orchestrator/data/container-build/cerc-watcher-mobymask/build.sh diff --git a/app/data/container-build/cerc-watcher-sushiswap/Dockerfile 
b/stack_orchestrator/data/container-build/cerc-watcher-sushiswap/Dockerfile similarity index 100% rename from app/data/container-build/cerc-watcher-sushiswap/Dockerfile rename to stack_orchestrator/data/container-build/cerc-watcher-sushiswap/Dockerfile diff --git a/app/data/container-build/cerc-watcher-sushiswap/build.sh b/stack_orchestrator/data/container-build/cerc-watcher-sushiswap/build.sh similarity index 100% rename from app/data/container-build/cerc-watcher-sushiswap/build.sh rename to stack_orchestrator/data/container-build/cerc-watcher-sushiswap/build.sh diff --git a/app/data/container-build/cerc-watcher-ts/Dockerfile b/stack_orchestrator/data/container-build/cerc-watcher-ts/Dockerfile similarity index 100% rename from app/data/container-build/cerc-watcher-ts/Dockerfile rename to stack_orchestrator/data/container-build/cerc-watcher-ts/Dockerfile diff --git a/app/data/container-build/cerc-watcher-ts/build.sh b/stack_orchestrator/data/container-build/cerc-watcher-ts/build.sh similarity index 100% rename from app/data/container-build/cerc-watcher-ts/build.sh rename to stack_orchestrator/data/container-build/cerc-watcher-ts/build.sh diff --git a/app/data/container-build/cerc-watcher-uniswap-v3/Dockerfile b/stack_orchestrator/data/container-build/cerc-watcher-uniswap-v3/Dockerfile similarity index 100% rename from app/data/container-build/cerc-watcher-uniswap-v3/Dockerfile rename to stack_orchestrator/data/container-build/cerc-watcher-uniswap-v3/Dockerfile diff --git a/app/data/container-build/cerc-watcher-uniswap-v3/build.sh b/stack_orchestrator/data/container-build/cerc-watcher-uniswap-v3/build.sh similarity index 100% rename from app/data/container-build/cerc-watcher-uniswap-v3/build.sh rename to stack_orchestrator/data/container-build/cerc-watcher-uniswap-v3/build.sh diff --git a/app/data/container-build/cerc-webapp-base/Dockerfile b/stack_orchestrator/data/container-build/cerc-webapp-base/Dockerfile similarity index 100% rename from 
app/data/container-build/cerc-webapp-base/Dockerfile rename to stack_orchestrator/data/container-build/cerc-webapp-base/Dockerfile diff --git a/app/data/container-build/cerc-webapp-base/apply-webapp-config.sh b/stack_orchestrator/data/container-build/cerc-webapp-base/apply-webapp-config.sh similarity index 100% rename from app/data/container-build/cerc-webapp-base/apply-webapp-config.sh rename to stack_orchestrator/data/container-build/cerc-webapp-base/apply-webapp-config.sh diff --git a/app/data/container-build/cerc-webapp-base/build.sh b/stack_orchestrator/data/container-build/cerc-webapp-base/build.sh similarity index 100% rename from app/data/container-build/cerc-webapp-base/build.sh rename to stack_orchestrator/data/container-build/cerc-webapp-base/build.sh diff --git a/app/data/container-build/cerc-webapp-base/config.yml b/stack_orchestrator/data/container-build/cerc-webapp-base/config.yml similarity index 100% rename from app/data/container-build/cerc-webapp-base/config.yml rename to stack_orchestrator/data/container-build/cerc-webapp-base/config.yml diff --git a/app/data/container-build/cerc-webapp-base/start-serving-app.sh b/stack_orchestrator/data/container-build/cerc-webapp-base/start-serving-app.sh similarity index 100% rename from app/data/container-build/cerc-webapp-base/start-serving-app.sh rename to stack_orchestrator/data/container-build/cerc-webapp-base/start-serving-app.sh diff --git a/app/data/container-build/default-build.sh b/stack_orchestrator/data/container-build/default-build.sh similarity index 100% rename from app/data/container-build/default-build.sh rename to stack_orchestrator/data/container-build/default-build.sh diff --git a/app/data/container-image-list.txt b/stack_orchestrator/data/container-image-list.txt similarity index 100% rename from app/data/container-image-list.txt rename to stack_orchestrator/data/container-image-list.txt diff --git a/app/data/npm-package-list.txt b/stack_orchestrator/data/npm-package-list.txt similarity 
index 100% rename from app/data/npm-package-list.txt rename to stack_orchestrator/data/npm-package-list.txt diff --git a/app/data/pod-list.txt b/stack_orchestrator/data/pod-list.txt similarity index 100% rename from app/data/pod-list.txt rename to stack_orchestrator/data/pod-list.txt diff --git a/app/data/repository-list.txt b/stack_orchestrator/data/repository-list.txt similarity index 100% rename from app/data/repository-list.txt rename to stack_orchestrator/data/repository-list.txt diff --git a/app/data/stacks/act-runner/README.md b/stack_orchestrator/data/stacks/act-runner/README.md similarity index 100% rename from app/data/stacks/act-runner/README.md rename to stack_orchestrator/data/stacks/act-runner/README.md diff --git a/app/data/stacks/act-runner/stack.yml b/stack_orchestrator/data/stacks/act-runner/stack.yml similarity index 100% rename from app/data/stacks/act-runner/stack.yml rename to stack_orchestrator/data/stacks/act-runner/stack.yml diff --git a/app/data/stacks/azimuth/README.md b/stack_orchestrator/data/stacks/azimuth/README.md similarity index 100% rename from app/data/stacks/azimuth/README.md rename to stack_orchestrator/data/stacks/azimuth/README.md diff --git a/app/data/stacks/azimuth/stack.yml b/stack_orchestrator/data/stacks/azimuth/stack.yml similarity index 100% rename from app/data/stacks/azimuth/stack.yml rename to stack_orchestrator/data/stacks/azimuth/stack.yml diff --git a/app/data/stacks/build-support/README.md b/stack_orchestrator/data/stacks/build-support/README.md similarity index 100% rename from app/data/stacks/build-support/README.md rename to stack_orchestrator/data/stacks/build-support/README.md diff --git a/app/data/stacks/build-support/stack.yml b/stack_orchestrator/data/stacks/build-support/stack.yml similarity index 100% rename from app/data/stacks/build-support/stack.yml rename to stack_orchestrator/data/stacks/build-support/stack.yml diff --git a/app/data/stacks/chain-chunker/README.md 
b/stack_orchestrator/data/stacks/chain-chunker/README.md similarity index 100% rename from app/data/stacks/chain-chunker/README.md rename to stack_orchestrator/data/stacks/chain-chunker/README.md diff --git a/app/data/stacks/chain-chunker/stack.yml b/stack_orchestrator/data/stacks/chain-chunker/stack.yml similarity index 100% rename from app/data/stacks/chain-chunker/stack.yml rename to stack_orchestrator/data/stacks/chain-chunker/stack.yml diff --git a/app/data/stacks/erc20/README.md b/stack_orchestrator/data/stacks/erc20/README.md similarity index 100% rename from app/data/stacks/erc20/README.md rename to stack_orchestrator/data/stacks/erc20/README.md diff --git a/app/data/stacks/erc20/stack.yml b/stack_orchestrator/data/stacks/erc20/stack.yml similarity index 100% rename from app/data/stacks/erc20/stack.yml rename to stack_orchestrator/data/stacks/erc20/stack.yml diff --git a/app/data/stacks/erc721/README.md b/stack_orchestrator/data/stacks/erc721/README.md similarity index 100% rename from app/data/stacks/erc721/README.md rename to stack_orchestrator/data/stacks/erc721/README.md diff --git a/app/data/stacks/erc721/stack.yml b/stack_orchestrator/data/stacks/erc721/stack.yml similarity index 100% rename from app/data/stacks/erc721/stack.yml rename to stack_orchestrator/data/stacks/erc721/stack.yml diff --git a/app/data/stacks/fixturenet-eth-loaded/README.md b/stack_orchestrator/data/stacks/fixturenet-eth-loaded/README.md similarity index 100% rename from app/data/stacks/fixturenet-eth-loaded/README.md rename to stack_orchestrator/data/stacks/fixturenet-eth-loaded/README.md diff --git a/app/data/stacks/fixturenet-eth-loaded/stack.yml b/stack_orchestrator/data/stacks/fixturenet-eth-loaded/stack.yml similarity index 100% rename from app/data/stacks/fixturenet-eth-loaded/stack.yml rename to stack_orchestrator/data/stacks/fixturenet-eth-loaded/stack.yml diff --git a/app/data/stacks/fixturenet-eth-tx/README.md 
b/stack_orchestrator/data/stacks/fixturenet-eth-tx/README.md similarity index 100% rename from app/data/stacks/fixturenet-eth-tx/README.md rename to stack_orchestrator/data/stacks/fixturenet-eth-tx/README.md diff --git a/app/data/stacks/fixturenet-eth-tx/stack.yml b/stack_orchestrator/data/stacks/fixturenet-eth-tx/stack.yml similarity index 100% rename from app/data/stacks/fixturenet-eth-tx/stack.yml rename to stack_orchestrator/data/stacks/fixturenet-eth-tx/stack.yml diff --git a/app/data/stacks/fixturenet-eth/README.md b/stack_orchestrator/data/stacks/fixturenet-eth/README.md similarity index 100% rename from app/data/stacks/fixturenet-eth/README.md rename to stack_orchestrator/data/stacks/fixturenet-eth/README.md diff --git a/app/data/stacks/fixturenet-eth/stack.yml b/stack_orchestrator/data/stacks/fixturenet-eth/stack.yml similarity index 100% rename from app/data/stacks/fixturenet-eth/stack.yml rename to stack_orchestrator/data/stacks/fixturenet-eth/stack.yml diff --git a/app/data/stacks/fixturenet-laconic-loaded/README.md b/stack_orchestrator/data/stacks/fixturenet-laconic-loaded/README.md similarity index 100% rename from app/data/stacks/fixturenet-laconic-loaded/README.md rename to stack_orchestrator/data/stacks/fixturenet-laconic-loaded/README.md diff --git a/app/data/stacks/fixturenet-laconic-loaded/stack.yml b/stack_orchestrator/data/stacks/fixturenet-laconic-loaded/stack.yml similarity index 100% rename from app/data/stacks/fixturenet-laconic-loaded/stack.yml rename to stack_orchestrator/data/stacks/fixturenet-laconic-loaded/stack.yml diff --git a/app/data/stacks/fixturenet-laconicd/README.md b/stack_orchestrator/data/stacks/fixturenet-laconicd/README.md similarity index 100% rename from app/data/stacks/fixturenet-laconicd/README.md rename to stack_orchestrator/data/stacks/fixturenet-laconicd/README.md diff --git a/app/data/stacks/fixturenet-laconicd/stack.yml b/stack_orchestrator/data/stacks/fixturenet-laconicd/stack.yml similarity index 100% rename 
from app/data/stacks/fixturenet-laconicd/stack.yml rename to stack_orchestrator/data/stacks/fixturenet-laconicd/stack.yml diff --git a/app/data/stacks/fixturenet-lotus/README.md b/stack_orchestrator/data/stacks/fixturenet-lotus/README.md similarity index 100% rename from app/data/stacks/fixturenet-lotus/README.md rename to stack_orchestrator/data/stacks/fixturenet-lotus/README.md diff --git a/app/data/stacks/fixturenet-lotus/stack.yml b/stack_orchestrator/data/stacks/fixturenet-lotus/stack.yml similarity index 100% rename from app/data/stacks/fixturenet-lotus/stack.yml rename to stack_orchestrator/data/stacks/fixturenet-lotus/stack.yml diff --git a/app/data/stacks/fixturenet-optimism/README.md b/stack_orchestrator/data/stacks/fixturenet-optimism/README.md similarity index 100% rename from app/data/stacks/fixturenet-optimism/README.md rename to stack_orchestrator/data/stacks/fixturenet-optimism/README.md diff --git a/app/data/stacks/fixturenet-optimism/l2-only.md b/stack_orchestrator/data/stacks/fixturenet-optimism/l2-only.md similarity index 100% rename from app/data/stacks/fixturenet-optimism/l2-only.md rename to stack_orchestrator/data/stacks/fixturenet-optimism/l2-only.md diff --git a/app/data/stacks/fixturenet-optimism/stack.yml b/stack_orchestrator/data/stacks/fixturenet-optimism/stack.yml similarity index 100% rename from app/data/stacks/fixturenet-optimism/stack.yml rename to stack_orchestrator/data/stacks/fixturenet-optimism/stack.yml diff --git a/app/data/stacks/fixturenet-payments/README.md b/stack_orchestrator/data/stacks/fixturenet-payments/README.md similarity index 100% rename from app/data/stacks/fixturenet-payments/README.md rename to stack_orchestrator/data/stacks/fixturenet-payments/README.md diff --git a/app/data/stacks/fixturenet-payments/mobymask-demo.md b/stack_orchestrator/data/stacks/fixturenet-payments/mobymask-demo.md similarity index 100% rename from app/data/stacks/fixturenet-payments/mobymask-demo.md rename to 
stack_orchestrator/data/stacks/fixturenet-payments/mobymask-demo.md diff --git a/app/data/stacks/fixturenet-payments/ponder-demo.md b/stack_orchestrator/data/stacks/fixturenet-payments/ponder-demo.md similarity index 100% rename from app/data/stacks/fixturenet-payments/ponder-demo.md rename to stack_orchestrator/data/stacks/fixturenet-payments/ponder-demo.md diff --git a/app/data/stacks/fixturenet-payments/stack.yml b/stack_orchestrator/data/stacks/fixturenet-payments/stack.yml similarity index 100% rename from app/data/stacks/fixturenet-payments/stack.yml rename to stack_orchestrator/data/stacks/fixturenet-payments/stack.yml diff --git a/app/data/stacks/fixturenet-plugeth-tx/README.md b/stack_orchestrator/data/stacks/fixturenet-plugeth-tx/README.md similarity index 100% rename from app/data/stacks/fixturenet-plugeth-tx/README.md rename to stack_orchestrator/data/stacks/fixturenet-plugeth-tx/README.md diff --git a/app/data/stacks/fixturenet-plugeth-tx/stack.yml b/stack_orchestrator/data/stacks/fixturenet-plugeth-tx/stack.yml similarity index 100% rename from app/data/stacks/fixturenet-plugeth-tx/stack.yml rename to stack_orchestrator/data/stacks/fixturenet-plugeth-tx/stack.yml diff --git a/app/data/stacks/fixturenet-pocket/README.md b/stack_orchestrator/data/stacks/fixturenet-pocket/README.md similarity index 100% rename from app/data/stacks/fixturenet-pocket/README.md rename to stack_orchestrator/data/stacks/fixturenet-pocket/README.md diff --git a/app/data/stacks/fixturenet-pocket/stack.yml b/stack_orchestrator/data/stacks/fixturenet-pocket/stack.yml similarity index 100% rename from app/data/stacks/fixturenet-pocket/stack.yml rename to stack_orchestrator/data/stacks/fixturenet-pocket/stack.yml diff --git a/app/data/stacks/fixturenet-sushiswap-subgraph/README.md b/stack_orchestrator/data/stacks/fixturenet-sushiswap-subgraph/README.md similarity index 100% rename from app/data/stacks/fixturenet-sushiswap-subgraph/README.md rename to 
stack_orchestrator/data/stacks/fixturenet-sushiswap-subgraph/README.md diff --git a/app/data/stacks/fixturenet-sushiswap-subgraph/stack.yml b/stack_orchestrator/data/stacks/fixturenet-sushiswap-subgraph/stack.yml similarity index 100% rename from app/data/stacks/fixturenet-sushiswap-subgraph/stack.yml rename to stack_orchestrator/data/stacks/fixturenet-sushiswap-subgraph/stack.yml diff --git a/app/data/stacks/gelato/README.md b/stack_orchestrator/data/stacks/gelato/README.md similarity index 100% rename from app/data/stacks/gelato/README.md rename to stack_orchestrator/data/stacks/gelato/README.md diff --git a/app/data/stacks/gelato/stack.yml b/stack_orchestrator/data/stacks/gelato/stack.yml similarity index 100% rename from app/data/stacks/gelato/stack.yml rename to stack_orchestrator/data/stacks/gelato/stack.yml diff --git a/app/data/stacks/graph-node/README.md b/stack_orchestrator/data/stacks/graph-node/README.md similarity index 100% rename from app/data/stacks/graph-node/README.md rename to stack_orchestrator/data/stacks/graph-node/README.md diff --git a/app/data/stacks/graph-node/deploy-subgraph.md b/stack_orchestrator/data/stacks/graph-node/deploy-subgraph.md similarity index 100% rename from app/data/stacks/graph-node/deploy-subgraph.md rename to stack_orchestrator/data/stacks/graph-node/deploy-subgraph.md diff --git a/app/data/stacks/graph-node/stack.yml b/stack_orchestrator/data/stacks/graph-node/stack.yml similarity index 100% rename from app/data/stacks/graph-node/stack.yml rename to stack_orchestrator/data/stacks/graph-node/stack.yml diff --git a/app/data/stacks/kubo/README.md b/stack_orchestrator/data/stacks/kubo/README.md similarity index 100% rename from app/data/stacks/kubo/README.md rename to stack_orchestrator/data/stacks/kubo/README.md diff --git a/app/data/stacks/kubo/stack.yml b/stack_orchestrator/data/stacks/kubo/stack.yml similarity index 100% rename from app/data/stacks/kubo/stack.yml rename to stack_orchestrator/data/stacks/kubo/stack.yml 
diff --git a/app/data/stacks/laconic-dot-com/README.md b/stack_orchestrator/data/stacks/laconic-dot-com/README.md similarity index 100% rename from app/data/stacks/laconic-dot-com/README.md rename to stack_orchestrator/data/stacks/laconic-dot-com/README.md diff --git a/app/data/stacks/laconic-dot-com/stack.yml b/stack_orchestrator/data/stacks/laconic-dot-com/stack.yml similarity index 100% rename from app/data/stacks/laconic-dot-com/stack.yml rename to stack_orchestrator/data/stacks/laconic-dot-com/stack.yml diff --git a/app/data/stacks/lasso/README.md b/stack_orchestrator/data/stacks/lasso/README.md similarity index 100% rename from app/data/stacks/lasso/README.md rename to stack_orchestrator/data/stacks/lasso/README.md diff --git a/app/data/stacks/lasso/stack.yml b/stack_orchestrator/data/stacks/lasso/stack.yml similarity index 100% rename from app/data/stacks/lasso/stack.yml rename to stack_orchestrator/data/stacks/lasso/stack.yml diff --git a/app/data/stacks/mainnet-eth-plugeth/README.md b/stack_orchestrator/data/stacks/mainnet-eth-plugeth/README.md similarity index 100% rename from app/data/stacks/mainnet-eth-plugeth/README.md rename to stack_orchestrator/data/stacks/mainnet-eth-plugeth/README.md diff --git a/app/data/stacks/mainnet-eth-plugeth/deploy/commands.py b/stack_orchestrator/data/stacks/mainnet-eth-plugeth/deploy/commands.py similarity index 100% rename from app/data/stacks/mainnet-eth-plugeth/deploy/commands.py rename to stack_orchestrator/data/stacks/mainnet-eth-plugeth/deploy/commands.py diff --git a/app/data/stacks/mainnet-eth-plugeth/stack.yml b/stack_orchestrator/data/stacks/mainnet-eth-plugeth/stack.yml similarity index 100% rename from app/data/stacks/mainnet-eth-plugeth/stack.yml rename to stack_orchestrator/data/stacks/mainnet-eth-plugeth/stack.yml diff --git a/app/data/stacks/mainnet-eth/README.md b/stack_orchestrator/data/stacks/mainnet-eth/README.md similarity index 100% rename from app/data/stacks/mainnet-eth/README.md rename to 
stack_orchestrator/data/stacks/mainnet-eth/README.md diff --git a/app/data/stacks/mainnet-eth/deploy/commands.py b/stack_orchestrator/data/stacks/mainnet-eth/deploy/commands.py similarity index 100% rename from app/data/stacks/mainnet-eth/deploy/commands.py rename to stack_orchestrator/data/stacks/mainnet-eth/deploy/commands.py diff --git a/app/data/stacks/mainnet-eth/stack.yml b/stack_orchestrator/data/stacks/mainnet-eth/stack.yml similarity index 100% rename from app/data/stacks/mainnet-eth/stack.yml rename to stack_orchestrator/data/stacks/mainnet-eth/stack.yml diff --git a/app/data/stacks/mainnet-go-opera/README.md b/stack_orchestrator/data/stacks/mainnet-go-opera/README.md similarity index 100% rename from app/data/stacks/mainnet-go-opera/README.md rename to stack_orchestrator/data/stacks/mainnet-go-opera/README.md diff --git a/app/data/stacks/mainnet-go-opera/stack.yml b/stack_orchestrator/data/stacks/mainnet-go-opera/stack.yml similarity index 100% rename from app/data/stacks/mainnet-go-opera/stack.yml rename to stack_orchestrator/data/stacks/mainnet-go-opera/stack.yml diff --git a/app/data/stacks/mainnet-laconic/README.md b/stack_orchestrator/data/stacks/mainnet-laconic/README.md similarity index 100% rename from app/data/stacks/mainnet-laconic/README.md rename to stack_orchestrator/data/stacks/mainnet-laconic/README.md diff --git a/app/data/stacks/mainnet-laconic/deploy/commands.py b/stack_orchestrator/data/stacks/mainnet-laconic/deploy/commands.py similarity index 97% rename from app/data/stacks/mainnet-laconic/deploy/commands.py rename to stack_orchestrator/data/stacks/mainnet-laconic/deploy/commands.py index 030200f1..b611a0d6 100644 --- a/app/data/stacks/mainnet-laconic/deploy/commands.py +++ b/stack_orchestrator/data/stacks/mainnet-laconic/deploy/commands.py @@ -13,11 +13,11 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . 
-from app.util import get_yaml -from app.deploy.deploy_types import DeployCommandContext, LaconicStackSetupCommand, DeploymentContext -from app.deploy.stack_state import State -from app.deploy.deploy_util import VolumeMapping, run_container_command -from app.command_types import CommandOptions +from stack_orchestrator.util import get_yaml +from stack_orchestrator.deploy.deploy_types import DeployCommandContext, LaconicStackSetupCommand, DeploymentContext +from stack_orchestrator.deploy.stack_state import State +from stack_orchestrator.deploy.deploy_util import VolumeMapping, run_container_command +from stack_orchestrator.command_types import CommandOptions from enum import Enum from pathlib import Path from shutil import copyfile, copytree diff --git a/app/data/stacks/mainnet-laconic/stack.yml b/stack_orchestrator/data/stacks/mainnet-laconic/stack.yml similarity index 100% rename from app/data/stacks/mainnet-laconic/stack.yml rename to stack_orchestrator/data/stacks/mainnet-laconic/stack.yml diff --git a/app/data/stacks/mainnet-laconic/test/run-mainnet-laconic-test.sh b/stack_orchestrator/data/stacks/mainnet-laconic/test/run-mainnet-laconic-test.sh similarity index 100% rename from app/data/stacks/mainnet-laconic/test/run-mainnet-laconic-test.sh rename to stack_orchestrator/data/stacks/mainnet-laconic/test/run-mainnet-laconic-test.sh diff --git a/app/data/stacks/mobymask-v2/README.md b/stack_orchestrator/data/stacks/mobymask-v2/README.md similarity index 100% rename from app/data/stacks/mobymask-v2/README.md rename to stack_orchestrator/data/stacks/mobymask-v2/README.md diff --git a/app/data/stacks/mobymask-v2/demo.md b/stack_orchestrator/data/stacks/mobymask-v2/demo.md similarity index 100% rename from app/data/stacks/mobymask-v2/demo.md rename to stack_orchestrator/data/stacks/mobymask-v2/demo.md diff --git a/app/data/stacks/mobymask-v2/mobymask-only.md b/stack_orchestrator/data/stacks/mobymask-v2/mobymask-only.md similarity index 100% rename from 
app/data/stacks/mobymask-v2/mobymask-only.md rename to stack_orchestrator/data/stacks/mobymask-v2/mobymask-only.md diff --git a/app/data/stacks/mobymask-v2/stack.yml b/stack_orchestrator/data/stacks/mobymask-v2/stack.yml similarity index 100% rename from app/data/stacks/mobymask-v2/stack.yml rename to stack_orchestrator/data/stacks/mobymask-v2/stack.yml diff --git a/app/data/stacks/mobymask-v2/watcher-p2p-network/watcher.md b/stack_orchestrator/data/stacks/mobymask-v2/watcher-p2p-network/watcher.md similarity index 100% rename from app/data/stacks/mobymask-v2/watcher-p2p-network/watcher.md rename to stack_orchestrator/data/stacks/mobymask-v2/watcher-p2p-network/watcher.md diff --git a/app/data/stacks/mobymask-v2/watcher-p2p-network/web-app.md b/stack_orchestrator/data/stacks/mobymask-v2/watcher-p2p-network/web-app.md similarity index 100% rename from app/data/stacks/mobymask-v2/watcher-p2p-network/web-app.md rename to stack_orchestrator/data/stacks/mobymask-v2/watcher-p2p-network/web-app.md diff --git a/app/data/stacks/mobymask-v2/web-apps.md b/stack_orchestrator/data/stacks/mobymask-v2/web-apps.md similarity index 100% rename from app/data/stacks/mobymask-v2/web-apps.md rename to stack_orchestrator/data/stacks/mobymask-v2/web-apps.md diff --git a/app/data/stacks/mobymask-v3/README.md b/stack_orchestrator/data/stacks/mobymask-v3/README.md similarity index 100% rename from app/data/stacks/mobymask-v3/README.md rename to stack_orchestrator/data/stacks/mobymask-v3/README.md diff --git a/app/data/stacks/mobymask-v3/stack.yml b/stack_orchestrator/data/stacks/mobymask-v3/stack.yml similarity index 100% rename from app/data/stacks/mobymask-v3/stack.yml rename to stack_orchestrator/data/stacks/mobymask-v3/stack.yml diff --git a/app/data/stacks/mobymask-v3/watcher.md b/stack_orchestrator/data/stacks/mobymask-v3/watcher.md similarity index 100% rename from app/data/stacks/mobymask-v3/watcher.md rename to stack_orchestrator/data/stacks/mobymask-v3/watcher.md diff --git 
a/app/data/stacks/mobymask-v3/web-app.md b/stack_orchestrator/data/stacks/mobymask-v3/web-app.md similarity index 100% rename from app/data/stacks/mobymask-v3/web-app.md rename to stack_orchestrator/data/stacks/mobymask-v3/web-app.md diff --git a/app/data/stacks/mobymask/README.md b/stack_orchestrator/data/stacks/mobymask/README.md similarity index 100% rename from app/data/stacks/mobymask/README.md rename to stack_orchestrator/data/stacks/mobymask/README.md diff --git a/app/data/stacks/mobymask/stack.yml b/stack_orchestrator/data/stacks/mobymask/stack.yml similarity index 100% rename from app/data/stacks/mobymask/stack.yml rename to stack_orchestrator/data/stacks/mobymask/stack.yml diff --git a/app/data/stacks/package-registry/README.md b/stack_orchestrator/data/stacks/package-registry/README.md similarity index 100% rename from app/data/stacks/package-registry/README.md rename to stack_orchestrator/data/stacks/package-registry/README.md diff --git a/app/data/stacks/package-registry/stack.yml b/stack_orchestrator/data/stacks/package-registry/stack.yml similarity index 100% rename from app/data/stacks/package-registry/stack.yml rename to stack_orchestrator/data/stacks/package-registry/stack.yml diff --git a/app/data/stacks/reth/README.md b/stack_orchestrator/data/stacks/reth/README.md similarity index 100% rename from app/data/stacks/reth/README.md rename to stack_orchestrator/data/stacks/reth/README.md diff --git a/app/data/stacks/reth/stack.yml b/stack_orchestrator/data/stacks/reth/stack.yml similarity index 100% rename from app/data/stacks/reth/stack.yml rename to stack_orchestrator/data/stacks/reth/stack.yml diff --git a/app/data/stacks/sushiswap-subgraph/README.md b/stack_orchestrator/data/stacks/sushiswap-subgraph/README.md similarity index 100% rename from app/data/stacks/sushiswap-subgraph/README.md rename to stack_orchestrator/data/stacks/sushiswap-subgraph/README.md diff --git a/app/data/stacks/sushiswap-subgraph/stack.yml 
b/stack_orchestrator/data/stacks/sushiswap-subgraph/stack.yml similarity index 100% rename from app/data/stacks/sushiswap-subgraph/stack.yml rename to stack_orchestrator/data/stacks/sushiswap-subgraph/stack.yml diff --git a/app/data/stacks/sushiswap/README.md b/stack_orchestrator/data/stacks/sushiswap/README.md similarity index 100% rename from app/data/stacks/sushiswap/README.md rename to stack_orchestrator/data/stacks/sushiswap/README.md diff --git a/app/data/stacks/sushiswap/smoke-tests.md b/stack_orchestrator/data/stacks/sushiswap/smoke-tests.md similarity index 100% rename from app/data/stacks/sushiswap/smoke-tests.md rename to stack_orchestrator/data/stacks/sushiswap/smoke-tests.md diff --git a/app/data/stacks/sushiswap/stack.yml b/stack_orchestrator/data/stacks/sushiswap/stack.yml similarity index 100% rename from app/data/stacks/sushiswap/stack.yml rename to stack_orchestrator/data/stacks/sushiswap/stack.yml diff --git a/app/data/stacks/test/README.md b/stack_orchestrator/data/stacks/test/README.md similarity index 100% rename from app/data/stacks/test/README.md rename to stack_orchestrator/data/stacks/test/README.md diff --git a/app/data/stacks/test/deploy/commands.py b/stack_orchestrator/data/stacks/test/deploy/commands.py similarity index 88% rename from app/data/stacks/test/deploy/commands.py rename to stack_orchestrator/data/stacks/test/deploy/commands.py index 2eebeea2..e6601eae 100644 --- a/app/data/stacks/test/deploy/commands.py +++ b/stack_orchestrator/data/stacks/test/deploy/commands.py @@ -13,10 +13,10 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . 
-from app.util import get_yaml -from app.deploy.deploy_types import DeployCommandContext -from app.deploy.stack_state import State -from app.deploy.deploy_util import VolumeMapping, run_container_command +from stack_orchestrator.util import get_yaml +from stack_orchestrator.deploy.deploy_types import DeployCommandContext +from stack_orchestrator.deploy.stack_state import State +from stack_orchestrator.deploy.deploy_util import VolumeMapping, run_container_command from pathlib import Path default_spec_file_content = """config: diff --git a/app/data/stacks/test/stack.yml b/stack_orchestrator/data/stacks/test/stack.yml similarity index 100% rename from app/data/stacks/test/stack.yml rename to stack_orchestrator/data/stacks/test/stack.yml diff --git a/app/data/stacks/uniswap-v3/README.md b/stack_orchestrator/data/stacks/uniswap-v3/README.md similarity index 100% rename from app/data/stacks/uniswap-v3/README.md rename to stack_orchestrator/data/stacks/uniswap-v3/README.md diff --git a/app/data/stacks/uniswap-v3/stack.yml b/stack_orchestrator/data/stacks/uniswap-v3/stack.yml similarity index 100% rename from app/data/stacks/uniswap-v3/stack.yml rename to stack_orchestrator/data/stacks/uniswap-v3/stack.yml diff --git a/app/data/version.txt b/stack_orchestrator/data/version.txt similarity index 100% rename from app/data/version.txt rename to stack_orchestrator/data/version.txt diff --git a/app/deploy/__init__.py b/stack_orchestrator/deploy/__init__.py similarity index 100% rename from app/deploy/__init__.py rename to stack_orchestrator/deploy/__init__.py diff --git a/app/deploy/compose/__init__.py b/stack_orchestrator/deploy/compose/__init__.py similarity index 100% rename from app/deploy/compose/__init__.py rename to stack_orchestrator/deploy/compose/__init__.py diff --git a/app/deploy/compose/deploy_docker.py b/stack_orchestrator/deploy/compose/deploy_docker.py similarity index 96% rename from app/deploy/compose/deploy_docker.py rename to 
stack_orchestrator/deploy/compose/deploy_docker.py index e8ee4b9f..1e5f5f81 100644 --- a/app/deploy/compose/deploy_docker.py +++ b/stack_orchestrator/deploy/compose/deploy_docker.py @@ -15,7 +15,7 @@ from pathlib import Path from python_on_whales import DockerClient, DockerException -from app.deploy.deployer import Deployer, DeployerException, DeployerConfigGenerator +from stack_orchestrator.deploy.deployer import Deployer, DeployerException, DeployerConfigGenerator class DockerDeployer(Deployer): diff --git a/app/deploy/deploy.py b/stack_orchestrator/deploy/deploy.py similarity index 96% rename from app/deploy/deploy.py rename to stack_orchestrator/deploy/deploy.py index a2d7cc01..57fedebf 100644 --- a/app/deploy/deploy.py +++ b/stack_orchestrator/deploy/deploy.py @@ -24,13 +24,13 @@ from importlib import resources import subprocess import click from pathlib import Path -from app.util import include_exclude_check, get_parsed_stack_config, global_options2, get_dev_root_path -from app.deploy.deployer import Deployer, DeployerException -from app.deploy.deployer_factory import getDeployer -from app.deploy.deploy_types import ClusterContext, DeployCommandContext -from app.deploy.deployment_create import create as deployment_create -from app.deploy.deployment_create import init as deployment_init -from app.deploy.deployment_create import setup as deployment_setup +from stack_orchestrator.util import include_exclude_check, get_parsed_stack_config, global_options2, get_dev_root_path +from stack_orchestrator.deploy.deployer import Deployer, DeployerException +from stack_orchestrator.deploy.deployer_factory import getDeployer +from stack_orchestrator.deploy.deploy_types import ClusterContext, DeployCommandContext +from stack_orchestrator.deploy.deployment_create import create as deployment_create +from stack_orchestrator.deploy.deployment_create import init as deployment_init +from stack_orchestrator.deploy.deployment_create import setup as deployment_setup @click.group() 
@@ -273,7 +273,7 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file): print(f"Using cluster name: {cluster}") # See: https://stackoverflow.com/a/20885799/1701505 - from app import data + from stack_orchestrator import data with resources.open_text(data, "pod-list.txt") as pod_list_file: all_pods = pod_list_file.read().splitlines() diff --git a/app/deploy/deploy_types.py b/stack_orchestrator/deploy/deploy_types.py similarity index 93% rename from app/deploy/deploy_types.py rename to stack_orchestrator/deploy/deploy_types.py index 16b5c313..b0c59380 100644 --- a/app/deploy/deploy_types.py +++ b/stack_orchestrator/deploy/deploy_types.py @@ -16,8 +16,8 @@ from typing import List from dataclasses import dataclass from pathlib import Path -from app.command_types import CommandOptions -from app.deploy.deployer import Deployer +from stack_orchestrator.command_types import CommandOptions +from stack_orchestrator.deploy.deployer import Deployer @dataclass diff --git a/app/deploy/deploy_util.py b/stack_orchestrator/deploy/deploy_util.py similarity index 92% rename from app/deploy/deploy_util.py rename to stack_orchestrator/deploy/deploy_util.py index 4a1ffbfe..9829490d 100644 --- a/app/deploy/deploy_util.py +++ b/stack_orchestrator/deploy/deploy_util.py @@ -15,8 +15,8 @@ import os from typing import List -from app.deploy.deploy_types import DeployCommandContext, VolumeMapping -from app.util import get_parsed_stack_config, get_yaml, get_compose_file_dir, get_pod_list +from stack_orchestrator.deploy.deploy_types import DeployCommandContext, VolumeMapping +from stack_orchestrator.util import get_parsed_stack_config, get_yaml, get_compose_file_dir, get_pod_list def _container_image_from_service(stack: str, service: str): diff --git a/app/deploy/deployer.py b/stack_orchestrator/deploy/deployer.py similarity index 100% rename from app/deploy/deployer.py rename to stack_orchestrator/deploy/deployer.py diff --git a/app/deploy/deployer_factory.py 
b/stack_orchestrator/deploy/deployer_factory.py similarity index 87% rename from app/deploy/deployer_factory.py rename to stack_orchestrator/deploy/deployer_factory.py index 0c0ef69d..262fa2dd 100644 --- a/app/deploy/deployer_factory.py +++ b/stack_orchestrator/deploy/deployer_factory.py @@ -13,8 +13,8 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . -from app.deploy.k8s.deploy_k8s import K8sDeployer, K8sDeployerConfigGenerator -from app.deploy.compose.deploy_docker import DockerDeployer, DockerDeployerConfigGenerator +from stack_orchestrator.deploy.k8s.deploy_k8s import K8sDeployer, K8sDeployerConfigGenerator +from stack_orchestrator.deploy.compose.deploy_docker import DockerDeployer, DockerDeployerConfigGenerator def getDeployerConfigGenerator(type: str): diff --git a/app/deploy/deployment.py b/stack_orchestrator/deploy/deployment.py similarity index 94% rename from app/deploy/deployment.py rename to stack_orchestrator/deploy/deployment.py index b1b4a486..c6656b01 100644 --- a/app/deploy/deployment.py +++ b/stack_orchestrator/deploy/deployment.py @@ -16,10 +16,10 @@ import click from pathlib import Path import sys -from app.deploy.deploy import up_operation, down_operation, ps_operation, port_operation -from app.deploy.deploy import exec_operation, logs_operation, create_deploy_context -from app.deploy.stack import Stack -from app.deploy.spec import Spec +from stack_orchestrator.deploy.deploy import up_operation, down_operation, ps_operation, port_operation +from stack_orchestrator.deploy.deploy import exec_operation, logs_operation, create_deploy_context +from stack_orchestrator.deploy.stack import Stack +from stack_orchestrator.deploy.spec import Spec class DeploymentContext: diff --git a/app/deploy/deployment_create.py b/stack_orchestrator/deploy/deployment_create.py similarity index 97% rename from app/deploy/deployment_create.py rename to stack_orchestrator/deploy/deployment_create.py 
index 4f297286..8a2237a8 100644 --- a/app/deploy/deployment_create.py +++ b/stack_orchestrator/deploy/deployment_create.py @@ -21,10 +21,11 @@ from typing import List import random from shutil import copy, copyfile, copytree import sys -from app.util import (get_stack_file_path, get_parsed_deployment_spec, get_parsed_stack_config, global_options, get_yaml, - get_pod_list, get_pod_file_path, pod_has_scripts, get_pod_script_paths, get_plugin_code_paths) -from app.deploy.deploy_types import DeploymentContext, DeployCommandContext, LaconicStackSetupCommand -from app.deploy.deployer_factory import getDeployerConfigGenerator +from stack_orchestrator.util import (get_stack_file_path, get_parsed_deployment_spec, get_parsed_stack_config, + global_options, get_yaml, get_pod_list, get_pod_file_path, pod_has_scripts, + get_pod_script_paths, get_plugin_code_paths) +from stack_orchestrator.deploy.deploy_types import DeploymentContext, DeployCommandContext, LaconicStackSetupCommand +from stack_orchestrator.deploy.deployer_factory import getDeployerConfigGenerator def _make_default_deployment_dir(): diff --git a/app/deploy/k8s/__init__.py b/stack_orchestrator/deploy/k8s/__init__.py similarity index 100% rename from app/deploy/k8s/__init__.py rename to stack_orchestrator/deploy/k8s/__init__.py diff --git a/app/deploy/k8s/cluster_info.py b/stack_orchestrator/deploy/k8s/cluster_info.py similarity index 94% rename from app/deploy/k8s/cluster_info.py rename to stack_orchestrator/deploy/k8s/cluster_info.py index dfb1ef53..5d785a01 100644 --- a/app/deploy/k8s/cluster_info.py +++ b/stack_orchestrator/deploy/k8s/cluster_info.py @@ -16,9 +16,9 @@ from kubernetes import client from typing import Any, List, Set -from app.opts import opts -from app.deploy.k8s.helpers import named_volumes_from_pod_files, volume_mounts_for_service, volumes_for_pod_files -from app.deploy.k8s.helpers import parsed_pod_files_map_from_file_names +from stack_orchestrator.opts import opts +from 
stack_orchestrator.deploy.k8s.helpers import named_volumes_from_pod_files, volume_mounts_for_service, volumes_for_pod_files +from stack_orchestrator.deploy.k8s.helpers import parsed_pod_files_map_from_file_names class ClusterInfo: diff --git a/app/deploy/k8s/deploy_k8s.py b/stack_orchestrator/deploy/k8s/deploy_k8s.py similarity index 92% rename from app/deploy/k8s/deploy_k8s.py rename to stack_orchestrator/deploy/k8s/deploy_k8s.py index 16b5f0b4..a5167185 100644 --- a/app/deploy/k8s/deploy_k8s.py +++ b/stack_orchestrator/deploy/k8s/deploy_k8s.py @@ -16,11 +16,11 @@ from pathlib import Path from kubernetes import client, config -from app.deploy.deployer import Deployer, DeployerConfigGenerator -from app.deploy.k8s.helpers import create_cluster, destroy_cluster, load_images_into_kind -from app.deploy.k8s.helpers import pods_in_deployment, log_stream_from_string, generate_kind_config -from app.deploy.k8s.cluster_info import ClusterInfo -from app.opts import opts +from stack_orchestrator.deploy.deployer import Deployer, DeployerConfigGenerator +from stack_orchestrator.deploy.k8s.helpers import create_cluster, destroy_cluster, load_images_into_kind +from stack_orchestrator.deploy.k8s.helpers import pods_in_deployment, log_stream_from_string, generate_kind_config +from stack_orchestrator.deploy.k8s.cluster_info import ClusterInfo +from stack_orchestrator.opts import opts class K8sDeployer(Deployer): diff --git a/app/deploy/k8s/helpers.py b/stack_orchestrator/deploy/k8s/helpers.py similarity index 98% rename from app/deploy/k8s/helpers.py rename to stack_orchestrator/deploy/k8s/helpers.py index 6194ac5e..8536a521 100644 --- a/app/deploy/k8s/helpers.py +++ b/stack_orchestrator/deploy/k8s/helpers.py @@ -18,8 +18,8 @@ from pathlib import Path import subprocess from typing import Any, Set -from app.opts import opts -from app.util import get_yaml +from stack_orchestrator.opts import opts +from stack_orchestrator.util import get_yaml def _run_command(command: str): diff --git 
a/app/deploy/spec.py b/stack_orchestrator/deploy/spec.py similarity index 95% rename from app/deploy/spec.py rename to stack_orchestrator/deploy/spec.py index a23bc167..9ee893b9 100644 --- a/app/deploy/spec.py +++ b/stack_orchestrator/deploy/spec.py @@ -15,7 +15,7 @@ from pathlib import Path import typing -from app.util import get_yaml +from stack_orchestrator.util import get_yaml class Spec: diff --git a/app/deploy/stack.py b/stack_orchestrator/deploy/stack.py similarity index 95% rename from app/deploy/stack.py rename to stack_orchestrator/deploy/stack.py index 1f94acdf..e0d33851 100644 --- a/app/deploy/stack.py +++ b/stack_orchestrator/deploy/stack.py @@ -15,7 +15,7 @@ from pathlib import Path import typing -from app.util import get_yaml +from stack_orchestrator.util import get_yaml class Stack: diff --git a/app/deploy/stack_state.py b/stack_orchestrator/deploy/stack_state.py similarity index 100% rename from app/deploy/stack_state.py rename to stack_orchestrator/deploy/stack_state.py diff --git a/cli.py b/stack_orchestrator/main.py similarity index 82% rename from cli.py rename to stack_orchestrator/main.py index 38bdddd9..ca1914e6 100644 --- a/cli.py +++ b/stack_orchestrator/main.py @@ -15,15 +15,15 @@ import click -from app.command_types import CommandOptions -from app.repos import setup_repositories -from app.build import build_containers -from app.build import build_npms -from app.deploy import deploy -from app import version -from app.deploy import deployment -from app import opts -from app import update +from stack_orchestrator.command_types import CommandOptions +from stack_orchestrator.repos import setup_repositories +from stack_orchestrator.build import build_containers +from stack_orchestrator.build import build_npms +from stack_orchestrator.deploy import deploy +from stack_orchestrator import version +from stack_orchestrator.deploy import deployment +from stack_orchestrator import opts +from stack_orchestrator import update CONTEXT_SETTINGS = 
dict(help_option_names=['-h', '--help']) diff --git a/app/opts.py b/stack_orchestrator/opts.py similarity index 92% rename from app/opts.py rename to stack_orchestrator/opts.py index 193637c2..665da535 100644 --- a/app/opts.py +++ b/stack_orchestrator/opts.py @@ -13,7 +13,7 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . -from app.command_types import CommandOptions +from stack_orchestrator.command_types import CommandOptions class opts: diff --git a/app/repos/__init__.py b/stack_orchestrator/repos/__init__.py similarity index 100% rename from app/repos/__init__.py rename to stack_orchestrator/repos/__init__.py diff --git a/app/repos/setup_repositories.py b/stack_orchestrator/repos/setup_repositories.py similarity index 99% rename from app/repos/setup_repositories.py rename to stack_orchestrator/repos/setup_repositories.py index 0ce11670..feca7897 100644 --- a/app/repos/setup_repositories.py +++ b/stack_orchestrator/repos/setup_repositories.py @@ -25,7 +25,7 @@ import click import importlib.resources from pathlib import Path import yaml -from app.util import include_exclude_check +from stack_orchestrator.util import include_exclude_check class GitProgress(git.RemoteProgress): @@ -232,7 +232,7 @@ def command(ctx, include, exclude, git_ssh, check_only, pull, branches, branches os.makedirs(dev_root_path) # See: https://stackoverflow.com/a/20885799/1701505 - from app import data + from stack_orchestrator import data with importlib.resources.open_text(data, "repository-list.txt") as repository_list_file: all_repos = repository_list_file.read().splitlines() diff --git a/app/update.py b/stack_orchestrator/update.py similarity index 98% rename from app/update.py rename to stack_orchestrator/update.py index 9f70b06e..a41eabae 100644 --- a/app/update.py +++ b/stack_orchestrator/update.py @@ -23,7 +23,7 @@ import sys import stat import shutil import validators -from app.util import get_yaml +from 
stack_orchestrator.util import get_yaml def _download_url(url: str, file_path: Path): diff --git a/app/util.py b/stack_orchestrator/util.py similarity index 100% rename from app/util.py rename to stack_orchestrator/util.py diff --git a/app/version.py b/stack_orchestrator/version.py similarity index 96% rename from app/version.py rename to stack_orchestrator/version.py index 5a5c33d4..68e47b44 100644 --- a/app/version.py +++ b/stack_orchestrator/version.py @@ -23,7 +23,7 @@ def command(ctx): '''print tool version''' # See: https://stackoverflow.com/a/20885799/1701505 - from app import data + from stack_orchestrator import data with importlib.resources.open_text(data, "build_tag.txt") as version_file: # TODO: code better version that skips comment lines version_string = version_file.read().splitlines()[1] From 660326f71355b85e3102edc3508c969123bbac01 Mon Sep 17 00:00:00 2001 From: Thomas E Lackey Date: Tue, 7 Nov 2023 18:15:04 -0600 Subject: [PATCH 14/62] Add new build-webapp command and related scripts and containers. (#626) * Add new build-webapp command and related scripts and containers. 
--- .gitea/workflows/test-webapp.yml | 49 +++++++ stack_orchestrator/build/build_containers.py | 126 ++++++++++-------- stack_orchestrator/build/build_webapp.py | 76 +++++++++++ .../cerc-nextjs-base/Dockerfile | 55 ++++++++ .../cerc-nextjs-base/Dockerfile.webapp | 5 + .../container-build/cerc-nextjs-base/build.sh | 13 ++ .../scripts/apply-runtime-env.sh | 36 +++++ .../cerc-nextjs-base/scripts/build-app.sh | 63 +++++++++ .../cerc-nextjs-base/scripts/find-env.sh | 24 ++++ .../scripts/start-serving-app.sh | 13 ++ stack_orchestrator/main.py | 2 + tests/webapp-test/run-webapp-test.sh | 62 +++++++++ tests/webapp-test/test.before | 34 +++++ 13 files changed, 506 insertions(+), 52 deletions(-) create mode 100644 .gitea/workflows/test-webapp.yml create mode 100644 stack_orchestrator/build/build_webapp.py create mode 100644 stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile create mode 100644 stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile.webapp create mode 100755 stack_orchestrator/data/container-build/cerc-nextjs-base/build.sh create mode 100755 stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/apply-runtime-env.sh create mode 100755 stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/build-app.sh create mode 100755 stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/find-env.sh create mode 100755 stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/start-serving-app.sh create mode 100755 tests/webapp-test/run-webapp-test.sh create mode 100644 tests/webapp-test/test.before diff --git a/.gitea/workflows/test-webapp.yml b/.gitea/workflows/test-webapp.yml new file mode 100644 index 00000000..9fbf84b2 --- /dev/null +++ b/.gitea/workflows/test-webapp.yml @@ -0,0 +1,49 @@ +name: Webapp Test + +on: + pull_request: + branches: '*' + push: + branches: + - main + - ci-test + paths-ignore: + - '.gitea/workflows/triggers/*' + +# Needed until we can incorporate docker startup into the executor 
container +env: + DOCKER_HOST: unix:///var/run/dind.sock + +jobs: + test: + name: "Run webapp test suite" + runs-on: ubuntu-latest + steps: + - name: "Clone project repository" + uses: actions/checkout@v3 + # At present the stock setup-python action fails on Linux/aarch64 + # Conditional steps below workaroud this by using deadsnakes for that case only + - name: "Install Python for ARM on Linux" + if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }} + uses: deadsnakes/action@v3.0.1 + with: + python-version: '3.8' + - name: "Install Python cases other than ARM on Linux" + if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }} + uses: actions/setup-python@v4 + with: + python-version: '3.8' + - name: "Print Python version" + run: python3 --version + - name: "Install shiv" + run: pip install shiv + - name: "Generate build version file" + run: ./scripts/create_build_tag_file.sh + - name: "Build local shiv package" + run: ./scripts/build_shiv_package.sh + - name: Start dockerd # Also needed until we can incorporate into the executor + run: | + dockerd -H $DOCKER_HOST --userland-proxy=false & + sleep 5 + - name: "Run webapp tests" + run: ./tests/webapp-test/run-webapp-test.sh diff --git a/stack_orchestrator/build/build_containers.py b/stack_orchestrator/build/build_containers.py index c97a974f..5b2748cc 100644 --- a/stack_orchestrator/build/build_containers.py +++ b/stack_orchestrator/build/build_containers.py @@ -33,6 +33,73 @@ from stack_orchestrator.base import get_npm_registry_url # TODO: find a place for this # epilog="Config provided either in .env or settings.ini or env vars: CERC_REPO_BASE_DIR (defaults to ~/cerc)" +def make_container_build_env(dev_root_path: str, + container_build_dir: str, + debug: bool, + force_rebuild: bool, + extra_build_args: str): + container_build_env = { + "CERC_NPM_REGISTRY_URL": get_npm_registry_url(), + "CERC_GO_AUTH_TOKEN": config("CERC_GO_AUTH_TOKEN", default=""), + "CERC_NPM_AUTH_TOKEN": config("CERC_NPM_AUTH_TOKEN", 
default=""), + "CERC_REPO_BASE_DIR": dev_root_path, + "CERC_CONTAINER_BASE_DIR": container_build_dir, + "CERC_HOST_UID": f"{os.getuid()}", + "CERC_HOST_GID": f"{os.getgid()}", + "DOCKER_BUILDKIT": config("DOCKER_BUILDKIT", default="0") + } + container_build_env.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {}) + container_build_env.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {}) + container_build_env.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {}) + docker_host_env = os.getenv("DOCKER_HOST") + if docker_host_env: + container_build_env.update({"DOCKER_HOST": docker_host_env}) + + return container_build_env + + +def process_container(container, + container_build_dir: str, + container_build_env: dict, + dev_root_path: str, + quiet: bool, + verbose: bool, + dry_run: bool, + continue_on_error: bool, + ): + if not quiet: + print(f"Building: {container}") + build_dir = os.path.join(container_build_dir, container.replace("/", "-")) + build_script_filename = os.path.join(build_dir, "build.sh") + if verbose: + print(f"Build script filename: {build_script_filename}") + if os.path.exists(build_script_filename): + build_command = build_script_filename + else: + if verbose: + print(f"No script file found: {build_script_filename}, using default build script") + repo_dir = container.split('/')[1] + # TODO: make this less of a hack -- should be specified in some metadata somewhere + # Check if we have a repo for this container. 
If not, set the context dir to the container-build subdir + repo_full_path = os.path.join(dev_root_path, repo_dir) + repo_dir_or_build_dir = repo_full_path if os.path.exists(repo_full_path) else build_dir + build_command = os.path.join(container_build_dir, + "default-build.sh") + f" {container}:local {repo_dir_or_build_dir}" + if not dry_run: + if verbose: + print(f"Executing: {build_command} with environment: {container_build_env}") + build_result = subprocess.run(build_command, shell=True, env=container_build_env) + if verbose: + print(f"Return code is: {build_result.returncode}") + if build_result.returncode != 0: + print(f"Error running build for {container}") + if not continue_on_error: + print("FATAL Error: container build failed and --continue-on-error not set, exiting") + sys.exit(1) + else: + print("****** Container Build Error, continuing because --continue-on-error is set") + else: + print("Skipped") @click.command() @click.option('--include', help="only build these containers") @@ -83,61 +150,16 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args): if stack: print(f"Stack: {stack}") - # TODO: make this configurable - container_build_env = { - "CERC_NPM_REGISTRY_URL": get_npm_registry_url(), - "CERC_GO_AUTH_TOKEN": config("CERC_GO_AUTH_TOKEN", default=""), - "CERC_NPM_AUTH_TOKEN": config("CERC_NPM_AUTH_TOKEN", default=""), - "CERC_REPO_BASE_DIR": dev_root_path, - "CERC_CONTAINER_BASE_DIR": container_build_dir, - "CERC_HOST_UID": f"{os.getuid()}", - "CERC_HOST_GID": f"{os.getgid()}", - "DOCKER_BUILDKIT": config("DOCKER_BUILDKIT", default="0") - } - container_build_env.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {}) - container_build_env.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {}) - container_build_env.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {}) - docker_host_env = os.getenv("DOCKER_HOST") - if docker_host_env: - container_build_env.update({"DOCKER_HOST": 
docker_host_env}) - - def process_container(container): - if not quiet: - print(f"Building: {container}") - build_dir = os.path.join(container_build_dir, container.replace("/", "-")) - build_script_filename = os.path.join(build_dir, "build.sh") - if verbose: - print(f"Build script filename: {build_script_filename}") - if os.path.exists(build_script_filename): - build_command = build_script_filename - else: - if verbose: - print(f"No script file found: {build_script_filename}, using default build script") - repo_dir = container.split('/')[1] - # TODO: make this less of a hack -- should be specified in some metadata somewhere - # Check if we have a repo for this container. If not, set the context dir to the container-build subdir - repo_full_path = os.path.join(dev_root_path, repo_dir) - repo_dir_or_build_dir = repo_full_path if os.path.exists(repo_full_path) else build_dir - build_command = os.path.join(container_build_dir, "default-build.sh") + f" {container}:local {repo_dir_or_build_dir}" - if not dry_run: - if verbose: - print(f"Executing: {build_command} with environment: {container_build_env}") - build_result = subprocess.run(build_command, shell=True, env=container_build_env) - if verbose: - print(f"Return code is: {build_result.returncode}") - if build_result.returncode != 0: - print(f"Error running build for {container}") - if not continue_on_error: - print("FATAL Error: container build failed and --continue-on-error not set, exiting") - sys.exit(1) - else: - print("****** Container Build Error, continuing because --continue-on-error is set") - else: - print("Skipped") + container_build_env = make_container_build_env(dev_root_path, + container_build_dir, + debug, + force_rebuild, + extra_build_args) for container in containers_in_scope: if include_exclude_check(container, include, exclude): - process_container(container) + process_container(container, container_build_dir, container_build_env, + dev_root_path, quiet, verbose, dry_run, continue_on_error) else: 
if verbose: print(f"Excluding: {container}") diff --git a/stack_orchestrator/build/build_webapp.py b/stack_orchestrator/build/build_webapp.py new file mode 100644 index 00000000..f4668c5d --- /dev/null +++ b/stack_orchestrator/build/build_webapp.py @@ -0,0 +1,76 @@ +# Copyright © 2022, 2023 Vulcanize + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Builds webapp containers + +# env vars: +# CERC_REPO_BASE_DIR defaults to ~/cerc + +# TODO: display the available list of containers; allow re-build of either all or specific containers + +import os +from decouple import config +import click +from pathlib import Path +from stack_orchestrator.build import build_containers + + +@click.command() +@click.option('--base-container', default="cerc/nextjs-base") +@click.option('--source-repo', help="directory containing the webapp to build", required=True) +@click.option("--force-rebuild", is_flag=True, default=False, help="Override dependency checking -- always rebuild") +@click.option("--extra-build-args", help="Supply extra arguments to build") +@click.pass_context +def command(ctx, base_container, source_repo, force_rebuild, extra_build_args): + '''build the specified webapp container''' + + quiet = ctx.obj.quiet + verbose = ctx.obj.verbose + dry_run = ctx.obj.dry_run + debug = ctx.obj.debug + local_stack = ctx.obj.local_stack + stack = ctx.obj.stack + continue_on_error = 
ctx.obj.continue_on_error + + # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure + container_build_dir = Path(__file__).absolute().parent.parent.joinpath("data", "container-build") + + if local_stack: + dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")] + print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}') + else: + dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc")) + + if not quiet: + print(f'Dev Root is: {dev_root_path}') + + # First build the base container. + container_build_env = build_containers.make_container_build_env(dev_root_path, container_build_dir, debug, + force_rebuild, extra_build_args) + + build_containers.process_container(base_container, container_build_dir, container_build_env, dev_root_path, quiet, + verbose, dry_run, continue_on_error) + + + # Now build the target webapp. We use the same build script, but with a different Dockerfile and work dir. 
+ container_build_env["CERC_CONTAINER_BUILD_WORK_DIR"] = os.path.abspath(source_repo) + container_build_env["CERC_CONTAINER_BUILD_DOCKERFILE"] = os.path.join(container_build_dir, + base_container.replace("/", "-"), + "Dockerfile.webapp") + webapp_name = os.path.abspath(source_repo).split(os.path.sep)[-1] + container_build_env["CERC_CONTAINER_BUILD_TAG"] = f"cerc/{webapp_name}:local" + + build_containers.process_container(base_container, container_build_dir, container_build_env, dev_root_path, quiet, + verbose, dry_run, continue_on_error) diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile b/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile new file mode 100644 index 00000000..147cec29 --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile @@ -0,0 +1,55 @@ +# Originally from: https://github.com/devcontainers/images/blob/main/src/javascript-node/.devcontainer/Dockerfile +# [Choice] Node.js version (use -bullseye variants on local arm64/Apple Silicon): 18, 16, 14, 18-bullseye, 16-bullseye, 14-bullseye, 18-buster, 16-buster, 14-buster +ARG VARIANT=18-bullseye +FROM node:${VARIANT} + +ARG USERNAME=node +ARG NPM_GLOBAL=/usr/local/share/npm-global + +# Add NPM global to PATH. +ENV PATH=${NPM_GLOBAL}/bin:${PATH} +# Prevents npm from printing version warnings +ENV NPM_CONFIG_UPDATE_NOTIFIER=false + +RUN \ + # Configure global npm install location, use group to adapt to UID/GID changes + if ! 
cat /etc/group | grep -e "^npm:" > /dev/null 2>&1; then groupadd -r npm; fi \ + && usermod -a -G npm ${USERNAME} \ + && umask 0002 \ + && mkdir -p ${NPM_GLOBAL} \ + && touch /usr/local/etc/npmrc \ + && chown ${USERNAME}:npm ${NPM_GLOBAL} /usr/local/etc/npmrc \ + && chmod g+s ${NPM_GLOBAL} \ + && npm config -g set prefix ${NPM_GLOBAL} \ + && su ${USERNAME} -c "npm config -g set prefix ${NPM_GLOBAL}" \ + # Install eslint + && su ${USERNAME} -c "umask 0002 && npm install -g eslint" \ + && npm cache clean --force > /dev/null 2>&1 + +# [Optional] Uncomment this section to install additional OS packages. +RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ + && apt-get -y install --no-install-recommends jq gettext-base + +# [Optional] Uncomment if you want to install an additional version of node using nvm +# ARG EXTRA_NODE_VERSION=10 +# RUN su node -c "source /usr/local/share/nvm/nvm.sh && nvm install ${EXTRA_NODE_VERSION}" + +# We do this to get a yq binary from the published container, for the correct architecture we're building here +# COPY --from=docker.io/mikefarah/yq:latest /usr/bin/yq /usr/local/bin/yq + +COPY /scripts /scripts + +# [Optional] Uncomment if you want to install more global node modules +# RUN su node -c "npm install -g " + +# RUN mkdir -p /config +# COPY ./config.yml /config + +# Install simple web server for now (use nginx perhaps later) +# RUN yarn global add http-server + +# Expose port for http +EXPOSE 3000 + +# Default command sleeps forever so docker doesn't kill it +CMD ["/scripts/start-serving-app.sh"] diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile.webapp b/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile.webapp new file mode 100644 index 00000000..f4b5d4d8 --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile.webapp @@ -0,0 +1,5 @@ +FROM cerc/nextjs-base:local +WORKDIR /app +COPY . . 
+RUN rm -rf node_modules build .next* +RUN /scripts/build-app.sh /app diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/build.sh b/stack_orchestrator/data/container-build/cerc-nextjs-base/build.sh new file mode 100755 index 00000000..3cf5f7f4 --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/build.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +# Build cerc/laconic-registry-cli + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +# See: https://stackoverflow.com/a/246128/1701505 +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +CERC_CONTAINER_BUILD_WORK_DIR=${CERC_CONTAINER_BUILD_WORK_DIR:-$SCRIPT_DIR} +CERC_CONTAINER_BUILD_DOCKERFILE=${CERC_CONTAINER_BUILD_DOCKERFILE:-$SCRIPT_DIR/Dockerfile} +CERC_CONTAINER_BUILD_TAG=${CERC_CONTAINER_BUILD_TAG:-cerc/nextjs-base:local} + +docker build -t $CERC_CONTAINER_BUILD_TAG ${build_command_args} -f $CERC_CONTAINER_BUILD_DOCKERFILE $CERC_CONTAINER_BUILD_WORK_DIR diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/apply-runtime-env.sh b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/apply-runtime-env.sh new file mode 100755 index 00000000..ba1cd17d --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/apply-runtime-env.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +WORK_DIR="${1:-./}" +SRC_DIR="${2:-.next}" +TRG_DIR="${3:-.next-r}" + +cd "${WORK_DIR}" || exit 1 + +rm -rf "$TRG_DIR" +mkdir -p "$TRG_DIR" +cp -rp "$SRC_DIR" "$TRG_DIR/" + +if [ -f ".env" ]; then + TMP_ENV=`mktemp` + declare -px > $TMP_ENV + set -a + source .env + source $TMP_ENV + set +a + rm -f $TMP_ENV +fi + +for f in $(find "$TRG_DIR" -regex ".*.[tj]sx?$" -type f | grep -v 'node_modules'); do + for e in $(cat "${f}" | tr -s '[:blank:]' '\n' | tr -s '[{},()]' '\n' | egrep -o '^"CERC_RUNTIME_ENV[^\"]+"$'); do + orig_name=$(echo -n "${e}" | sed 's/"//g') + cur_name=$(echo -n 
"${orig_name}" | sed 's/CERC_RUNTIME_ENV_//g') + cur_val=$(echo -n "\$${cur_name}" | envsubst) + esc_val=$(sed 's/[&/\]/\\&/g' <<< "$cur_val") + echo "$cur_name=$cur_val" + sed -i "s/$orig_name/$esc_val/g" $f + done +done diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/build-app.sh b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/build-app.sh new file mode 100755 index 00000000..9277abc6 --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/build-app.sh @@ -0,0 +1,63 @@ +#!/bin/bash + +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +WORK_DIR="${1:-/app}" + +cd "${WORK_DIR}" || exit 1 + +cp next.config.js next.config.dist + +npm i -g js-beautify +js-beautify next.config.dist > next.config.js + +npm install + +CONFIG_LINES=$(wc -l next.config.js | awk '{ print $1 }') +MOD_EXPORTS_LINE=$(grep -n 'module.exports' next.config.js | cut -d':' -f1) + +head -$(( ${MOD_EXPORTS_LINE} - 1 )) next.config.js > next.config.js.1 + +cat > next.config.js.2 < { + a[v] = \`"CERC_RUNTIME_ENV_\${v.split(/\./).pop()}"\`; + return a; + }, {}); +} catch { + // If .env-list.json cannot be loaded, we are probably running in dev mode, so use process.env instead. + envMap = Object.keys(process.env).reduce((a, v) => { + if (v.startsWith('CERC_')) { + a[\`process.env.\${v}\`] = JSON.stringify(process.env[v]); + } + return a; + }, {}); +} +EOF + +grep 'module.exports' next.config.js > next.config.js.3 + +cat > next.config.js.4 < { + config.plugins.push(new webpack.DefinePlugin(envMap)); + return config; + }, +EOF + +tail -$(( ${CONFIG_LINES} - ${MOD_EXPORTS_LINE} + 1 )) next.config.js | grep -v 'process\.env\.' 
> next.config.js.5 + +cat next.config.js.* | js-beautify > next.config.js +rm next.config.js.* + +"${SCRIPT_DIR}/find-env.sh" "$(pwd)" > .env-list.json + +npm run build +rm .env-list.json \ No newline at end of file diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/find-env.sh b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/find-env.sh new file mode 100755 index 00000000..0c0e87c9 --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/find-env.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +WORK_DIR="${1:-./}" +TMPF=$(mktemp) + +cd "$WORK_DIR" || exit 1 + +for d in $(find . -maxdepth 1 -type d | grep -v '\./\.' | grep '/' | cut -d'/' -f2); do + egrep "/$d[/$]?" .gitignore >/dev/null 2>/dev/null + if [ $? -eq 0 ]; then + continue + fi + + for f in $(find "$d" -regex ".*.[tj]sx?$" -type f); do + cat "$f" | tr -s '[:blank:]' '\n' | tr -s '[{},()]' '\n' | egrep -o 'process.env.[A-Za-z0-9_]+' >> $TMPF + done +done + +cat $TMPF | sort -u | jq --raw-input . | jq --slurp . 
+rm -f $TMPF diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/start-serving-app.sh b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/start-serving-app.sh new file mode 100755 index 00000000..abe72935 --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/start-serving-app.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +CERC_WEBAPP_FILES_DIR="${CERC_WEBAPP_FILES_DIR:-/app}" +cd "$CERC_WEBAPP_FILES_DIR" + +rm -rf .next-r +"$SCRIPT_DIR/apply-runtime-env.sh" "`pwd`" .next .next-r +npm start .next-r -p ${CERC_LISTEN_PORT:-3000} diff --git a/stack_orchestrator/main.py b/stack_orchestrator/main.py index ca1914e6..0b0585e0 100644 --- a/stack_orchestrator/main.py +++ b/stack_orchestrator/main.py @@ -19,6 +19,7 @@ from stack_orchestrator.command_types import CommandOptions from stack_orchestrator.repos import setup_repositories from stack_orchestrator.build import build_containers from stack_orchestrator.build import build_npms +from stack_orchestrator.build import build_webapp from stack_orchestrator.deploy import deploy from stack_orchestrator import version from stack_orchestrator.deploy import deployment @@ -48,6 +49,7 @@ def cli(ctx, stack, quiet, verbose, dry_run, local_stack, debug, continue_on_err cli.add_command(setup_repositories.command, "setup-repositories") cli.add_command(build_containers.command, "build-containers") cli.add_command(build_npms.command, "build-npms") +cli.add_command(build_webapp.command, "build-webapp") cli.add_command(deploy.command, "deploy") # deploy is an alias for deploy-system cli.add_command(deploy.command, "deploy-system") cli.add_command(deployment.command, "deployment") diff --git a/tests/webapp-test/run-webapp-test.sh b/tests/webapp-test/run-webapp-test.sh new file mode 100755 index 00000000..75c4cbd1 --- /dev/null +++ 
b/tests/webapp-test/run-webapp-test.sh @@ -0,0 +1,62 @@ +#!/usr/bin/env bash +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi +# Dump environment variables for debugging +echo "Environment variables:" +env +# Test basic stack-orchestrator webapp +echo "Running stack-orchestrator webapp test" +# Bit of a hack, test the most recent package +TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 ) +# Set a non-default repo dir +export CERC_REPO_BASE_DIR=~/stack-orchestrator-test/repo-base-dir +echo "Testing this package: $TEST_TARGET_SO" +echo "Test version command" +reported_version_string=$( $TEST_TARGET_SO version ) +echo "Version reported is: ${reported_version_string}" +echo "Cloning repositories into: $CERC_REPO_BASE_DIR" +rm -rf $CERC_REPO_BASE_DIR +mkdir -p $CERC_REPO_BASE_DIR +git clone https://git.vdb.to/cerc-io/test-progressive-web-app.git $CERC_REPO_BASE_DIR/test-progressive-web-app + +# Test webapp command execution +$TEST_TARGET_SO build-webapp --source-repo $CERC_REPO_BASE_DIR/test-progressive-web-app + +UUID=`uuidgen` + +set +e + +CONTAINER_ID=$(docker run -p 3000:3000 -d cerc/test-progressive-web-app:local) +sleep 3 +wget -O test.before -m http://localhost:3000 + +docker remove -f $CONTAINER_ID + +CONTAINER_ID=$(docker run -p 3000:3000 -e CERC_WEBAPP_DEBUG=$UUID -d cerc/test-progressive-web-app:local) +sleep 3 +wget -O test.after -m http://localhost:3000 + +docker remove -f $CONTAINER_ID + +echo "###########################################################################" +echo "" + +grep "$UUID" test.before > /dev/null +if [ $? -ne 1 ]; then + echo "BEFORE: FAILED" + exit 1 +else + echo "BEFORE: PASSED" +fi + +grep "`uuidgen`" test.after > /dev/null +if [ $? 
-ne 0 ]; then + echo "AFTER: FAILED" + exit 1 +else + echo "AFTER: PASSED" +fi + +exit 0 \ No newline at end of file diff --git a/tests/webapp-test/test.before b/tests/webapp-test/test.before new file mode 100644 index 00000000..e349f10d --- /dev/null +++ b/tests/webapp-test/test.before @@ -0,0 +1,34 @@ +Laconic Test PWA

Welcome to Laconic!

CONFIG1 has value: CERC_RUNTIME_ENV_CERC_TEST_WEBAPP_CONFIG1

CONFIG2 has value: CERC_RUNTIME_ENV_CERC_TEST_WEBAPP_CONFIG2

WEBAPP_DEBUG has value: CERC_RUNTIME_ENV_CERC_WEBAPP_DEBUG

body,html{padding:0;margin:0;font-family:-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Fira Sans,Droid Sans,Helvetica Neue,sans-serif}a{color:inherit;text-decoration:none}*{box-sizing:border-box}.Home_container__d256j{min-height:100vh;padding:0 .5rem;flex-direction:column}.Home_container__d256j,.Home_main__VkIEL{display:flex;justify-content:center;align-items:center}.Home_main__VkIEL{padding:5rem 0;flex:1 1;flex-direction:column}.Home_footer__yFiaX{width:100%;height:100px;border-top:1px solid #eaeaea;display:flex;justify-content:center;align-items:center}.Home_footer__yFiaX img{margin-left:.5rem}.Home_footer__yFiaX a{display:flex;justify-content:center;align-items:center}.Home_title__hYX6j a{color:#0070f3;text-decoration:none}.Home_title__hYX6j a:active,.Home_title__hYX6j a:focus,.Home_title__hYX6j a:hover{text-decoration:underline}.Home_title__hYX6j{margin:0;line-height:1.15;font-size:4rem}.Home_description__uXNdx,.Home_title__hYX6j{text-align:center}.Home_description__uXNdx{line-height:1.5;font-size:1.5rem}.Home_code__VVrIr{background:#fafafa;border-radius:5px;padding:.75rem;font-size:1.1rem;font-family:Menlo,Monaco,Lucida Console,Liberation Mono,DejaVu Sans Mono,Bitstream Vera Sans Mono,Courier New,monospace}.Home_grid__AVljO{display:flex;align-items:center;justify-content:center;flex-wrap:wrap;max-width:800px;margin-top:3rem}.Home_card__E5spL{margin:1rem;flex-basis:45%;padding:1.5rem;text-align:left;color:inherit;text-decoration:none;border:1px solid #eaeaea;border-radius:10px;transition:color .15s ease,border-color .15s ease}.Home_card__E5spL:active,.Home_card__E5spL:focus,.Home_card__E5spL:hover{color:#0070f3;border-color:#0070f3}.Home_card__E5spL h3{margin:0 0 1rem;font-size:1.5rem}.Home_card__E5spL p{margin:0;font-size:1.25rem;line-height:1.5}.Home_logo__IOQAX{height:1em}@media (max-width:600px){.Home_grid__AVljO{width:100%;flex-direction:column}}!function(){var t="undefined"!=typeof globalThis?globalThis:"undefined"!=typeof 
window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{};function e(t){var e={exports:{}};return t(e,e.exports),e.exports}var r=function(t){return t&&t.Math==Math&&t},n=r("object"==typeof globalThis&&globalThis)||r("object"==typeof window&&window)||r("object"==typeof self&&self)||r("object"==typeof t&&t)||Function("return this")(),o=function(t){try{return!!t()}catch(t){return!0}},i=!o(function(){return 7!=Object.defineProperty({},1,{get:function(){return 7}})[1]}),a={}.propertyIsEnumerable,u=Object.getOwnPropertyDescriptor,s=u&&!a.call({1:2},1)?function(t){var e=u(this,t);return!!e&&e.enumerable}:a,c={f:s},f=function(t,e){return{enumerable:!(1&t),configurable:!(2&t),writable:!(4&t),value:e}},l={}.toString,h=function(t){return l.call(t).slice(8,-1)},p="".split,d=o(function(){return!Object("z").propertyIsEnumerable(0)})?function(t){return"String"==h(t)?p.call(t,""):Object(t)}:Object,v=function(t){if(null==t)throw TypeError("Can't call method on "+t);return t},g=function(t){return d(v(t))},y=function(t){return"object"==typeof t?null!==t:"function"==typeof t},m=function(t,e){if(!y(t))return t;var r,n;if(e&&"function"==typeof(r=t.toString)&&!y(n=r.call(t)))return n;if("function"==typeof(r=t.valueOf)&&!y(n=r.call(t)))return n;if(!e&&"function"==typeof(r=t.toString)&&!y(n=r.call(t)))return n;throw TypeError("Can't convert object to primitive value")},b={}.hasOwnProperty,w=function(t,e){return b.call(t,e)},S=n.document,E=y(S)&&y(S.createElement),x=function(t){return E?S.createElement(t):{}},A=!i&&!o(function(){return 7!=Object.defineProperty(x("div"),"a",{get:function(){return 7}}).a}),O=Object.getOwnPropertyDescriptor,R={f:i?O:function(t,e){if(t=g(t),e=m(e,!0),A)try{return O(t,e)}catch(t){}if(w(t,e))return f(!c.f.call(t,e),t[e])}},j=function(t){if(!y(t))throw TypeError(String(t)+" is not an object");return t},P=Object.defineProperty,I={f:i?P:function(t,e,r){if(j(t),e=m(e,!0),j(r),A)try{return P(t,e,r)}catch(t){}if("get"in r||"set"in r)throw 
TypeError("Accessors not supported");return"value"in r&&(t[e]=r.value),t}},T=i?function(t,e,r){return I.f(t,e,f(1,r))}:function(t,e,r){return t[e]=r,t},k=function(t,e){try{T(n,t,e)}catch(r){n[t]=e}return e},L="__core-js_shared__",U=n[L]||k(L,{}),M=Function.toString;"function"!=typeof U.inspectSource&&(U.inspectSource=function(t){return M.call(t)});var _,N,C,F=U.inspectSource,B=n.WeakMap,D="function"==typeof B&&/native code/.test(F(B)),q=!1,z=e(function(t){(t.exports=function(t,e){return U[t]||(U[t]=void 0!==e?e:{})})("versions",[]).push({version:"3.6.5",mode:"global",copyright:"© 2020 Denis Pushkarev (zloirock.ru)"})}),W=0,K=Math.random(),G=function(t){return"Symbol("+String(void 0===t?"":t)+")_"+(++W+K).toString(36)},$=z("keys"),V=function(t){return $[t]||($[t]=G(t))},H={};if(D){var X=new(0,n.WeakMap),Y=X.get,J=X.has,Q=X.set;_=function(t,e){return Q.call(X,t,e),e},N=function(t){return Y.call(X,t)||{}},C=function(t){return J.call(X,t)}}else{var Z=V("state");H[Z]=!0,_=function(t,e){return T(t,Z,e),e},N=function(t){return w(t,Z)?t[Z]:{}},C=function(t){return w(t,Z)}}var tt,et={set:_,get:N,has:C,enforce:function(t){return C(t)?N(t):_(t,{})},getterFor:function(t){return function(e){var r;if(!y(e)||(r=N(e)).type!==t)throw TypeError("Incompatible receiver, "+t+" required");return r}}},rt=e(function(t){var e=et.get,r=et.enforce,o=String(String).split("String");(t.exports=function(t,e,i,a){var u=!!a&&!!a.unsafe,s=!!a&&!!a.enumerable,c=!!a&&!!a.noTargetGet;"function"==typeof i&&("string"!=typeof e||w(i,"name")||T(i,"name",e),r(i).source=o.join("string"==typeof e?e:"")),t!==n?(u?!c&&t[e]&&(s=!0):delete t[e],s?t[e]=i:T(t,e,i)):s?t[e]=i:k(e,i)})(Function.prototype,"toString",function(){return"function"==typeof this&&e(this).source||F(this)})}),nt=n,ot=function(t){return"function"==typeof t?t:void 0},it=function(t,e){return arguments.length<2?ot(nt[t])||ot(n[t]):nt[t]&&nt[t][e]||n[t]&&n[t][e]},at=Math.ceil,ut=Math.floor,st=function(t){return 
isNaN(t=+t)?0:(t>0?ut:at)(t)},ct=Math.min,ft=function(t){return t>0?ct(st(t),9007199254740991):0},lt=Math.max,ht=Math.min,pt=function(t,e){var r=st(t);return r<0?lt(r+e,0):ht(r,e)},dt=function(t){return function(e,r,n){var o,i=g(e),a=ft(i.length),u=pt(n,a);if(t&&r!=r){for(;a>u;)if((o=i[u++])!=o)return!0}else for(;a>u;u++)if((t||u in i)&&i[u]===r)return t||u||0;return!t&&-1}},vt={includes:dt(!0),indexOf:dt(!1)},gt=vt.indexOf,yt=function(t,e){var r,n=g(t),o=0,i=[];for(r in n)!w(H,r)&&w(n,r)&&i.push(r);for(;e.length>o;)w(n,r=e[o++])&&(~gt(i,r)||i.push(r));return i},mt=["constructor","hasOwnProperty","isPrototypeOf","propertyIsEnumerable","toLocaleString","toString","valueOf"],bt=mt.concat("length","prototype"),wt={f:Object.getOwnPropertyNames||function(t){return yt(t,bt)}},St={f:Object.getOwnPropertySymbols},Et=it("Reflect","ownKeys")||function(t){var e=wt.f(j(t)),r=St.f;return r?e.concat(r(t)):e},xt=function(t,e){for(var r=Et(e),n=I.f,o=R.f,i=0;i2?arguments[2]:void 0,u=Mt((void 0===a?n:pt(a,n))-i,n-o),s=1;for(i0;)i in r?r[o]=r[i]:delete r[o],o+=s,i+=s;return r},Nt=!!Object.getOwnPropertySymbols&&!o(function(){return!String(Symbol())}),Ct=Nt&&!Symbol.sham&&"symbol"==typeof Symbol.iterator,Ft=z("wks"),Bt=n.Symbol,Dt=Ct?Bt:Bt&&Bt.withoutSetter||G,qt=function(t){return w(Ft,t)||(Ft[t]=Nt&&w(Bt,t)?Bt[t]:Dt("Symbol."+t)),Ft[t]},zt=Object.keys||function(t){return yt(t,mt)},Wt=i?Object.defineProperties:function(t,e){j(t);for(var r,n=zt(e),o=n.length,i=0;o>i;)I.f(t,r=n[i++],e[r]);return t},Kt=it("document","documentElement"),Gt=V("IE_PROTO"),$t=function(){},Vt=function(t){return"",a=a.removeChild(a.firstChild)):"string"==typeof 
o.is?a=z.createElement(i,{is:o.is}):(a=z.createElement(i),"select"===i&&(z=a,o.multiple?z.multiple=!0:o.size&&(z.size=o.size))):a=z.createElementNS(a,i),a[tD]=u,a[tR]=o,s(a,u,!1,!1),u.stateNode=a;e:{switch(z=vb(i,o),i){case"dialog":D("cancel",a),D("close",a),_=o;break;case"iframe":case"object":case"embed":D("load",a),_=o;break;case"video":case"audio":for(_=0;_le&&(u.flags|=128,o=!0,Ej(j,!1),u.lanes=4194304)}}else{if(!o){if(null!==(a=Mh(z))){if(u.flags|=128,o=!0,null!==(i=a.updateQueue)&&(u.updateQueue=i,u.flags|=4),Ej(j,!0),null===j.tail&&"hidden"===j.tailMode&&!z.alternate&&!t8)return S(u),null}else 2*eq()-j.renderingStartTime>le&&1073741824!==i&&(u.flags|=128,o=!0,Ej(j,!1),u.lanes=4194304)}j.isBackwards?(z.sibling=u.child,u.child=z):(null!==(i=j.last)?i.sibling=z:u.child=z,j.last=z)}if(null!==j.tail)return u=j.tail,j.rendering=u,j.tail=u.sibling,j.renderingStartTime=eq(),u.sibling=null,i=rv.current,G(rv,o?1&i|2:1&i),u;return S(u),null;case 22:case 23:return Ij(),o=null!==u.memoizedState,null!==a&&null!==a.memoizedState!==o&&(u.flags|=8192),o&&0!=(1&u.mode)?0!=(1073741824&r0)&&(S(u),6&u.subtreeFlags&&(u.flags|=8192)):S(u),null;case 24:case 25:return null}throw Error(p(156,u.tag))}function Jj(a,u){switch(wg(u),u.tag){case 1:return Zf(u.type)&&$f(),65536&(a=u.flags)?(u.flags=-65537&a|128,u):null;case 3:return Jh(),E(tQ),E(tA),Oh(),0!=(65536&(a=u.flags))&&0==(128&a)?(u.flags=-65537&a|128,u):null;case 5:return Lh(u),null;case 13:if(E(rv),null!==(a=u.memoizedState)&&null!==a.dehydrated){if(null===u.alternate)throw Error(p(340));Ig()}return 65536&(a=u.flags)?(u.flags=-65537&a|128,u):null;case 19:return E(rv),null;case 4:return Jh(),null;case 10:return Rg(u.type._context),null;case 22:case 23:return Ij(),null;default:return null}}s=function(a,u){for(var i=u.child;null!==i;){if(5===i.tag||6===i.tag)a.appendChild(i.stateNode);else 
if(4!==i.tag&&null!==i.child){i.child.return=i,i=i.child;continue}if(i===u)break;for(;null===i.sibling;){if(null===i.return||i.return===u)return;i=i.return}i.sibling.return=i.return,i=i.sibling}},w=function(){},x=function(a,u,i,o){var s=a.memoizedProps;if(s!==o){a=u.stateNode,Hh(rp.current);var w,x=null;switch(i){case"input":s=Ya(a,s),o=Ya(a,o),x=[];break;case"select":s=eS({},s,{value:void 0}),o=eS({},o,{value:void 0}),x=[];break;case"textarea":s=gb(a,s),o=gb(a,o),x=[];break;default:"function"!=typeof s.onClick&&"function"==typeof o.onClick&&(a.onclick=Bf)}for(j in ub(i,o),i=null,s)if(!o.hasOwnProperty(j)&&s.hasOwnProperty(j)&&null!=s[j]){if("style"===j){var C=s[j];for(w in C)C.hasOwnProperty(w)&&(i||(i={}),i[w]="")}else"dangerouslySetInnerHTML"!==j&&"children"!==j&&"suppressContentEditableWarning"!==j&&"suppressHydrationWarning"!==j&&"autoFocus"!==j&&(U.hasOwnProperty(j)?x||(x=[]):(x=x||[]).push(j,null))}for(j in o){var _=o[j];if(C=null!=s?s[j]:void 0,o.hasOwnProperty(j)&&_!==C&&(null!=_||null!=C)){if("style"===j){if(C){for(w in C)!C.hasOwnProperty(w)||_&&_.hasOwnProperty(w)||(i||(i={}),i[w]="");for(w in _)_.hasOwnProperty(w)&&C[w]!==_[w]&&(i||(i={}),i[w]=_[w])}else i||(x||(x=[]),x.push(j,i)),i=_}else"dangerouslySetInnerHTML"===j?(_=_?_.__html:void 0,C=C?C.__html:void 0,null!=_&&C!==_&&(x=x||[]).push(j,_)):"children"===j?"string"!=typeof _&&"number"!=typeof _||(x=x||[]).push(j,""+_):"suppressContentEditableWarning"!==j&&"suppressHydrationWarning"!==j&&(U.hasOwnProperty(j)?(null!=_&&"onScroll"===j&&D("scroll",a),x||C===_||(x=[])):(x=x||[]).push(j,_))}}i&&(x=x||[]).push("style",i);var j=x;(u.updateQueue=j)&&(u.flags|=4)}},C=function(a,u,i,o){i!==o&&(u.flags|=4)};var rU=!1,rV=!1,rW="function"==typeof WeakSet?WeakSet:Set,rA=null;function Mj(a,u){var i=a.ref;if(null!==i){if("function"==typeof i)try{i(null)}catch(i){W(a,u,i)}else i.current=null}}function Nj(a,u,i){try{i()}catch(i){W(a,u,i)}}var rQ=!1;function Pj(a,u){if(tC=nk,Ne(a=Me())){if("selectionStart"in a)var 
i={start:a.selectionStart,end:a.selectionEnd};else e:{var o=(i=(i=a.ownerDocument)&&i.defaultView||window).getSelection&&i.getSelection();if(o&&0!==o.rangeCount){i=o.anchorNode;var s,w=o.anchorOffset,x=o.focusNode;o=o.focusOffset;try{i.nodeType,x.nodeType}catch(a){i=null;break e}var C=0,_=-1,j=-1,z=0,P=0,U=a,V=null;n:for(;;){for(;U!==i||0!==w&&3!==U.nodeType||(_=C+w),U!==x||0!==o&&3!==U.nodeType||(j=C+o),3===U.nodeType&&(C+=U.nodeValue.length),null!==(s=U.firstChild);)V=U,U=s;for(;;){if(U===a)break n;if(V===i&&++z===w&&(_=C),V===x&&++P===o&&(j=C),null!==(s=U.nextSibling))break;V=(U=V).parentNode}U=s}i=-1===_||-1===j?null:{start:_,end:j}}else i=null}i=i||{start:0,end:0}}else i=null;for(t_={focusedElem:a,selectionRange:i},nk=!1,rA=u;null!==rA;)if(a=(u=rA).child,0!=(1028&u.subtreeFlags)&&null!==a)a.return=u,rA=a;else for(;null!==rA;){u=rA;try{var B=u.alternate;if(0!=(1024&u.flags))switch(u.tag){case 0:case 11:case 15:case 5:case 6:case 4:case 17:break;case 1:if(null!==B){var $=B.memoizedProps,Y=B.memoizedState,Z=u.stateNode,X=Z.getSnapshotBeforeUpdate(u.elementType===u.type?$:Lg(u.type,$),Y);Z.__reactInternalSnapshotBeforeUpdate=X}break;case 3:var ee=u.stateNode.containerInfo;1===ee.nodeType?ee.textContent="":9===ee.nodeType&&ee.documentElement&&ee.removeChild(ee.documentElement);break;default:throw Error(p(163))}}catch(a){W(u,u.return,a)}if(null!==(a=u.sibling)){a.return=u.return,rA=a;break}rA=u.return}return B=rQ,rQ=!1,B}function Qj(a,u,i){var o=u.updateQueue;if(null!==(o=null!==o?o.lastEffect:null)){var s=o=o.next;do{if((s.tag&a)===a){var w=s.destroy;s.destroy=void 0,void 0!==w&&Nj(u,i,w)}s=s.next}while(s!==o)}}function Rj(a,u){if(null!==(u=null!==(u=u.updateQueue)?u.lastEffect:null)){var i=u=u.next;do{if((i.tag&a)===a){var o=i.create;i.destroy=o()}i=i.next}while(i!==u)}}function Sj(a){var u=a.ref;if(null!==u){var i=a.stateNode;a.tag,a=i,"function"==typeof u?u(a):u.current=a}}function Tj(a){var 
u=a.alternate;null!==u&&(a.alternate=null,Tj(u)),a.child=null,a.deletions=null,a.sibling=null,5===a.tag&&null!==(u=a.stateNode)&&(delete u[tD],delete u[tR],delete u[tI],delete u[tO],delete u[tF]),a.stateNode=null,a.return=null,a.dependencies=null,a.memoizedProps=null,a.memoizedState=null,a.pendingProps=null,a.stateNode=null,a.updateQueue=null}function Uj(a){return 5===a.tag||3===a.tag||4===a.tag}function Vj(a){e:for(;;){for(;null===a.sibling;){if(null===a.return||Uj(a.return))return null;a=a.return}for(a.sibling.return=a.return,a=a.sibling;5!==a.tag&&6!==a.tag&&18!==a.tag;){if(2&a.flags||null===a.child||4===a.tag)continue e;a.child.return=a,a=a.child}if(!(2&a.flags))return a.stateNode}}function Wj(a,u,i){var o=a.tag;if(5===o||6===o)a=a.stateNode,u?8===i.nodeType?i.parentNode.insertBefore(a,u):i.insertBefore(a,u):(8===i.nodeType?(u=i.parentNode).insertBefore(a,i):(u=i).appendChild(a),null!=(i=i._reactRootContainer)||null!==u.onclick||(u.onclick=Bf));else if(4!==o&&null!==(a=a.child))for(Wj(a,u,i),a=a.sibling;null!==a;)Wj(a,u,i),a=a.sibling}function Xj(a,u,i){var o=a.tag;if(5===o||6===o)a=a.stateNode,u?i.insertBefore(a,u):i.appendChild(a);else if(4!==o&&null!==(a=a.child))for(Xj(a,u,i),a=a.sibling;null!==a;)Xj(a,u,i),a=a.sibling}var rB=null,r$=!1;function Zj(a,u,i){for(i=i.child;null!==i;)ak(a,u,i),i=i.sibling}function ak(a,u,i){if(e2&&"function"==typeof e2.onCommitFiberUnmount)try{e2.onCommitFiberUnmount(e1,i)}catch(a){}switch(i.tag){case 5:rV||Mj(i,u);case 6:var o=rB,s=r$;rB=null,Zj(a,u,i),rB=o,r$=s,null!==rB&&(r$?(a=rB,i=i.stateNode,8===a.nodeType?a.parentNode.removeChild(i):a.removeChild(i)):rB.removeChild(i.stateNode));break;case 18:null!==rB&&(r$?(a=rB,i=i.stateNode,8===a.nodeType?Kf(a.parentNode,i):1===a.nodeType&&Kf(a,i),bd(a)):Kf(rB,i.stateNode));break;case 4:o=rB,s=r$,rB=i.stateNode.containerInfo,r$=!0,Zj(a,u,i),rB=o,r$=s;break;case 0:case 11:case 14:case 15:if(!rV&&null!==(o=i.updateQueue)&&null!==(o=o.lastEffect)){s=o=o.next;do{var 
w=s,x=w.destroy;w=w.tag,void 0!==x&&(0!=(2&w)?Nj(i,u,x):0!=(4&w)&&Nj(i,u,x)),s=s.next}while(s!==o)}Zj(a,u,i);break;case 1:if(!rV&&(Mj(i,u),"function"==typeof(o=i.stateNode).componentWillUnmount))try{o.props=i.memoizedProps,o.state=i.memoizedState,o.componentWillUnmount()}catch(a){W(i,u,a)}Zj(a,u,i);break;case 21:default:Zj(a,u,i);break;case 22:1&i.mode?(rV=(o=rV)||null!==i.memoizedState,Zj(a,u,i),rV=o):Zj(a,u,i)}}function bk(a){var u=a.updateQueue;if(null!==u){a.updateQueue=null;var i=a.stateNode;null===i&&(i=a.stateNode=new rW),u.forEach(function(u){var o=ck.bind(null,a,u);i.has(u)||(i.add(u),u.then(o,o))})}}function dk(a,u){var i=u.deletions;if(null!==i)for(var o=0;os&&(s=x),o&=~w}if(o=s,10<(o=(120>(o=eq()-o)?120:480>o?480:1080>o?1080:1920>o?1920:3e3>o?3e3:4320>o?4320:1960*rH(o/1960))-o)){a.timeoutHandle=tL(Qk.bind(null,a,r9,ln),o);break}Qk(a,r9,ln);break;default:throw Error(p(329))}}}return Ek(a,eq()),a.callbackNode===i?Hk.bind(null,a):null}function Ok(a,u){var i=r6;return a.current.memoizedState.isDehydrated&&(Lk(a,u).flags|=256),2!==(a=Jk(a,u))&&(u=r9,r9=i,null!==u&&Gj(u)),a}function Gj(a){null===r9?r9=a:r9.push.apply(r9,a)}function Pk(a){for(var u=a;;){if(16384&u.flags){var i=u.updateQueue;if(null!==i&&null!==(i=i.stores))for(var o=0;oa?16:a,null===lu)var o=!1;else{if(a=lu,lu=null,lo=0,0!=(6&rY))throw Error(p(331));var s=rY;for(rY|=4,rA=a.current;null!==rA;){var w=rA,x=w.child;if(0!=(16&rA.flags)){var C=w.deletions;if(null!==C){for(var _=0;_eq()-r7?Lk(a,0):r5|=i),Ek(a,u)}function Zk(a,u){0===u&&(0==(1&a.mode)?u=1:(u=e6,0==(130023424&(e6<<=1))&&(e6=4194304)));var i=L();null!==(a=Zg(a,u))&&(Ac(a,u,i),Ek(a,i))}function vj(a){var u=a.memoizedState,i=0;null!==u&&(i=u.retryLane),Zk(a,i)}function ck(a,u){var i=0;switch(a.tag){case 13:var o=a.stateNode,s=a.memoizedState;null!==s&&(i=s.retryLane);break;case 19:o=a.stateNode;break;default:throw Error(p(314))}null!==o&&o.delete(u),Zk(a,i)}function 
al(a,u,i,o){this.tag=a,this.key=i,this.sibling=this.child=this.return=this.stateNode=this.type=this.elementType=null,this.index=0,this.ref=null,this.pendingProps=u,this.dependencies=this.memoizedState=this.updateQueue=this.memoizedProps=null,this.mode=o,this.subtreeFlags=this.flags=0,this.deletions=null,this.childLanes=this.lanes=0,this.alternate=null}function Bg(a,u,i,o){return new al(a,u,i,o)}function bj(a){return!(!(a=a.prototype)||!a.isReactComponent)}function $k(a){if("function"==typeof a)return bj(a)?1:0;if(null!=a){if((a=a.$$typeof)===ef)return 11;if(a===em)return 14}return 2}function wh(a,u){var i=a.alternate;return null===i?((i=Bg(a.tag,u,a.key,a.mode)).elementType=a.elementType,i.type=a.type,i.stateNode=a.stateNode,i.alternate=a,a.alternate=i):(i.pendingProps=u,i.type=a.type,i.flags=0,i.subtreeFlags=0,i.deletions=null),i.flags=14680064&a.flags,i.childLanes=a.childLanes,i.lanes=a.lanes,i.child=a.child,i.memoizedProps=a.memoizedProps,i.memoizedState=a.memoizedState,i.updateQueue=a.updateQueue,u=a.dependencies,i.dependencies=null===u?null:{lanes:u.lanes,firstContext:u.firstContext},i.sibling=a.sibling,i.index=a.index,i.ref=a.ref,i}function yh(a,u,i,o,s,w){var x=2;if(o=a,"function"==typeof a)bj(a)&&(x=1);else if("string"==typeof a)x=5;else e:switch(a){case ea:return Ah(i.children,s,w,u);case eu:x=8,s|=8;break;case eo:return(a=Bg(12,i,u,2|s)).elementType=eo,a.lanes=w,a;case ep:return(a=Bg(13,i,u,s)).elementType=ep,a.lanes=w,a;case eg:return(a=Bg(19,i,u,s)).elementType=eg,a.lanes=w,a;case eb:return qj(i,s,w,u);default:if("object"==typeof a&&null!==a)switch(a.$$typeof){case es:x=10;break e;case ec:x=9;break e;case ef:x=11;break e;case em:x=14;break e;case ev:x=16,o=null;break e}throw Error(p(130,null==a?a:typeof a,""))}return(u=Bg(x,i,u,s)).elementType=a,u.type=o,u.lanes=w,u}function Ah(a,u,i,o){return(a=Bg(7,a,o,u)).lanes=i,a}function qj(a,u,i,o){return(a=Bg(22,a,o,u)).elementType=eb,a.lanes=i,a.stateNode={isHidden:!1},a}function 
xh(a,u,i){return(a=Bg(6,a,null,u)).lanes=i,a}function zh(a,u,i){return(u=Bg(4,null!==a.children?a.children:[],a.key,u)).lanes=i,u.stateNode={containerInfo:a.containerInfo,pendingChildren:null,implementation:a.implementation},u}function bl(a,u,i,o,s){this.tag=u,this.containerInfo=a,this.finishedWork=this.pingCache=this.current=this.pendingChildren=null,this.timeoutHandle=-1,this.callbackNode=this.pendingContext=this.context=null,this.callbackPriority=0,this.eventTimes=zc(0),this.expirationTimes=zc(-1),this.entangledLanes=this.finishedLanes=this.mutableReadLanes=this.expiredLanes=this.pingedLanes=this.suspendedLanes=this.pendingLanes=0,this.entanglements=zc(0),this.identifierPrefix=o,this.onRecoverableError=s,this.mutableSourceEagerHydrationData=null}function cl(a,u,i,o,s,w,x,C,_){return a=new bl(a,u,i,C,_),1===u?(u=1,!0===w&&(u|=8)):u=0,w=Bg(3,null,null,u),a.current=w,w.stateNode=a,w.memoizedState={element:o,isDehydrated:i,cache:null,transitions:null,pendingSuspenseBoundaries:null},ah(w),a}function dl(a,u,i){var o=3>>1,s=a[o];if(0>>1;og(C,i))_g(j,C)?(a[o]=j,a[_]=i,o=_):(a[o]=C,a[x]=i,o=x);else if(_g(j,i))a[o]=j,a[_]=i,o=_;else break}}return u}function g(a,u){var i=a.sortIndex-u.sortIndex;return 0!==i?i:a.id-u.id}if("object"==typeof performance&&"function"==typeof performance.now){var i,o=performance;u.unstable_now=function(){return o.now()}}else{var s=Date,w=s.now();u.unstable_now=function(){return s.now()-w}}var x=[],C=[],_=1,j=null,z=3,P=!1,U=!1,V=!1,B="function"==typeof setTimeout?setTimeout:null,$="function"==typeof clearTimeout?clearTimeout:null,Y="undefined"!=typeof setImmediate?setImmediate:null;function G(a){for(var u=h(C);null!==u;){if(null===u.callback)k(C);else if(u.startTime<=a)k(C),u.sortIndex=u.expirationTime,f(x,u);else break;u=h(C)}}function H(a){if(V=!1,G(a),!U){if(null!==h(x))U=!0,I(J);else{var u=h(C);null!==u&&K(H,u.startTime-a)}}}function J(a,i){U=!1,V&&(V=!1,$(ee),ee=-1),P=!0;var 
o=z;try{for(G(i),j=h(x);null!==j&&(!(j.expirationTime>i)||a&&!M());){var s=j.callback;if("function"==typeof s){j.callback=null,z=j.priorityLevel;var w=s(j.expirationTime<=i);i=u.unstable_now(),"function"==typeof w?j.callback=w:j===h(x)&&k(x),G(i)}else k(x);j=h(x)}if(null!==j)var _=!0;else{var B=h(C);null!==B&&K(H,B.startTime-i),_=!1}return _}finally{j=null,z=o,P=!1}}"undefined"!=typeof navigator&&void 0!==navigator.scheduling&&void 0!==navigator.scheduling.isInputPending&&navigator.scheduling.isInputPending.bind(navigator.scheduling);var Z=!1,X=null,ee=-1,en=5,et=-1;function M(){return!(u.unstable_now()-eta||125s?(a.sortIndex=o,f(C,a),null===h(x)&&a===h(C)&&(V?($(ee),ee=-1):V=!0,K(H,o-s))):(a.sortIndex=w,f(x,a),U||P||(U=!0,I(J))),a},u.unstable_shouldYield=M,u.unstable_wrapCallback=function(a){var u=z;return function(){var i=z;z=u;try{return a.apply(this,arguments)}finally{z=i}}}},3840:function(a,u,i){a.exports=i(53)}}]);(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[179],{3143:function(){"use strict";try{self["workbox:window:6.5.4"]&&_()}catch(l){}function n(l,d){return new Promise(function(f){var h=new MessageChannel;h.port1.onmessage=function(l){f(l.data)},l.postMessage(d,[h.port2])})}function t(l,d){for(var f=0;fl.length)&&(d=l.length);for(var f=0,h=Array(d);f=l.length?{done:!0}:{done:!1,value:l[h++]}}}throw TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}return(f=l[Symbol.iterator]()).next.bind(f)}try{self["workbox:core:6.5.4"]&&_()}catch(l){}var i=function(){var l=this;this.promise=new Promise(function(d,f){l.resolve=d,l.reject=f})};function o(l,d){var f=location.href;return new URL(l,f).href===new URL(d,f).href}var u=function(l,d){this.type=l,Object.assign(this,d)};function a(l,d,f){return f?d?d(l):l:(l&&l.then||(l=Promise.resolve(l)),d?l.then(d):l)}function c(){}var l={type:"SKIP_WAITING"};function s(l,d){if(!d)return 
l&&l.then?l.then(c):Promise.resolve()}var d=function(d){function v(l,f){var h,g;return void 0===f&&(f={}),(h=d.call(this)||this).nn={},h.tn=0,h.rn=new i,h.en=new i,h.on=new i,h.un=0,h.an=new Set,h.cn=function(){var l=h.fn,d=l.installing;h.tn>0||!o(d.scriptURL,h.sn.toString())||performance.now()>h.un+6e4?(h.vn=d,l.removeEventListener("updatefound",h.cn)):(h.hn=d,h.an.add(d),h.rn.resolve(d)),++h.tn,d.addEventListener("statechange",h.ln)},h.ln=function(l){var d=h.fn,f=l.target,g=f.state,y=f===h.vn,P={sw:f,isExternal:y,originalEvent:l};!y&&h.mn&&(P.isUpdate=!0),h.dispatchEvent(new u(g,P)),"installed"===g?h.wn=self.setTimeout(function(){"installed"===g&&d.waiting===f&&h.dispatchEvent(new u("waiting",P))},200):"activating"===g&&(clearTimeout(h.wn),y||h.en.resolve(f))},h.dn=function(l){var d=h.hn,f=d!==navigator.serviceWorker.controller;h.dispatchEvent(new u("controlling",{isExternal:f,originalEvent:l,sw:d,isUpdate:h.mn})),f||h.on.resolve(d)},h.gn=(g=function(l){var d=l.data,f=l.ports,g=l.source;return a(h.getSW(),function(){h.an.has(g)&&h.dispatchEvent(new u("message",{data:d,originalEvent:l,ports:f,sw:g}))})},function(){for(var l=[],d=0;dl.put("/",new Response("",{status:200})))}),window.workbox=new d(window.location.origin+"/sw.js",{scope:"/"}),window.workbox.addEventListener("installed",async({isUpdate:l})=>{if(!l){let l=await caches.open("start-url"),d=await fetch("/"),f=d;d.redirected&&(f=new Response(d.body,{status:200,statusText:"OK",headers:d.headers})),await l.put("/",f)}}),window.workbox.addEventListener("installed",async()=>{let l=window.performance.getEntriesByType("resource").map(l=>l.name).filter(l=>l.startsWith(`${window.location.origin}/_next/data/`)&&l.endsWith(".json")),d=await caches.open("next-data");l.forEach(l=>d.add(l))}),window.workbox.register();{let cacheOnFrontEndNav=function(l){if(window.navigator.onLine&&"/"===l)return fetch("/").then(function(l){return 
l.redirected?Promise.resolve():caches.open("start-url").then(d=>d.put("/",l))})},l=history.pushState;history.pushState=function(){l.apply(history,arguments),cacheOnFrontEndNav(arguments[2])};let d=history.replaceState;history.replaceState=function(){d.apply(history,arguments),cacheOnFrontEndNav(arguments[2])},window.addEventListener("online",()=>{cacheOnFrontEndNav(window.location.pathname)})}window.addEventListener("online",()=>{location.reload()})}},4878:function(l,d){"use strict";function getDeploymentIdQueryOrEmptyString(){return""}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"getDeploymentIdQueryOrEmptyString",{enumerable:!0,get:function(){return getDeploymentIdQueryOrEmptyString}})},37:function(){"trimStart"in String.prototype||(String.prototype.trimStart=String.prototype.trimLeft),"trimEnd"in String.prototype||(String.prototype.trimEnd=String.prototype.trimRight),"description"in Symbol.prototype||Object.defineProperty(Symbol.prototype,"description",{configurable:!0,get:function(){var l=/\((.*)\)/.exec(this.toString());return l?l[1]:void 0}}),Array.prototype.flat||(Array.prototype.flat=function(l,d){return d=this.concat.apply([],this),l>1&&d.some(Array.isArray)?d.flat(l-1):d},Array.prototype.flatMap=function(l,d){return this.map(l,d).flat()}),Promise.prototype.finally||(Promise.prototype.finally=function(l){if("function"!=typeof l)return this.then(l,l);var d=this.constructor||Promise;return this.then(function(f){return d.resolve(l()).then(function(){return f})},function(f){return d.resolve(l()).then(function(){throw f})})}),Object.fromEntries||(Object.fromEntries=function(l){return Array.from(l).reduce(function(l,d){return l[d[0]]=d[1],l},{})}),Array.prototype.at||(Array.prototype.at=function(l){var d=Math.trunc(l)||0;if(d<0&&(d+=this.length),!(d<0||d>=this.length))return this[d]})},7192:function(l,d,f){"use 
strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"addBasePath",{enumerable:!0,get:function(){return addBasePath}});let h=f(6063),g=f(2866);function addBasePath(l,d){return(0,g.normalizePathTrailingSlash)((0,h.addPathPrefix)(l,""))}("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},3607:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"addLocale",{enumerable:!0,get:function(){return addLocale}}),f(2866);let addLocale=function(l){for(var d=arguments.length,f=Array(d>1?d-1:0),h=1;h25){window.location.reload();return}clearTimeout(d),d=setTimeout(init,g>5?5e3:1e3)}f&&f.close();let{hostname:y,port:P}=location,b=getSocketProtocol(l.assetPrefix||""),E=l.assetPrefix.replace(/^\/+/,""),S=b+"://"+y+":"+P+(E?"/"+E:"");E.startsWith("http")&&(S=b+"://"+E.split("://",2)[1]),(f=new window.WebSocket(""+S+l.path)).onopen=handleOnline,f.onerror=handleDisconnect,f.onclose=handleDisconnect,f.onmessage=handleMessage}init()}("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},6864:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"hasBasePath",{enumerable:!0,get:function(){return hasBasePath}});let h=f(387);function hasBasePath(l){return(0,h.pathHasPrefix)(l,"")}("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},6623:function(l,d){"use strict";let f;Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in 
d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{DOMAttributeNames:function(){return h},isEqualNode:function(){return isEqualNode},default:function(){return initHeadManager}});let h={acceptCharset:"accept-charset",className:"class",htmlFor:"for",httpEquiv:"http-equiv",noModule:"noModule"};function reactElementToDOM(l){let{type:d,props:f}=l,g=document.createElement(d);for(let l in f){if(!f.hasOwnProperty(l)||"children"===l||"dangerouslySetInnerHTML"===l||void 0===f[l])continue;let y=h[l]||l.toLowerCase();"script"===d&&("async"===y||"defer"===y||"noModule"===y)?g[y]=!!f[l]:g.setAttribute(y,f[l])}let{children:y,dangerouslySetInnerHTML:P}=f;return P?g.innerHTML=P.__html||"":y&&(g.textContent="string"==typeof y?y:Array.isArray(y)?y.join(""):""),g}function isEqualNode(l,d){if(l instanceof HTMLElement&&d instanceof HTMLElement){let f=d.getAttribute("nonce");if(f&&!l.getAttribute("nonce")){let h=d.cloneNode(!0);return h.setAttribute("nonce",""),h.nonce=f,f===l.nonce&&l.isEqualNode(h)}}return l.isEqualNode(d)}function initHeadManager(){return{mountedInstances:new Set,updateHead:l=>{let d={};l.forEach(l=>{if("link"===l.type&&l.props["data-optimized-fonts"]){if(document.querySelector('style[data-href="'+l.props["data-href"]+'"]'))return;l.props.href=l.props["data-href"],l.props["data-href"]=void 0}let f=d[l.type]||[];f.push(l),d[l.type]=f});let h=d.title?d.title[0]:null,g="";if(h){let{children:l}=h.props;g="string"==typeof l?l:Array.isArray(l)?l.join(""):""}g!==document.title&&(document.title=g),["meta","base","link","style","script"].forEach(l=>{f(l,d[l]||[])})}}}f=(l,d)=>{let f=document.getElementsByTagName("head")[0],h=f.querySelector("meta[name=next-head-count]"),g=Number(h.content),y=[];for(let d=0,f=h.previousElementSibling;d{for(let d=0,f=y.length;d{var d;return null==(d=l.parentNode)?void 0:d.removeChild(l)}),b.forEach(l=>f.insertBefore(l,h)),h.content=(g-y.length+b.length).toString()},("function"==typeof d.default||"object"==typeof 
d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},1078:function(l,d,f){"use strict";let h,g,y,P,b,E,S,w,R,O,j,A;Object.defineProperty(d,"__esModule",{value:!0});let M=f(1757);Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{version:function(){return ea},router:function(){return h},emitter:function(){return eo},initialize:function(){return initialize},hydrate:function(){return hydrate}});let C=f(8754);f(37);let I=C._(f(7294)),L=C._(f(745)),x=f(6734),N=C._(f(6860)),D=f(1823),k=f(3937),F=f(9203),U=f(5980),H=f(5612),B=f(109),W=f(4511),q=C._(f(6623)),z=C._(f(804)),G=C._(f(2891)),V=f(8099),X=f(9974),K=f(676),Y=f(869),Q=f(8961),$=f(6864),J=f(9031),Z=f(9642),ee=f(1593),et=C._(f(80)),er=C._(f(5944)),en=C._(f(5677)),ea="14.0.1",eo=(0,N.default)(),looseToArray=l=>[].slice.call(l),ei=!1;let Container=class Container extends I.default.Component{componentDidCatch(l,d){this.props.fn(l,d)}componentDidMount(){this.scrollToHash(),h.isSsr&&(g.isFallback||g.nextExport&&((0,F.isDynamicRoute)(h.pathname)||location.search||ei)||g.props&&g.props.__N_SSG&&(location.search||ei))&&h.replace(h.pathname+"?"+String((0,U.assign)((0,U.urlQueryToSearchParams)(h.query),new URLSearchParams(location.search))),y,{_h:1,shallow:!g.isFallback&&!ei}).catch(l=>{if(!l.cancelled)throw l})}componentDidUpdate(){this.scrollToHash()}scrollToHash(){let{hash:l}=location;if(!(l=l&&l.substring(1)))return;let d=document.getElementById(l);d&&setTimeout(()=>d.scrollIntoView(),0)}render(){return this.props.children}};async function initialize(l){void 0===l&&(l={}),er.default.onSpanEnd(en.default),g=JSON.parse(document.getElementById("__NEXT_DATA__").textContent),window.__NEXT_DATA__=g,A=g.defaultLocale;let 
d=g.assetPrefix||"";if(self.__next_set_public_path__(""+d+"/_next/"),(0,H.setConfig)({serverRuntimeConfig:{},publicRuntimeConfig:g.runtimeConfig||{}}),y=(0,B.getURL)(),(0,$.hasBasePath)(y)&&(y=(0,Q.removeBasePath)(y)),g.scriptLoader){let{initScriptLoader:l}=f(5354);l(g.scriptLoader)}P=new z.default(g.buildId,d);let register=l=>{let[d,f]=l;return P.routeLoader.onEntrypoint(d,f)};return window.__NEXT_P&&window.__NEXT_P.map(l=>setTimeout(()=>register(l),0)),window.__NEXT_P=[],window.__NEXT_P.push=register,(E=(0,q.default)()).getIsSsr=()=>h.isSsr,b=document.getElementById("__next"),{assetPrefix:d}}function renderApp(l,d){return I.default.createElement(l,d)}function AppContainer(l){var d;let{children:f}=l,g=I.default.useMemo(()=>(0,Z.adaptForAppRouterInstance)(h),[]);return I.default.createElement(Container,{fn:l=>renderError({App:R,err:l}).catch(l=>console.error("Error rendering page: ",l))},I.default.createElement(J.AppRouterContext.Provider,{value:g},I.default.createElement(ee.SearchParamsContext.Provider,{value:(0,Z.adaptForSearchParams)(h)},I.default.createElement(Z.PathnameContextProviderAdapter,{router:h,isAutoExport:null!=(d=self.__NEXT_DATA__.autoExport)&&d},I.default.createElement(ee.PathParamsContext.Provider,{value:(0,Z.adaptForPathParams)(h)},I.default.createElement(D.RouterContext.Provider,{value:(0,X.makePublicRouterInstance)(h)},I.default.createElement(x.HeadManagerContext.Provider,{value:E},I.default.createElement(Y.ImageConfigContext.Provider,{value:{deviceSizes:[640,750,828,1080,1200,1920,2048,3840],imageSizes:[16,32,48,64,96,128,256,384],path:"/_next/image",loader:"default",dangerouslyAllowSVG:!1,unoptimized:!1}},f))))))))}let wrapApp=l=>d=>{let f={...d,Component:j,err:g.err,router:h};return I.default.createElement(AppContainer,null,renderApp(l,f))};function renderError(l){let{App:d,err:b}=l;return console.error(b),console.error("A client-side exception has occurred, see here for more info: 
https://nextjs.org/docs/messages/client-side-exception-occurred"),P.loadPage("/_error").then(h=>{let{page:g,styleSheets:y}=h;return(null==S?void 0:S.Component)===g?Promise.resolve().then(()=>M._(f(6908))).then(h=>Promise.resolve().then(()=>M._(f(1337))).then(f=>(d=f.default,l.App=d,h))).then(l=>({ErrorComponent:l.default,styleSheets:[]})):{ErrorComponent:g,styleSheets:y}}).then(f=>{var P;let{ErrorComponent:E,styleSheets:S}=f,w=wrapApp(d),R={Component:E,AppTree:w,router:h,ctx:{err:b,pathname:g.page,query:g.query,asPath:y,AppTree:w}};return Promise.resolve((null==(P=l.props)?void 0:P.err)?l.props:(0,B.loadGetInitialProps)(d,R)).then(d=>doRender({...l,err:b,Component:E,styleSheets:S,props:d}))})}function Head(l){let{callback:d}=l;return I.default.useLayoutEffect(()=>d(),[d]),null}let el={navigationStart:"navigationStart",beforeRender:"beforeRender",afterRender:"afterRender",afterHydrate:"afterHydrate",routeChange:"routeChange"},eu={hydration:"Next.js-hydration",beforeHydration:"Next.js-before-hydration",routeChangeToRender:"Next.js-route-change-to-render",render:"Next.js-render"},es=null,ec=!0;function clearMarks(){[el.beforeRender,el.afterHydrate,el.afterRender,el.routeChange].forEach(l=>performance.clearMarks(l))}function markHydrateComplete(){if(!B.ST)return;performance.mark(el.afterHydrate);let l=performance.getEntriesByName(el.beforeRender,"mark").length;l&&(performance.measure(eu.beforeHydration,el.navigationStart,el.beforeRender),performance.measure(eu.hydration,el.beforeRender,el.afterHydrate)),O&&performance.getEntriesByName(eu.hydration).forEach(O),clearMarks()}function markRenderComplete(){if(!B.ST)return;performance.mark(el.afterRender);let l=performance.getEntriesByName(el.routeChange,"mark");if(!l.length)return;let 
d=performance.getEntriesByName(el.beforeRender,"mark").length;d&&(performance.measure(eu.routeChangeToRender,l[0].name,el.beforeRender),performance.measure(eu.render,el.beforeRender,el.afterRender),O&&(performance.getEntriesByName(eu.render).forEach(O),performance.getEntriesByName(eu.routeChangeToRender).forEach(O))),clearMarks(),[eu.routeChangeToRender,eu.render].forEach(l=>performance.clearMeasures(l))}function renderReactElement(l,d){B.ST&&performance.mark(el.beforeRender);let f=d(ec?markHydrateComplete:markRenderComplete);if(es){let l=I.default.startTransition;l(()=>{es.render(f)})}else es=L.default.hydrateRoot(l,f,{onRecoverableError:et.default}),ec=!1}function Root(l){let{callbacks:d,children:f}=l;return I.default.useLayoutEffect(()=>d.forEach(l=>l()),[d]),I.default.useEffect(()=>{(0,G.default)(O)},[]),f}function doRender(l){let d,{App:f,Component:g,props:y,err:P}=l,E="initial"in l?void 0:l.styleSheets;g=g||S.Component,y=y||S.props;let R={...y,Component:g,err:P,router:h};S=R;let O=!1,j=new Promise((l,f)=>{w&&w(),d=()=>{w=null,l()},w=()=>{O=!0,w=null;let l=Error("Cancel rendering route");l.cancelled=!0,f(l)}});function onHeadCommit(){if(E&&!O){let l=new Set(E.map(l=>l.href)),d=looseToArray(document.querySelectorAll("style[data-n-href]")),f=d.map(l=>l.getAttribute("data-n-href"));for(let h=0;h{let{href:d}=l,f=document.querySelector('style[data-n-href="'+d+'"]');f&&(h.parentNode.insertBefore(f,h.nextSibling),h=f)}),looseToArray(document.querySelectorAll("link[data-n-p]")).forEach(l=>{l.parentNode.removeChild(l)})}if(l.scroll){let{x:d,y:f}=l.scroll;(0,k.handleSmoothScroll)(()=>{window.scrollTo(d,f)})}}function onRootCommit(){d()}!function(){if(!E)return;let l=looseToArray(document.querySelectorAll("style[data-n-href]")),d=new Set(l.map(l=>l.getAttribute("data-n-href"))),f=document.querySelector("noscript[data-n-css]"),h=null==f?void 0:f.getAttribute("data-n-css");E.forEach(l=>{let{href:f,text:g}=l;if(!d.has(f)){let 
l=document.createElement("style");l.setAttribute("data-n-href",f),l.setAttribute("media","x"),h&&l.setAttribute("nonce",h),document.head.appendChild(l),l.appendChild(document.createTextNode(g))}})}();let A=I.default.createElement(I.default.Fragment,null,I.default.createElement(Head,{callback:onHeadCommit}),I.default.createElement(AppContainer,null,renderApp(f,R),I.default.createElement(W.Portal,{type:"next-route-announcer"},I.default.createElement(V.RouteAnnouncer,null))));return renderReactElement(b,l=>I.default.createElement(Root,{callbacks:[l,onRootCommit]},A)),j}async function render(l){if(l.err){await renderError(l);return}try{await doRender(l)}catch(f){let d=(0,K.getProperError)(f);if(d.cancelled)throw d;await renderError({...l,err:d})}}async function hydrate(l){let d=g.err;try{let l=await P.routeLoader.whenEntrypoint("/_app");if("error"in l)throw l.error;let{component:d,exports:f}=l;R=d,f&&f.reportWebVitals&&(O=l=>{let d,{id:h,name:g,startTime:y,value:P,duration:b,entryType:E,entries:S,attribution:w}=l,R=Date.now()+"-"+(Math.floor(Math.random()*(9e12-1))+1e12);S&&S.length&&(d=S[0].startTime);let O={id:h||R,name:g,startTime:y||d,value:null==P?b:P,label:"mark"===E||"measure"===E?"custom":"web-vital"};w&&(O.attribution=w),f.reportWebVitals(O)});let h=await P.routeLoader.whenEntrypoint(g.page);if("error"in h)throw h.error;j=h.component}catch(l){d=(0,K.getProperError)(l)}window.__NEXT_PRELOADREADY&&await window.__NEXT_PRELOADREADY(g.dynamicIds),h=(0,X.createRouter)(g.page,g.query,y,{initialProps:g.props,pageLoader:P,App:R,Component:j,wrapApp,err:d,isFallback:!!g.isFallback,subscription:(l,d,f)=>render(Object.assign({},l,{App:d,scroll:f})),locale:g.locale,locales:g.locales,defaultLocale:A,domainLocales:g.domainLocales,isPreview:g.isPreview}),ei=await h._initialMatchesMiddlewarePromise;let f={App:R,initial:!0,Component:j,props:g.props,err:d};(null==l?void 0:l.beforeRender)&&await l.beforeRender(),render(f)}("function"==typeof d.default||"object"==typeof 
d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},6003:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),f(3737);let h=f(1078);window.next={version:h.version,get router(){return h.router},emitter:h.emitter},(0,h.initialize)({}).then(()=>(0,h.hydrate)()).catch(console.error),("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},2866:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"normalizePathTrailingSlash",{enumerable:!0,get:function(){return normalizePathTrailingSlash}});let h=f(7425),g=f(1156),normalizePathTrailingSlash=l=>{if(!l.startsWith("/"))return l;let{pathname:d,query:f,hash:y}=(0,g.parsePath)(l);return""+(0,h.removeTrailingSlash)(d)+f+y};("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},80:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"default",{enumerable:!0,get:function(){return onRecoverableError}});let h=f(6146);function onRecoverableError(l){let d="function"==typeof reportError?reportError:l=>{window.console.error(l)};l.digest!==h.NEXT_DYNAMIC_NO_SSR_CODE&&d(l)}("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},804:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"default",{enumerable:!0,get:function(){return PageLoader}});let 
h=f(8754),g=f(7192),y=f(2969),P=h._(f(8356)),b=f(3607),E=f(9203),S=f(1748),w=f(7425),R=f(769);f(2338);let PageLoader=class PageLoader{getPageList(){return(0,R.getClientBuildManifest)().then(l=>l.sortedPages)}getMiddleware(){return window.__MIDDLEWARE_MATCHERS=[],window.__MIDDLEWARE_MATCHERS}getDataHref(l){let{asPath:d,href:f,locale:h}=l,{pathname:R,query:O,search:j}=(0,S.parseRelativeUrl)(f),{pathname:A}=(0,S.parseRelativeUrl)(d),M=(0,w.removeTrailingSlash)(R);if("/"!==M[0])throw Error('Route name should start with a "/", got "'+M+'"');return(l=>{let d=(0,P.default)((0,w.removeTrailingSlash)((0,b.addLocale)(l,h)),".json");return(0,g.addBasePath)("/_next/data/"+this.buildId+d+j,!0)})(l.skipInterpolation?A:(0,E.isDynamicRoute)(M)?(0,y.interpolateAs)(R,A,O).result:M)}_isSsg(l){return this.promisedSsgManifest.then(d=>d.has(l))}loadPage(l){return this.routeLoader.loadRoute(l).then(l=>{if("component"in l)return{page:l.component,mod:l.exports,styleSheets:l.styles.map(l=>({href:l.href,text:l.content}))};throw l.error})}prefetch(l){return this.routeLoader.prefetch(l)}constructor(l,d){this.routeLoader=(0,R.createRouteLoader)(d),this.buildId=l,this.assetPrefix=d,this.promisedSsgManifest=new Promise(l=>{window.__SSG_MANIFEST?l(window.__SSG_MANIFEST):window.__SSG_MANIFEST_CB=()=>{l(window.__SSG_MANIFEST)}})}};("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},2891:function(l,d,f){"use strict";let h;Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"default",{enumerable:!0,get:function(){return _default}});let g=["CLS","FCP","FID","INP","LCP","TTFB"];location.href;let y=!1;function onReport(l){h&&h(l)}let _default=l=>{if(h=l,!y)for(let l of(y=!0,g))try{let d;d||(d=f(8018)),d["on"+l](onReport)}catch(d){console.warn("Failed to track "+l+" web-vital",d)}};("function"==typeof 
d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},4511:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"Portal",{enumerable:!0,get:function(){return Portal}});let h=f(7294),g=f(3935),Portal=l=>{let{children:d,type:f}=l,[y,P]=(0,h.useState)(null);return(0,h.useEffect)(()=>{let l=document.createElement(f);return document.body.appendChild(l),P(l),()=>{document.body.removeChild(l)}},[f]),y?(0,g.createPortal)(d,y):null};("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},8961:function(l,d,f){"use strict";function removeBasePath(l){return l}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"removeBasePath",{enumerable:!0,get:function(){return removeBasePath}}),f(6864),("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},5637:function(l,d,f){"use strict";function removeLocale(l,d){return l}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"removeLocale",{enumerable:!0,get:function(){return removeLocale}}),f(1156),("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},3436:function(l,d){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{requestIdleCallback:function(){return f},cancelIdleCallback:function(){return h}});let f="undefined"!=typeof 
self&&self.requestIdleCallback&&self.requestIdleCallback.bind(window)||function(l){let d=Date.now();return self.setTimeout(function(){l({didTimeout:!1,timeRemaining:function(){return Math.max(0,50-(Date.now()-d))}})},1)},h="undefined"!=typeof self&&self.cancelIdleCallback&&self.cancelIdleCallback.bind(window)||function(l){return clearTimeout(l)};("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},4450:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"resolveHref",{enumerable:!0,get:function(){return resolveHref}});let h=f(5980),g=f(4364),y=f(6455),P=f(109),b=f(2866),E=f(2227),S=f(8410),w=f(2969);function resolveHref(l,d,f){let R;let O="string"==typeof d?d:(0,g.formatWithValidation)(d),j=O.match(/^[a-zA-Z]{1,}:\/\//),A=j?O.slice(j[0].length):O,M=A.split("?",1);if((M[0]||"").match(/(\/\/|\\)/)){console.error("Invalid href '"+O+"' passed to next/router in page: '"+l.pathname+"'. 
Repeated forward-slashes (//) or backslashes \\ are not valid in the href.");let d=(0,P.normalizeRepeatedSlashes)(A);O=(j?j[0]:"")+d}if(!(0,E.isLocalURL)(O))return f?[O]:O;try{R=new URL(O.startsWith("#")?l.asPath:l.pathname,"http://n")}catch(l){R=new URL("/","http://n")}try{let l=new URL(O,R);l.pathname=(0,b.normalizePathTrailingSlash)(l.pathname);let d="";if((0,S.isDynamicRoute)(l.pathname)&&l.searchParams&&f){let f=(0,h.searchParamsToUrlQuery)(l.searchParams),{result:P,params:b}=(0,w.interpolateAs)(l.pathname,l.pathname,f);P&&(d=(0,g.formatWithValidation)({pathname:P,hash:l.hash,query:(0,y.omit)(f,b)}))}let P=l.origin===R.origin?l.href.slice(l.origin.length):l.href;return f?[P,d||P]:P}catch(l){return f?[O]:O}}("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},8099:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{RouteAnnouncer:function(){return RouteAnnouncer},default:function(){return b}});let h=f(8754),g=h._(f(7294)),y=f(9974),P={border:0,clip:"rect(0 0 0 0)",height:"1px",margin:"-1px",overflow:"hidden",padding:0,position:"absolute",top:0,width:"1px",whiteSpace:"nowrap",wordWrap:"normal"},RouteAnnouncer=()=>{let{asPath:l}=(0,y.useRouter)(),[d,f]=g.default.useState(""),h=g.default.useRef(l);return g.default.useEffect(()=>{if(h.current!==l){if(h.current=l,document.title)f(document.title);else{var d;let h=document.querySelector("h1"),g=null!=(d=null==h?void 0:h.innerText)?d:null==h?void 0:h.textContent;f(g||l)}}},[l]),g.default.createElement("p",{"aria-live":"assertive",id:"__next-route-announcer__",role:"alert",style:P},d)},b=RouteAnnouncer;("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 
0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},769:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{markAssetError:function(){return markAssetError},isAssetError:function(){return isAssetError},getClientBuildManifest:function(){return getClientBuildManifest},createRouteLoader:function(){return createRouteLoader}}),f(8754),f(8356);let h=f(6912),g=f(3436),y=f(4878);function withFuture(l,d,f){let h,g=d.get(l);if(g)return"future"in g?g.future:Promise.resolve(g);let y=new Promise(l=>{h=l});return d.set(l,g={resolve:h,future:y}),f?f().then(l=>(h(l),l)).catch(f=>{throw d.delete(l),f}):y}let P=Symbol("ASSET_LOAD_ERROR");function markAssetError(l){return Object.defineProperty(l,P,{})}function isAssetError(l){return l&&P in l}function hasPrefetch(l){try{return l=document.createElement("link"),!!window.MSInputMethodContext&&!!document.documentMode||l.relList.supports("prefetch")}catch(l){return!1}}let b=hasPrefetch(),getAssetQueryString=()=>(0,y.getDeploymentIdQueryOrEmptyString)();function prefetchViaDom(l,d,f){return new Promise((h,g)=>{let y='\n link[rel="prefetch"][href^="'+l+'"],\n link[rel="preload"][href^="'+l+'"],\n script[src^="'+l+'"]';if(document.querySelector(y))return h();f=document.createElement("link"),d&&(f.as=d),f.rel="prefetch",f.crossOrigin=void 0,f.onload=h,f.onerror=()=>g(markAssetError(Error("Failed to prefetch: "+l))),f.href=l,document.head.appendChild(f)})}function appendScript(l,d){return new Promise((f,h)=>{(d=document.createElement("script")).onload=f,d.onerror=()=>h(markAssetError(Error("Failed to load script: "+l))),d.crossOrigin=void 0,d.src=l,document.body.appendChild(d)})}function resolvePromiseWithTimeout(l,d,f){return new Promise((h,y)=>{let P=!1;l.then(l=>{P=!0,h(l)}).catch(y),(0,g.requestIdleCallback)(()=>setTimeout(()=>{P||y(f)},d))})}function 
getClientBuildManifest(){if(self.__BUILD_MANIFEST)return Promise.resolve(self.__BUILD_MANIFEST);let l=new Promise(l=>{let d=self.__BUILD_MANIFEST_CB;self.__BUILD_MANIFEST_CB=()=>{l(self.__BUILD_MANIFEST),d&&d()}});return resolvePromiseWithTimeout(l,3800,markAssetError(Error("Failed to load client build manifest")))}function getFilesForRoute(l,d){return getClientBuildManifest().then(f=>{if(!(d in f))throw markAssetError(Error("Failed to lookup route: "+d));let g=f[d].map(d=>l+"/_next/"+encodeURI(d));return{scripts:g.filter(l=>l.endsWith(".js")).map(l=>(0,h.__unsafeCreateTrustedScriptURL)(l)+getAssetQueryString()),css:g.filter(l=>l.endsWith(".css")).map(l=>l+getAssetQueryString())}})}function createRouteLoader(l){let d=new Map,f=new Map,h=new Map,y=new Map;function maybeExecuteScript(l){{let d=f.get(l.toString());return d||(document.querySelector('script[src^="'+l+'"]')?Promise.resolve():(f.set(l.toString(),d=appendScript(l)),d))}}function fetchStyleSheet(l){let d=h.get(l);return d||h.set(l,d=fetch(l).then(d=>{if(!d.ok)throw Error("Failed to load stylesheet: "+l);return d.text().then(d=>({href:l,content:d}))}).catch(l=>{throw markAssetError(l)})),d}return{whenEntrypoint:l=>withFuture(l,d),onEntrypoint(l,f){(f?Promise.resolve().then(()=>f()).then(l=>({component:l&&l.default||l,exports:l}),l=>({error:l})):Promise.resolve(void 0)).then(f=>{let h=d.get(l);h&&"resolve"in h?f&&(d.set(l,f),h.resolve(f)):(f?d.set(l,f):d.delete(l),y.delete(l))})},loadRoute(f,h){return withFuture(f,y,()=>{let g;return resolvePromiseWithTimeout(getFilesForRoute(l,f).then(l=>{let{scripts:h,css:g}=l;return Promise.all([d.has(f)?[]:Promise.all(h.map(maybeExecuteScript)),Promise.all(g.map(fetchStyleSheet))])}).then(l=>this.whenEntrypoint(f).then(d=>({entrypoint:d,styles:l[1]}))),3800,markAssetError(Error("Route did not complete loading: "+f))).then(l=>{let{entrypoint:d,styles:f}=l,h=Object.assign({styles:f},d);return"error"in d?d:h}).catch(l=>{if(h)throw l;return{error:l}}).finally(()=>null==g?void 
0:g())})},prefetch(d){let f;return(f=navigator.connection)&&(f.saveData||/2g/.test(f.effectiveType))?Promise.resolve():getFilesForRoute(l,d).then(l=>Promise.all(b?l.scripts.map(l=>prefetchViaDom(l.toString(),"script")):[])).then(()=>{(0,g.requestIdleCallback)(()=>this.loadRoute(d,!0).catch(()=>{}))}).catch(()=>{})}}}("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},9974:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{Router:function(){return y.default},default:function(){return O},withRouter:function(){return E.default},useRouter:function(){return useRouter},createRouter:function(){return createRouter},makePublicRouterInstance:function(){return makePublicRouterInstance}});let h=f(8754),g=h._(f(7294)),y=h._(f(2997)),P=f(1823),b=h._(f(676)),E=h._(f(3591)),S={router:null,readyCallbacks:[],ready(l){if(this.router)return l();this.readyCallbacks.push(l)}},w=["pathname","route","query","asPath","components","isFallback","basePath","locale","locales","defaultLocale","isReady","isPreview","isLocaleDomain","domainLocales"],R=["push","replace","reload","back","prefetch","beforePopState"];function getRouter(){if(!S.router)throw Error('No router instance found.\nYou should only use "next/router" on the client side of your app.\n');return S.router}Object.defineProperty(S,"events",{get:()=>y.default.events}),w.forEach(l=>{Object.defineProperty(S,l,{get(){let d=getRouter();return d[l]}})}),R.forEach(l=>{S[l]=function(){for(var d=arguments.length,f=Array(d),h=0;h{S.ready(()=>{y.default.events.on(l,function(){for(var d=arguments.length,f=Array(d),h=0;hl()),S.readyCallbacks=[],S.router}function makePublicRouterInstance(l){let d={};for(let f of w){if("object"==typeof 
l[f]){d[f]=Object.assign(Array.isArray(l[f])?[]:{},l[f]);continue}d[f]=l[f]}return d.events=y.default.events,R.forEach(f=>{d[f]=function(){for(var d=arguments.length,h=Array(d),g=0;g{if(y.default.preinit){l.forEach(l=>{y.default.preinit(l,{as:"style"})});return}{let d=document.head;l.forEach(l=>{let f=document.createElement("link");f.type="text/css",f.rel="stylesheet",f.href=l,d.appendChild(f)})}},loadScript=l=>{let{src:d,id:f,onLoad:h=()=>{},onReady:g=null,dangerouslySetInnerHTML:y,children:P="",strategy:b="afterInteractive",onError:S,stylesheets:j}=l,A=f||d;if(A&&R.has(A))return;if(w.has(d)){R.add(A),w.get(d).then(h,S);return}let afterLoad=()=>{g&&g(),R.add(A)},M=document.createElement("script"),C=new Promise((l,d)=>{M.addEventListener("load",function(d){l(),h&&h.call(this,d),afterLoad()}),M.addEventListener("error",function(l){d(l)})}).catch(function(l){S&&S(l)});for(let[f,h]of(y?(M.innerHTML=y.__html||"",afterLoad()):P?(M.textContent="string"==typeof P?P:Array.isArray(P)?P.join(""):"",afterLoad()):d&&(M.src=d,w.set(d,C)),Object.entries(l))){if(void 0===h||O.includes(f))continue;let l=E.DOMAttributeNames[f]||f.toLowerCase();M.setAttribute(l,h)}"worker"===b&&M.setAttribute("type","text/partytown"),M.setAttribute("data-nscript",b),j&&insertStylesheets(j),document.body.appendChild(M)};function handleClientScriptLoad(l){let{strategy:d="afterInteractive"}=l;"lazyOnload"===d?window.addEventListener("load",()=>{(0,S.requestIdleCallback)(()=>loadScript(l))}):loadScript(l)}function loadLazyScript(l){"complete"===document.readyState?(0,S.requestIdleCallback)(()=>loadScript(l)):window.addEventListener("load",()=>{(0,S.requestIdleCallback)(()=>loadScript(l))})}function addBeforeInteractiveToCache(){let l=[...document.querySelectorAll('[data-nscript="beforeInteractive"]'),...document.querySelectorAll('[data-nscript="beforePageRender"]')];l.forEach(l=>{let d=l.id||l.getAttribute("src");R.add(d)})}function 
initScriptLoader(l){l.forEach(handleClientScriptLoad),addBeforeInteractiveToCache()}function Script(l){let{id:d,src:f="",onLoad:h=()=>{},onReady:g=null,strategy:E="afterInteractive",onError:S,stylesheets:w,...O}=l,{updateScripts:j,scripts:A,getIsSsr:M,appDir:C,nonce:I}=(0,P.useContext)(b.HeadManagerContext),L=(0,P.useRef)(!1);(0,P.useEffect)(()=>{let l=d||f;L.current||(g&&l&&R.has(l)&&g(),L.current=!0)},[g,d,f]);let x=(0,P.useRef)(!1);if((0,P.useEffect)(()=>{x.current||("afterInteractive"===E?loadScript(l):"lazyOnload"===E&&loadLazyScript(l),x.current=!0)},[l,E]),("beforeInteractive"===E||"worker"===E)&&(j?(A[E]=(A[E]||[]).concat([{id:d,src:f,onLoad:h,onReady:g,onError:S,...O}]),j(A)):M&&M()?R.add(d||f):M&&!M()&&loadScript(l)),C){if(w&&w.forEach(l=>{y.default.preinit(l,{as:"style"})}),"beforeInteractive"===E)return f?(y.default.preload(f,O.integrity?{as:"script",integrity:O.integrity}:{as:"script"}),P.default.createElement("script",{nonce:I,dangerouslySetInnerHTML:{__html:"(self.__next_s=self.__next_s||[]).push("+JSON.stringify([f])+")"}})):(O.dangerouslySetInnerHTML&&(O.children=O.dangerouslySetInnerHTML.__html,delete O.dangerouslySetInnerHTML),P.default.createElement("script",{nonce:I,dangerouslySetInnerHTML:{__html:"(self.__next_s=self.__next_s||[]).push("+JSON.stringify([0,{...O}])+")"}}));"afterInteractive"===E&&f&&y.default.preload(f,O.integrity?{as:"script",integrity:O.integrity}:{as:"script"})}return null}Object.defineProperty(Script,"__nextScript",{value:!0});let j=Script;("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},5677:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"default",{enumerable:!0,get:function(){return reportToSocket}});let h=f(2114);function reportToSocket(l){if("ended"!==l.state.state)throw Error("Expected span to be 
ended");(0,h.sendMessage)(JSON.stringify({event:"span-end",startTime:l.startTime,endTime:l.state.endTime,spanName:l.name,attributes:l.attributes}))}("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},5944:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"default",{enumerable:!0,get:function(){return y}});let h=f(8754),g=h._(f(6860));let Span=class Span{end(l){if("ended"===this.state.state)throw Error("Span has already ended");this.state={state:"ended",endTime:null!=l?l:Date.now()},this.onSpanEnd(this)}constructor(l,d,f){var h,g;this.name=l,this.attributes=null!=(h=d.attributes)?h:{},this.startTime=null!=(g=d.startTime)?g:Date.now(),this.onSpanEnd=f,this.state={state:"inprogress"}}};let Tracer=class Tracer{startSpan(l,d){return new Span(l,d,this.handleSpanEnd)}onSpanEnd(l){return this._emitter.on("spanend",l),()=>{this._emitter.off("spanend",l)}}constructor(){this._emitter=(0,g.default)(),this.handleSpanEnd=l=>{this._emitter.emit("spanend",l)}}};let y=new Tracer;("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},6912:function(l,d){"use strict";let f;function getPolicy(){if(void 0===f){var l;f=(null==(l=window.trustedTypes)?void 0:l.createPolicy("nextjs",{createHTML:l=>l,createScript:l=>l,createScriptURL:l=>l}))||null}return f}function __unsafeCreateTrustedScriptURL(l){var d;return(null==(d=getPolicy())?void 0:d.createScriptURL(l))||l}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"__unsafeCreateTrustedScriptURL",{enumerable:!0,get:function(){return __unsafeCreateTrustedScriptURL}}),("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 
0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},3737:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),f(4878),self.__next_set_public_path__=l=>{f.p=l},("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},3591:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"default",{enumerable:!0,get:function(){return withRouter}});let h=f(8754),g=h._(f(7294)),y=f(9974);function withRouter(l){function WithRouterWrapper(d){return g.default.createElement(l,{router:(0,y.useRouter)(),...d})}return WithRouterWrapper.getInitialProps=l.getInitialProps,WithRouterWrapper.origGetInitialProps=l.origGetInitialProps,WithRouterWrapper}("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},1337:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"default",{enumerable:!0,get:function(){return App}});let h=f(8754),g=h._(f(7294)),y=f(109);async function appGetInitialProps(l){let{Component:d,ctx:f}=l,h=await (0,y.loadGetInitialProps)(d,f);return{pageProps:h}}let App=class App extends g.default.Component{render(){let{Component:l,pageProps:d}=this.props;return g.default.createElement(l,d)}};App.origGetInitialProps=appGetInitialProps,App.getInitialProps=appGetInitialProps,("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},6908:function(l,d,f){"use 
strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"default",{enumerable:!0,get:function(){return Error}});let h=f(8754),g=h._(f(7294)),y=h._(f(9201)),P={400:"Bad Request",404:"This page could not be found",405:"Method Not Allowed",500:"Internal Server Error"};function _getInitialProps(l){let{res:d,err:f}=l,h=d&&d.statusCode?d.statusCode:f?f.statusCode:404;return{statusCode:h}}let b={error:{fontFamily:'system-ui,"Segoe UI",Roboto,Helvetica,Arial,sans-serif,"Apple Color Emoji","Segoe UI Emoji"',height:"100vh",textAlign:"center",display:"flex",flexDirection:"column",alignItems:"center",justifyContent:"center"},desc:{lineHeight:"48px"},h1:{display:"inline-block",margin:"0 20px 0 0",paddingRight:23,fontSize:24,fontWeight:500,verticalAlign:"top"},h2:{fontSize:14,fontWeight:400,lineHeight:"28px"},wrap:{display:"inline-block"}};let Error=class Error extends g.default.Component{render(){let{statusCode:l,withDarkMode:d=!0}=this.props,f=this.props.title||P[l]||"An unexpected error has occurred";return g.default.createElement("div",{style:b.error},g.default.createElement(y.default,null,g.default.createElement("title",null,l?l+": "+f:"Application error: a client-side exception has occurred")),g.default.createElement("div",{style:b.desc},g.default.createElement("style",{dangerouslySetInnerHTML:{__html:"body{color:#000;background:#fff;margin:0}.next-error-h1{border-right:1px solid rgba(0,0,0,.3)}"+(d?"@media (prefers-color-scheme:dark){body{color:#fff;background:#000}.next-error-h1{border-right:1px solid rgba(255,255,255,.3)}}":"")}}),l?g.default.createElement("h1",{className:"next-error-h1",style:b.h1},l):null,g.default.createElement("div",{style:b.wrap},g.default.createElement("h2",{style:b.h2},this.props.title||l?f:g.default.createElement(g.default.Fragment,null,"Application error: a client-side exception has occurred (see the browser console for more 
information)"),"."))))}};Error.displayName="ErrorPage",Error.getInitialProps=_getInitialProps,Error.origGetInitialProps=_getInitialProps,("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},6861:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"AmpStateContext",{enumerable:!0,get:function(){return y}});let h=f(8754),g=h._(f(7294)),y=g.default.createContext({})},7543:function(l,d){"use strict";function isInAmpMode(l){let{ampFirst:d=!1,hybrid:f=!1,hasQuery:h=!1}=void 0===l?{}:l;return d||f&&h}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"isInAmpMode",{enumerable:!0,get:function(){return isInAmpMode}})},9031:function(l,d,f){"use strict";var h,g;Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{CacheStates:function(){return h},AppRouterContext:function(){return b},LayoutRouterContext:function(){return E},GlobalLayoutRouterContext:function(){return S},TemplateContext:function(){return w}});let y=f(8754),P=y._(f(7294));(g=h||(h={})).LAZY_INITIALIZED="LAZYINITIALIZED",g.DATA_FETCH="DATAFETCH",g.READY="READY";let b=P.default.createContext(null),E=P.default.createContext(null),S=P.default.createContext(null),w=P.default.createContext(null)},684:function(l,d){"use strict";function murmurhash2(l){let d=0;for(let f=0;f>>13,d=Math.imul(d,1540483477)}return d>>>0}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"BloomFilter",{enumerable:!0,get:function(){return BloomFilter}});let BloomFilter=class BloomFilter{static from(l,d){void 0===d&&(d=.01);let f=new BloomFilter(l.length,d);for(let d of l)f.add(d);return f}export(){let 
l={numItems:this.numItems,errorRate:this.errorRate,numBits:this.numBits,numHashes:this.numHashes,bitArray:this.bitArray};return l}import(l){this.numItems=l.numItems,this.errorRate=l.errorRate,this.numBits=l.numBits,this.numHashes=l.numHashes,this.bitArray=l.bitArray}add(l){let d=this.getHashValues(l);d.forEach(l=>{this.bitArray[l]=1})}contains(l){let d=this.getHashValues(l);return d.every(l=>this.bitArray[l])}getHashValues(l){let d=[];for(let f=1;f<=this.numHashes;f++){let h=murmurhash2(""+l+f)%this.numBits;d.push(h)}return d}constructor(l,d){this.numItems=l,this.errorRate=d,this.numBits=Math.ceil(-(l*Math.log(d))/(Math.log(2)*Math.log(2))),this.numHashes=Math.ceil(this.numBits/l*Math.log(2)),this.bitArray=Array(this.numBits).fill(0)}}},2338:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{MODERN_BROWSERSLIST_TARGET:function(){return g.default},COMPILER_NAMES:function(){return y},INTERNAL_HEADERS:function(){return P},COMPILER_INDEXES:function(){return b},PHASE_EXPORT:function(){return E},PHASE_PRODUCTION_BUILD:function(){return S},PHASE_PRODUCTION_SERVER:function(){return w},PHASE_DEVELOPMENT_SERVER:function(){return R},PHASE_TEST:function(){return O},PHASE_INFO:function(){return j},PAGES_MANIFEST:function(){return A},APP_PATHS_MANIFEST:function(){return M},APP_PATH_ROUTES_MANIFEST:function(){return C},BUILD_MANIFEST:function(){return I},APP_BUILD_MANIFEST:function(){return L},FUNCTIONS_CONFIG_MANIFEST:function(){return x},SUBRESOURCE_INTEGRITY_MANIFEST:function(){return N},NEXT_FONT_MANIFEST:function(){return D},EXPORT_MARKER:function(){return k},EXPORT_DETAIL:function(){return F},PRERENDER_MANIFEST:function(){return U},ROUTES_MANIFEST:function(){return H},IMAGES_MANIFEST:function(){return B},SERVER_FILES_MANIFEST:function(){return W},DEV_CLIENT_PAGES_MANIFEST:function(){return q},MIDDLEWARE_MANIFEST:function(){return 
z},DEV_MIDDLEWARE_MANIFEST:function(){return G},REACT_LOADABLE_MANIFEST:function(){return V},FONT_MANIFEST:function(){return X},SERVER_DIRECTORY:function(){return K},CONFIG_FILES:function(){return Y},BUILD_ID_FILE:function(){return Q},BLOCKED_PAGES:function(){return $},CLIENT_PUBLIC_FILES_PATH:function(){return J},CLIENT_STATIC_FILES_PATH:function(){return Z},STRING_LITERAL_DROP_BUNDLE:function(){return ee},NEXT_BUILTIN_DOCUMENT:function(){return et},BARREL_OPTIMIZATION_PREFIX:function(){return er},CLIENT_REFERENCE_MANIFEST:function(){return en},SERVER_REFERENCE_MANIFEST:function(){return ea},MIDDLEWARE_BUILD_MANIFEST:function(){return eo},MIDDLEWARE_REACT_LOADABLE_MANIFEST:function(){return ei},CLIENT_STATIC_FILES_RUNTIME_MAIN:function(){return el},CLIENT_STATIC_FILES_RUNTIME_MAIN_APP:function(){return eu},APP_CLIENT_INTERNALS:function(){return es},CLIENT_STATIC_FILES_RUNTIME_REACT_REFRESH:function(){return ec},CLIENT_STATIC_FILES_RUNTIME_AMP:function(){return ed},CLIENT_STATIC_FILES_RUNTIME_WEBPACK:function(){return ef},CLIENT_STATIC_FILES_RUNTIME_POLYFILLS:function(){return ep},CLIENT_STATIC_FILES_RUNTIME_POLYFILLS_SYMBOL:function(){return eh},EDGE_RUNTIME_WEBPACK:function(){return em},TEMPORARY_REDIRECT_STATUS:function(){return eg},PERMANENT_REDIRECT_STATUS:function(){return e_},STATIC_PROPS_ID:function(){return ey},SERVER_PROPS_ID:function(){return ev},PAGE_SEGMENT_KEY:function(){return eP},GOOGLE_FONT_PROVIDER:function(){return eb},OPTIMIZED_FONT_PROVIDERS:function(){return eE},DEFAULT_SERIF_FONT:function(){return eS},DEFAULT_SANS_SERIF_FONT:function(){return ew},STATIC_STATUS_PAGES:function(){return eR},TRACE_OUTPUT_VERSION:function(){return eO},TURBO_TRACE_DEFAULT_MEMORY_LIMIT:function(){return ej},RSC_MODULE_TYPES:function(){return eA},EDGE_UNSUPPORTED_NODE_APIS:function(){return eT},SYSTEM_ENTRYPOINTS:function(){return eM}});let 
h=f(8754),g=h._(f(8855)),y={client:"client",server:"server",edgeServer:"edge-server"},P=["x-invoke-path","x-invoke-status","x-invoke-error","x-invoke-query","x-middleware-invoke"],b={[y.client]:0,[y.server]:1,[y.edgeServer]:2},E="phase-export",S="phase-production-build",w="phase-production-server",R="phase-development-server",O="phase-test",j="phase-info",A="pages-manifest.json",M="app-paths-manifest.json",C="app-path-routes-manifest.json",I="build-manifest.json",L="app-build-manifest.json",x="functions-config-manifest.json",N="subresource-integrity-manifest",D="next-font-manifest",k="export-marker.json",F="export-detail.json",U="prerender-manifest.json",H="routes-manifest.json",B="images-manifest.json",W="required-server-files.json",q="_devPagesManifest.json",z="middleware-manifest.json",G="_devMiddlewareManifest.json",V="react-loadable-manifest.json",X="font-manifest.json",K="server",Y=["next.config.js","next.config.mjs"],Q="BUILD_ID",$=["/_document","/_app","/_error"],J="public",Z="static",ee="__NEXT_DROP_CLIENT_FILE__",et="__NEXT_BUILTIN_DOCUMENT__",er="__barrel_optimize__",en="client-reference-manifest",ea="server-reference-manifest",eo="middleware-build-manifest",ei="middleware-react-loadable-manifest",el="main",eu=""+el+"-app",es="app-pages-internals",ec="react-refresh",ed="amp",ef="webpack",ep="polyfills",eh=Symbol(ep),em="edge-runtime-webpack",eg=307,e_=308,ey="__N_SSG",ev="__N_SSP",eP="__PAGE__",eb="https://fonts.googleapis.com/",eE=[{url:eb,preconnect:"https://fonts.gstatic.com"},{url:"https://use.typekit.net",preconnect:"https://use.typekit.net"}],eS={name:"Times New 
Roman",xAvgCharWidth:821,azAvgWidth:854.3953488372093,unitsPerEm:2048},ew={name:"Arial",xAvgCharWidth:904,azAvgWidth:934.5116279069767,unitsPerEm:2048},eR=["/500"],eO=1,ej=6e3,eA={client:"client",server:"server"},eT=["clearImmediate","setImmediate","BroadcastChannel","ByteLengthQueuingStrategy","CompressionStream","CountQueuingStrategy","DecompressionStream","DomException","MessageChannel","MessageEvent","MessagePort","ReadableByteStreamController","ReadableStreamBYOBRequest","ReadableStreamDefaultController","TransformStreamDefaultController","WritableStreamDefaultController"],eM=new Set([el,ec,ed,eu]);("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},997:function(l,d){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"escapeStringRegexp",{enumerable:!0,get:function(){return escapeStringRegexp}});let f=/[|\\{}()[\]^$+*?.-]/,h=/[|\\{}()[\]^$+*?.-]/g;function escapeStringRegexp(l){return f.test(l)?l.replace(h,"\\$&"):l}},6734:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"HeadManagerContext",{enumerable:!0,get:function(){return y}});let h=f(8754),g=h._(f(7294)),y=g.default.createContext({})},9201:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{defaultHead:function(){return defaultHead},default:function(){return R}});let h=f(8754),g=f(1757),y=g._(f(7294)),P=h._(f(8955)),b=f(6861),E=f(6734),S=f(7543);function defaultHead(l){void 0===l&&(l=!1);let d=[y.default.createElement("meta",{charSet:"utf-8"})];return l||d.push(y.default.createElement("meta",{name:"viewport",content:"width=device-width"})),d}function onlyReactElement(l,d){return"string"==typeof d||"number"==typeof 
d?l:d.type===y.default.Fragment?l.concat(y.default.Children.toArray(d.props.children).reduce((l,d)=>"string"==typeof d||"number"==typeof d?l:l.concat(d),[])):l.concat(d)}f(1905);let w=["name","httpEquiv","charSet","itemProp"];function unique(){let l=new Set,d=new Set,f=new Set,h={};return g=>{let y=!0,P=!1;if(g.key&&"number"!=typeof g.key&&g.key.indexOf("$")>0){P=!0;let d=g.key.slice(g.key.indexOf("$")+1);l.has(d)?y=!1:l.add(d)}switch(g.type){case"title":case"base":d.has(g.type)?y=!1:d.add(g.type);break;case"meta":for(let l=0,d=w.length;l{let h=l.key||d;if(!f&&"link"===l.type&&l.props.href&&["https://fonts.googleapis.com/css","https://use.typekit.net/"].some(d=>l.props.href.startsWith(d))){let d={...l.props||{}};return d["data-href"]=d.href,d.href=void 0,d["data-optimized-fonts"]=!0,y.default.cloneElement(l,d)}return y.default.cloneElement(l,{key:h})})}function Head(l){let{children:d}=l,f=(0,y.useContext)(b.AmpStateContext),h=(0,y.useContext)(E.HeadManagerContext);return y.default.createElement(P.default,{reduceComponentsToState:reduceComponents,headManager:h,inAmpMode:(0,S.isInAmpMode)(f)},d)}let R=Head;("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},1593:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{SearchParamsContext:function(){return g},PathnameContext:function(){return y},PathParamsContext:function(){return P}});let h=f(7294),g=(0,h.createContext)(null),y=(0,h.createContext)(null),P=(0,h.createContext)(null)},1774:function(l,d){"use strict";function normalizeLocalePath(l,d){let f;let 
h=l.split("/");return(d||[]).some(d=>!!h[1]&&h[1].toLowerCase()===d.toLowerCase()&&(f=d,h.splice(1,1),l=h.join("/")||"/",!0)),{pathname:l,detectedLocale:f}}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"normalizeLocalePath",{enumerable:!0,get:function(){return normalizeLocalePath}})},869:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"ImageConfigContext",{enumerable:!0,get:function(){return P}});let h=f(8754),g=h._(f(7294)),y=f(5494),P=g.default.createContext(y.imageConfigDefault)},5494:function(l,d){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{VALID_LOADERS:function(){return f},imageConfigDefault:function(){return h}});let f=["default","imgix","cloudinary","akamai","custom"],h={deviceSizes:[640,750,828,1080,1200,1920,2048,3840],imageSizes:[16,32,48,64,96,128,256,384],path:"/_next/image",loader:"default",loaderFile:"",domains:[],disableStaticImages:!1,minimumCacheTTL:60,formats:["image/webp"],dangerouslyAllowSVG:!1,contentSecurityPolicy:"script-src 'none'; frame-src 'none'; sandbox;",contentDispositionType:"inline",remotePatterns:[],unoptimized:!1}},5585:function(l,d){"use strict";function getObjectClassLabel(l){return Object.prototype.toString.call(l)}function isPlainObject(l){if("[object Object]"!==getObjectClassLabel(l))return!1;let d=Object.getPrototypeOf(l);return null===d||d.hasOwnProperty("isPrototypeOf")}Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{getObjectClassLabel:function(){return getObjectClassLabel},isPlainObject:function(){return isPlainObject}})},6146:function(l,d){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{NEXT_DYNAMIC_NO_SSR_CODE:function(){return 
f},throwWithNoSSR:function(){return throwWithNoSSR}});let f="NEXT_DYNAMIC_NO_SSR_CODE";function throwWithNoSSR(){let l=Error(f);throw l.digest=f,l}},6860:function(l,d){"use strict";function mitt(){let l=Object.create(null);return{on(d,f){(l[d]||(l[d]=[])).push(f)},off(d,f){l[d]&&l[d].splice(l[d].indexOf(f)>>>0,1)},emit(d){for(var f=arguments.length,h=Array(f>1?f-1:0),g=1;g{l(...h)})}}}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"default",{enumerable:!0,get:function(){return mitt}})},8855:function(l){"use strict";l.exports=["chrome 64","edge 79","firefox 67","opera 51","safari 12"]},3035:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"denormalizePagePath",{enumerable:!0,get:function(){return denormalizePagePath}});let h=f(8410),g=f(9153);function denormalizePagePath(l){let d=(0,g.normalizePathSep)(l);return d.startsWith("/index/")&&!(0,h.isDynamicRoute)(d)?d.slice(6):"/index"!==d?d:"/"}},504:function(l,d){"use strict";function ensureLeadingSlash(l){return l.startsWith("/")?l:"/"+l}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"ensureLeadingSlash",{enumerable:!0,get:function(){return ensureLeadingSlash}})},9153:function(l,d){"use strict";function normalizePathSep(l){return l.replace(/\\/g,"/")}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"normalizePathSep",{enumerable:!0,get:function(){return normalizePathSep}})},1823:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"RouterContext",{enumerable:!0,get:function(){return y}});let h=f(8754),g=h._(f(7294)),y=g.default.createContext(null)},9642:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{adaptForAppRouterInstance:function(){return adaptForAppRouterInstance},adaptForSearchParams:function(){return 
adaptForSearchParams},adaptForPathParams:function(){return adaptForPathParams},PathnameContextProviderAdapter:function(){return PathnameContextProviderAdapter}});let h=f(1757),g=h._(f(7294)),y=f(1593),P=f(8410),b=f(106),E=f(2839);function adaptForAppRouterInstance(l){return{back(){l.back()},forward(){l.forward()},refresh(){l.reload()},push(d,f){let{scroll:h}=void 0===f?{}:f;l.push(d,void 0,{scroll:h})},replace(d,f){let{scroll:h}=void 0===f?{}:f;l.replace(d,void 0,{scroll:h})},prefetch(d){l.prefetch(d)}}}function adaptForSearchParams(l){return l.isReady&&l.query?(0,b.asPathToSearchParams)(l.asPath):new URLSearchParams}function adaptForPathParams(l){if(!l.isReady||!l.query)return null;let d={},f=(0,E.getRouteRegex)(l.pathname),h=Object.keys(f.groups);for(let f of h)d[f]=l.query[f];return d}function PathnameContextProviderAdapter(l){let{children:d,router:f,...h}=l,b=(0,g.useRef)(h.isAutoExport),E=(0,g.useMemo)(()=>{let l;let d=b.current;if(d&&(b.current=!1),(0,P.isDynamicRoute)(f.pathname)&&(f.isFallback||d&&!f.isReady))return null;try{l=new URL(f.asPath,"http://f")}catch(l){return"/"}return l.pathname},[f.asPath,f.isFallback,f.isReady,f.pathname]);return g.default.createElement(y.PathnameContext.Provider,{value:E},d)}},2997:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{default:function(){return Router},matchesMiddleware:function(){return matchesMiddleware},createKey:function(){return createKey}});let h=f(8754),g=f(1757),y=f(7425),P=f(769),b=f(5354),E=g._(f(676)),S=f(3035),w=f(1774),R=h._(f(6860)),O=f(109),j=f(9203),A=f(1748);f(2431);let M=f(2142),C=f(2839),I=f(4364);f(6728);let L=f(1156),x=f(3607),N=f(5637),D=f(8961),k=f(7192),F=f(6864),U=f(4450),H=f(9423),B=f(7007),W=f(7841),q=f(7763),z=f(2227),G=f(5119),V=f(6455),X=f(2969),K=f(3937);function buildCancellationError(){return Object.assign(Error("Route Cancelled"),{cancelled:!0})}async function 
matchesMiddleware(l){let d=await Promise.resolve(l.router.pageLoader.getMiddleware());if(!d)return!1;let{pathname:f}=(0,L.parsePath)(l.asPath),h=(0,F.hasBasePath)(f)?(0,D.removeBasePath)(f):f,g=(0,k.addBasePath)((0,x.addLocale)(h,l.locale));return d.some(l=>new RegExp(l.regexp).test(g))}function stripOrigin(l){let d=(0,O.getLocationOrigin)();return l.startsWith(d)?l.substring(d.length):l}function prepareUrlAs(l,d,f){let[h,g]=(0,U.resolveHref)(l,d,!0),y=(0,O.getLocationOrigin)(),P=h.startsWith(y),b=g&&g.startsWith(y);h=stripOrigin(h),g=g?stripOrigin(g):g;let E=P?h:(0,k.addBasePath)(h),S=f?stripOrigin((0,U.resolveHref)(l,f)):g||h;return{url:E,as:b?S:(0,k.addBasePath)(S)}}function resolveDynamicRoute(l,d){let f=(0,y.removeTrailingSlash)((0,S.denormalizePagePath)(l));return"/404"===f||"/_error"===f?l:(d.includes(f)||d.some(d=>{if((0,j.isDynamicRoute)(d)&&(0,C.getRouteRegex)(d).re.test(f))return l=d,!0}),(0,y.removeTrailingSlash)(l))}function getMiddlewareData(l,d,f){let h={basePath:f.router.basePath,i18n:{locales:f.router.locales},trailingSlash:!1},g=d.headers.get("x-nextjs-rewrite"),b=g||d.headers.get("x-nextjs-matched-path"),E=d.headers.get("x-matched-path");if(!E||b||E.includes("__next_data_catchall")||E.includes("/_error")||E.includes("/404")||(b=E),b){if(b.startsWith("/")){let d=(0,A.parseRelativeUrl)(b),E=(0,B.getNextPathnameInfo)(d.pathname,{nextConfig:h,parseData:!0}),S=(0,y.removeTrailingSlash)(E.pathname);return Promise.all([f.router.pageLoader.getPageList(),(0,P.getClientBuildManifest)()]).then(y=>{let[P,{__rewrites:b}]=y,R=(0,x.addLocale)(E.pathname,E.locale);if((0,j.isDynamicRoute)(R)||!g&&P.includes((0,w.normalizeLocalePath)((0,D.removeBasePath)(R),f.router.locales).pathname)){let f=(0,B.getNextPathnameInfo)((0,A.parseRelativeUrl)(l).pathname,{nextConfig:h,parseData:!0});R=(0,k.addBasePath)(f.pathname),d.pathname=R}if(!P.includes(S)){let l=resolveDynamicRoute(S,P);l!==S&&(S=l)}let 
O=P.includes(S)?S:resolveDynamicRoute((0,w.normalizeLocalePath)((0,D.removeBasePath)(d.pathname),f.router.locales).pathname,P);if((0,j.isDynamicRoute)(O)){let l=(0,M.getRouteMatcher)((0,C.getRouteRegex)(O))(R);Object.assign(d.query,l||{})}return{type:"rewrite",parsedAs:d,resolvedHref:O}})}let d=(0,L.parsePath)(l),E=(0,W.formatNextPathnameInfo)({...(0,B.getNextPathnameInfo)(d.pathname,{nextConfig:h,parseData:!0}),defaultLocale:f.router.defaultLocale,buildId:""});return Promise.resolve({type:"redirect-external",destination:""+E+d.query+d.hash})}let S=d.headers.get("x-nextjs-redirect");if(S){if(S.startsWith("/")){let l=(0,L.parsePath)(S),d=(0,W.formatNextPathnameInfo)({...(0,B.getNextPathnameInfo)(l.pathname,{nextConfig:h,parseData:!0}),defaultLocale:f.router.defaultLocale,buildId:""});return Promise.resolve({type:"redirect-internal",newAs:""+d+l.query+l.hash,newUrl:""+d+l.query+l.hash})}return Promise.resolve({type:"redirect-external",destination:S})}return Promise.resolve({type:"next"})}async function withMiddlewareEffects(l){let d=await matchesMiddleware(l);if(!d||!l.fetchData)return null;try{let d=await l.fetchData(),f=await getMiddlewareData(d.dataHref,d.response,l);return{dataHref:d.dataHref,json:d.json,response:d.response,text:d.text,cacheKey:d.cacheKey,effect:f}}catch(l){return null}}let Y=Symbol("SSG_DATA_NOT_FOUND");function fetchRetry(l,d,f){return fetch(l,{credentials:"same-origin",method:f.method||"GET",headers:Object.assign({},f.headers,{"x-nextjs-data":"1"})}).then(h=>!h.ok&&d>1&&h.status>=500?fetchRetry(l,d-1,f):h)}function tryToParseAsJSON(l){try{return JSON.parse(l)}catch(l){return null}}function fetchNextData(l){var d;let{dataHref:f,inflightCache:h,isPrefetch:g,hasMiddleware:y,isServerRender:b,parseJSON:E,persistCache:S,isBackground:w,unstable_skipClientCache:R}=l,{href:O}=new 
// Generate a short pseudo-random history-entry key (up to 8 base-36 chars,
// skipping the leading "0." of the random fraction).
function createKey() {
  const randomBase36 = Math.random().toString(36);
  return randomBase36.slice(2, 10);
}
of[l,d])if(w){let d=(0,y.removeTrailingSlash)(new URL(w,"http://n").pathname),R=(0,k.addBasePath)((0,x.addLocale)(d,f||this.locale));if(d!==(0,y.removeTrailingSlash)(new URL(this.asPath,"http://n").pathname)){var g,P,b;for(let l of(E=E||!!(null==(g=this._bfl_s)?void 0:g.contains(d))||!!(null==(P=this._bfl_s)?void 0:P.contains(R)),[d,R])){let d=l.split("/");for(let l=0;!S&&l{})}}}}return!1}async change(l,d,f,h,g){var S,w,R,U,H,B,W,G,K;let Q,$;if(!(0,z.isLocalURL)(d))return handleHardNavigation({url:d,router:this}),!1;let J=1===h._h;J||h.shallow||await this._bfl(f,void 0,h.locale);let Z=J||h._shouldResolveHref||(0,L.parsePath)(d).pathname===(0,L.parsePath)(f).pathname,ee={...this.state},et=!0!==this.isReady;this.isReady=!0;let er=this.isSsr;if(J||(this.isSsr=!1),J&&this.clc)return!1;let en=ee.locale;O.ST&&performance.mark("routeChange");let{shallow:ea=!1,scroll:eo=!0}=h,ei={shallow:ea};this._inFlightRoute&&this.clc&&(er||Router.events.emit("routeChangeError",buildCancellationError(),this._inFlightRoute,ei),this.clc(),this.clc=null),f=(0,k.addBasePath)((0,x.addLocale)((0,F.hasBasePath)(f)?(0,D.removeBasePath)(f):f,h.locale,this.defaultLocale));let el=(0,N.removeLocale)((0,F.hasBasePath)(f)?(0,D.removeBasePath)(f):f,ee.locale);this._inFlightRoute=f;let eu=en!==ee.locale;if(!J&&this.onlyAHashChange(el)&&!eu){ee.asPath=el,Router.events.emit("hashChangeStart",f,ei),this.changeState(l,d,f,{...h,scroll:!1}),eo&&this.scrollToHash(el);try{await this.set(ee,this.components[ee.route],null)}catch(l){throw(0,E.default)(l)&&l.cancelled&&Router.events.emit("routeChangeError",l,el,ei),l}return Router.events.emit("hashChangeComplete",f,ei),!0}let es=(0,A.parseRelativeUrl)(d),{pathname:ec,query:ed}=es;if(null==(S=this.components[ec])?void 0:S.__appRouter)return handleHardNavigation({url:f,router:this}),new Promise(()=>{});try{[Q,{__rewrites:$}]=await Promise.all([this.pageLoader.getPageList(),(0,P.getClientBuildManifest)(),this.pageLoader.getMiddleware()])}catch(l){return 
handleHardNavigation({url:f,router:this}),!1}this.urlIsNew(el)||eu||(l="replaceState");let ef=f;ec=ec?(0,y.removeTrailingSlash)((0,D.removeBasePath)(ec)):ec;let ep=(0,y.removeTrailingSlash)(ec),eh=f.startsWith("/")&&(0,A.parseRelativeUrl)(f).pathname,em=!!(eh&&ep!==eh&&(!(0,j.isDynamicRoute)(ep)||!(0,M.getRouteMatcher)((0,C.getRouteRegex)(ep))(eh))),eg=!h.shallow&&await matchesMiddleware({asPath:f,locale:ee.locale,router:this});if(J&&eg&&(Z=!1),Z&&"/_error"!==ec&&(h._shouldResolveHref=!0,es.pathname=resolveDynamicRoute(ec,Q),es.pathname===ec||(ec=es.pathname,es.pathname=(0,k.addBasePath)(ec),eg||(d=(0,I.formatWithValidation)(es)))),!(0,z.isLocalURL)(f))return handleHardNavigation({url:f,router:this}),!1;ef=(0,N.removeLocale)((0,D.removeBasePath)(ef),ee.locale),ep=(0,y.removeTrailingSlash)(ec);let e_=!1;if((0,j.isDynamicRoute)(ep)){let l=(0,A.parseRelativeUrl)(ef),h=l.pathname,g=(0,C.getRouteRegex)(ep);e_=(0,M.getRouteMatcher)(g)(h);let y=ep===h,P=y?(0,X.interpolateAs)(ep,h,ed):{};if(e_&&(!y||P.result))y?f=(0,I.formatWithValidation)(Object.assign({},l,{pathname:P.result,query:(0,V.omit)(ed,P.params)})):Object.assign(ed,e_);else{let l=Object.keys(g.groups).filter(l=>!ed[l]&&!g.groups[l].optional);if(l.length>0&&!eg)throw Error((y?"The provided `href` ("+d+") value is missing query values ("+l.join(", ")+") to be interpolated properly. ":"The provided `as` value ("+h+") is incompatible with the `href` value ("+ep+"). 
")+"Read more: https://nextjs.org/docs/messages/"+(y?"href-interpolation-failed":"incompatible-href-as"))}}J||Router.events.emit("routeChangeStart",f,ei);let ey="/404"===this.pathname||"/_error"===this.pathname;try{let y=await this.getRouteInfo({route:ep,pathname:ec,query:ed,as:f,resolvedAs:ef,routeProps:ei,locale:ee.locale,isPreview:ee.isPreview,hasMiddleware:eg,unstable_skipClientCache:h.unstable_skipClientCache,isQueryUpdating:J&&!this.isFallback,isMiddlewareRewrite:em});if(J||h.shallow||await this._bfl(f,"resolvedAs"in y?y.resolvedAs:void 0,ee.locale),"route"in y&&eg){ep=ec=y.route||ep,ei.shallow||(ed=Object.assign({},y.query||{},ed));let l=(0,F.hasBasePath)(es.pathname)?(0,D.removeBasePath)(es.pathname):es.pathname;if(e_&&ec!==l&&Object.keys(e_).forEach(l=>{e_&&ed[l]===e_[l]&&delete ed[l]}),(0,j.isDynamicRoute)(ec)){let l=!ei.shallow&&y.resolvedAs?y.resolvedAs:(0,k.addBasePath)((0,x.addLocale)(new URL(f,location.href).pathname,ee.locale),!0),d=l;(0,F.hasBasePath)(d)&&(d=(0,D.removeBasePath)(d));let h=(0,C.getRouteRegex)(ec),g=(0,M.getRouteMatcher)(h)(new URL(d,location.href).pathname);g&&Object.assign(ed,g)}}if("type"in y){if("redirect-internal"===y.type)return this.change(l,y.newUrl,y.newAs,h);return handleHardNavigation({url:y.destination,router:this}),new Promise(()=>{})}let P=y.Component;if(P&&P.unstable_scriptLoader){let l=[].concat(P.unstable_scriptLoader());l.forEach(l=>{(0,b.handleClientScriptLoad)(l.props)})}if((y.__N_SSG||y.__N_SSP)&&y.props){if(y.props.pageProps&&y.props.pageProps.__N_REDIRECT){h.locale=!1;let d=y.props.pageProps.__N_REDIRECT;if(d.startsWith("/")&&!1!==y.props.pageProps.__N_REDIRECT_BASE_PATH){let f=(0,A.parseRelativeUrl)(d);f.pathname=resolveDynamicRoute(f.pathname,Q);let{url:g,as:y}=prepareUrlAs(this,d,d);return this.change(l,g,y,h)}return handleHardNavigation({url:d,router:this}),new Promise(()=>{})}if(ee.isPreview=!!y.props.__N_PREVIEW,y.props.notFound===Y){let l;try{await 
this.fetchComponent("/404"),l="/404"}catch(d){l="/_error"}if(y=await this.getRouteInfo({route:l,pathname:l,query:ed,as:f,resolvedAs:ef,routeProps:{shallow:!1},locale:ee.locale,isPreview:ee.isPreview,isNotFound:!0}),"type"in y)throw Error("Unexpected middleware effect on /404")}}J&&"/_error"===this.pathname&&(null==(R=self.__NEXT_DATA__.props)?void 0:null==(w=R.pageProps)?void 0:w.statusCode)===500&&(null==(U=y.props)?void 0:U.pageProps)&&(y.props.pageProps.statusCode=500);let S=h.shallow&&ee.route===(null!=(H=y.route)?H:ep),O=null!=(B=h.scroll)?B:!J&&!S,I=null!=g?g:O?{x:0,y:0}:null,L={...ee,route:ep,pathname:ec,query:ed,asPath:el,isFallback:!1};if(J&&ey){if(y=await this.getRouteInfo({route:this.pathname,pathname:this.pathname,query:ed,as:f,resolvedAs:ef,routeProps:{shallow:!1},locale:ee.locale,isPreview:ee.isPreview,isQueryUpdating:J&&!this.isFallback}),"type"in y)throw Error("Unexpected middleware effect on "+this.pathname);"/_error"===this.pathname&&(null==(G=self.__NEXT_DATA__.props)?void 0:null==(W=G.pageProps)?void 0:W.statusCode)===500&&(null==(K=y.props)?void 0:K.pageProps)&&(y.props.pageProps.statusCode=500);try{await this.set(L,y,I)}catch(l){throw(0,E.default)(l)&&l.cancelled&&Router.events.emit("routeChangeError",l,el,ei),l}return!0}Router.events.emit("beforeHistoryChange",f,ei),this.changeState(l,d,f,h);let N=J&&!I&&!et&&!eu&&(0,q.compareRouterStates)(L,this.state);if(!N){try{await this.set(L,y,I)}catch(l){if(l.cancelled)y.error=y.error||l;else throw l}if(y.error)throw J||Router.events.emit("routeChangeError",y.error,el,ei),y.error;J||Router.events.emit("routeChangeComplete",f,ei),O&&/#.+$/.test(f)&&this.scrollToHash(f)}return!0}catch(l){if((0,E.default)(l)&&l.cancelled)return!1;throw l}}changeState(l,d,f,h){void 0===h&&(h={}),("pushState"!==l||(0,O.getURL)()!==f)&&(this._shallow=h.shallow,window.history[l]({url:d,as:f,options:h,__N:!0,key:this._key="pushState"!==l?this._key:createKey()},"",f))}async 
handleRouteInfoError(l,d,f,h,g,y){if(console.error(l),l.cancelled)throw l;if((0,P.isAssetError)(l)||y)throw Router.events.emit("routeChangeError",l,h,g),handleHardNavigation({url:h,router:this}),buildCancellationError();try{let h;let{page:g,styleSheets:y}=await this.fetchComponent("/_error"),P={props:h,Component:g,styleSheets:y,err:l,error:l};if(!P.props)try{P.props=await this.getInitialProps(g,{err:l,pathname:d,query:f})}catch(l){console.error("Error in error page `getInitialProps`: ",l),P.props={}}return P}catch(l){return this.handleRouteInfoError((0,E.default)(l)?l:Error(l+""),d,f,h,g,!0)}}async getRouteInfo(l){let{route:d,pathname:f,query:h,as:g,resolvedAs:P,routeProps:b,locale:S,hasMiddleware:R,isPreview:O,unstable_skipClientCache:j,isQueryUpdating:A,isMiddlewareRewrite:M,isNotFound:C}=l,L=d;try{var x,N,k,F;let l=getCancelledHandler({route:L,router:this}),d=this.components[L];if(b.shallow&&d&&this.route===L)return d;R&&(d=void 0);let E=!d||"initial"in d?void 0:d,U={dataHref:this.pageLoader.getDataHref({href:(0,I.formatWithValidation)({pathname:f,query:h}),skipInterpolation:!0,asPath:C?"/404":P,locale:S}),hasMiddleware:!0,isServerRender:this.isSsr,parseJSON:!0,inflightCache:A?this.sbc:this.sdc,persistCache:!O,isPrefetch:!1,unstable_skipClientCache:j,isBackground:A},B=A&&!M?null:await withMiddlewareEffects({fetchData:()=>fetchNextData(U),asPath:C?"/404":P,locale:S,router:this}).catch(l=>{if(A)return null;throw l});if(B&&("/_error"===f||"/404"===f)&&(B.effect=void 0),A&&(B?B.json=self.__NEXT_DATA__.props:B={json:self.__NEXT_DATA__.props}),l(),(null==B?void 0:null==(x=B.effect)?void 0:x.type)==="redirect-internal"||(null==B?void 0:null==(N=B.effect)?void 0:N.type)==="redirect-external")return B.effect;if((null==B?void 0:null==(k=B.effect)?void 0:k.type)==="rewrite"){let l=(0,y.removeTrailingSlash)(B.effect.resolvedHref),g=await 
this.pageLoader.getPageList();if((!A||g.includes(l))&&(L=l,f=B.effect.resolvedHref,h={...h,...B.effect.parsedAs.query},P=(0,D.removeBasePath)((0,w.normalizeLocalePath)(B.effect.parsedAs.pathname,this.locales).pathname),d=this.components[L],b.shallow&&d&&this.route===L&&!R))return{...d,route:L}}if((0,H.isAPIRoute)(L))return handleHardNavigation({url:g,router:this}),new Promise(()=>{});let W=E||await this.fetchComponent(L).then(l=>({Component:l.page,styleSheets:l.styleSheets,__N_SSG:l.mod.__N_SSG,__N_SSP:l.mod.__N_SSP})),q=null==B?void 0:null==(F=B.response)?void 0:F.headers.get("x-middleware-skip"),z=W.__N_SSG||W.__N_SSP;q&&(null==B?void 0:B.dataHref)&&delete this.sdc[B.dataHref];let{props:G,cacheKey:V}=await this._getData(async()=>{if(z){if((null==B?void 0:B.json)&&!q)return{cacheKey:B.cacheKey,props:B.json};let l=(null==B?void 0:B.dataHref)?B.dataHref:this.pageLoader.getDataHref({href:(0,I.formatWithValidation)({pathname:f,query:h}),asPath:P,locale:S}),d=await fetchNextData({dataHref:l,isServerRender:this.isSsr,parseJSON:!0,inflightCache:q?{}:this.sdc,persistCache:!O,isPrefetch:!1,unstable_skipClientCache:j});return{cacheKey:d.cacheKey,props:d.json||{}}}return{headers:{},props:await this.getInitialProps(W.Component,{pathname:f,query:h,asPath:g,locale:S,locales:this.locales,defaultLocale:this.defaultLocale})}});return W.__N_SSP&&U.dataHref&&V&&delete this.sdc[V],this.isPreview||!W.__N_SSG||A||fetchNextData(Object.assign({},U,{isBackground:!0,persistCache:!1,inflightCache:this.sbc})).catch(()=>{}),G.pageProps=Object.assign({},G.pageProps),W.props=G,W.route=L,W.query=h,W.resolvedAs=P,this.components[L]=W,W}catch(l){return this.handleRouteInfoError((0,E.getProperError)(l),f,h,g,b)}}set(l,d,f){return 
this.state=l,this.sub(d,this.components["/_app"].Component,f)}beforePopState(l){this._bps=l}onlyAHashChange(l){if(!this.asPath)return!1;let[d,f]=this.asPath.split("#",2),[h,g]=l.split("#",2);return!!g&&d===h&&f===g||d===h&&f!==g}scrollToHash(l){let[,d=""]=l.split("#",2);(0,K.handleSmoothScroll)(()=>{if(""===d||"top"===d){window.scrollTo(0,0);return}let l=decodeURIComponent(d),f=document.getElementById(l);if(f){f.scrollIntoView();return}let h=document.getElementsByName(l)[0];h&&h.scrollIntoView()},{onlyHashChange:this.onlyAHashChange(l)})}urlIsNew(l){return this.asPath!==l}async prefetch(l,d,f){if(void 0===d&&(d=l),void 0===f&&(f={}),(0,G.isBot)(window.navigator.userAgent))return;let h=(0,A.parseRelativeUrl)(l),g=h.pathname,{pathname:P,query:b}=h,E=P,S=await this.pageLoader.getPageList(),w=d,R=void 0!==f.locale?f.locale||void 0:this.locale,O=await matchesMiddleware({asPath:d,locale:R,router:this});h.pathname=resolveDynamicRoute(h.pathname,S),(0,j.isDynamicRoute)(h.pathname)&&(P=h.pathname,h.pathname=P,Object.assign(b,(0,M.getRouteMatcher)((0,C.getRouteRegex)(h.pathname))((0,L.parsePath)(d).pathname)||{}),O||(l=(0,I.formatWithValidation)(h)));let x=await withMiddlewareEffects({fetchData:()=>fetchNextData({dataHref:this.pageLoader.getDataHref({href:(0,I.formatWithValidation)({pathname:E,query:b}),skipInterpolation:!0,asPath:w,locale:R}),hasMiddleware:!0,isServerRender:this.isSsr,parseJSON:!0,inflightCache:this.sdc,persistCache:!this.isPreview,isPrefetch:!0}),asPath:d,locale:R,router:this});if((null==x?void 0:x.effect.type)==="rewrite"&&(h.pathname=x.effect.resolvedHref,P=x.effect.resolvedHref,b={...b,...x.effect.parsedAs.query},w=x.effect.parsedAs.pathname,l=(0,I.formatWithValidation)(h)),(null==x?void 0:x.effect.type)==="redirect-external")return;let N=(0,y.removeTrailingSlash)(P);await this._bfl(d,w,f.locale,!0)&&(this.components[g]={__appRouter:!0}),await Promise.all([this.pageLoader._isSsg(N).then(d=>!!d&&fetchNextData({dataHref:(null==x?void 
0:x.json)?null==x?void 0:x.dataHref:this.pageLoader.getDataHref({href:l,asPath:w,locale:R}),isServerRender:!1,parseJSON:!0,inflightCache:this.sdc,persistCache:!this.isPreview,isPrefetch:!0,unstable_skipClientCache:f.unstable_skipClientCache||f.priority&&!0}).then(()=>!1).catch(()=>!1)),this.pageLoader[f.priority?"loadPage":"prefetch"](N)])}async fetchComponent(l){let d=getCancelledHandler({route:l,router:this});try{let f=await this.pageLoader.loadPage(l);return d(),f}catch(l){throw d(),l}}_getData(l){let d=!1,cancel=()=>{d=!0};return this.clc=cancel,l().then(l=>{if(cancel===this.clc&&(this.clc=null),d){let l=Error("Loading initial props cancelled");throw l.cancelled=!0,l}return l})}_getFlightData(l){return fetchNextData({dataHref:l,isServerRender:!0,parseJSON:!1,inflightCache:this.sdc,persistCache:!1,isPrefetch:!1}).then(l=>{let{text:d}=l;return{data:d}})}getInitialProps(l,d){let{Component:f}=this.components["/_app"],h=this._wrapApp(f);return d.AppTree=h,(0,O.loadGetInitialProps)(f,{AppTree:h,Component:l,router:this,ctx:d})}get route(){return this.state.route}get pathname(){return this.state.pathname}get query(){return this.state.query}get asPath(){return this.state.asPath}get locale(){return this.state.locale}get isFallback(){return this.state.isFallback}get isPreview(){return this.state.isPreview}constructor(l,d,h,{initialProps:g,pageLoader:P,App:b,wrapApp:E,Component:S,err:w,subscription:R,isFallback:M,locale:C,locales:L,defaultLocale:x,domainLocales:N,isPreview:D}){this.sdc={},this.sbc={},this.isFirstPopStateEvent=!0,this._key=createKey(),this.onPopState=l=>{let d;let{isFirstPopStateEvent:f}=this;this.isFirstPopStateEvent=!1;let 
h=l.state;if(!h){let{pathname:l,query:d}=this;this.changeState("replaceState",(0,I.formatWithValidation)({pathname:(0,k.addBasePath)(l),query:d}),(0,O.getURL)());return}if(h.__NA){window.location.reload();return}if(!h.__N||f&&this.locale===h.options.locale&&h.as===this.asPath)return;let{url:g,as:y,options:P,key:b}=h;this._key=b;let{pathname:E}=(0,A.parseRelativeUrl)(g);(!this.isSsr||y!==(0,k.addBasePath)(this.asPath)||E!==(0,k.addBasePath)(this.pathname))&&(!this._bps||this._bps(h))&&this.change("replaceState",g,y,Object.assign({},P,{shallow:P.shallow&&this._shallow,locale:P.locale||this.defaultLocale,_h:0}),d)};let F=(0,y.removeTrailingSlash)(l);this.components={},"/_error"!==l&&(this.components[F]={Component:S,initial:!0,props:g,err:w,__N_SSG:g&&g.__N_SSG,__N_SSP:g&&g.__N_SSP}),this.components["/_app"]={Component:b,styleSheets:[]};{let{BloomFilter:l}=f(684),d={numItems:0,errorRate:.01,numBits:0,numHashes:null,bitArray:[]},h={numItems:0,errorRate:.01,numBits:0,numHashes:null,bitArray:[]};(null==d?void 0:d.numHashes)&&(this._bfl_s=new l(d.numItems,d.errorRate),this._bfl_s.import(d)),(null==h?void 0:h.numHashes)&&(this._bfl_d=new l(h.numItems,h.errorRate),this._bfl_d.import(h))}this.events=Router.events,this.pageLoader=P;let U=(0,j.isDynamicRoute)(l)&&self.__NEXT_DATA__.autoExport;if(this.basePath="",this.sub=R,this.clc=null,this._wrapApp=E,this.isSsr=!0,this.isLocaleDomain=!1,this.isReady=!!(self.__NEXT_DATA__.gssp||self.__NEXT_DATA__.gip||self.__NEXT_DATA__.isExperimentalCompile||self.__NEXT_DATA__.appGip&&!self.__NEXT_DATA__.gsp||!U&&!self.location.search),this.state={route:F,pathname:l,query:d,asPath:U?l:h,isPreview:!!D,locale:void 0,isFallback:M},this._initialMatchesMiddlewarePromise=Promise.resolve(!1),!h.startsWith("//")){let 
f={locale:C},g=(0,O.getURL)();this._initialMatchesMiddlewarePromise=matchesMiddleware({router:this,locale:C,asPath:g}).then(y=>(f._shouldResolveHref=h!==l,this.changeState("replaceState",y?g:(0,I.formatWithValidation)({pathname:(0,k.addBasePath)(l),query:d}),g,f),y))}window.addEventListener("popstate",this.onPopState)}};Router.events=(0,R.default)()},7699:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"addLocale",{enumerable:!0,get:function(){return addLocale}});let h=f(6063),g=f(387);function addLocale(l,d,f,y){if(!d||d===f)return l;let P=l.toLowerCase();return!y&&((0,g.pathHasPrefix)(P,"/api")||(0,g.pathHasPrefix)(P,"/"+d.toLowerCase()))?l:(0,h.addPathPrefix)(l,"/"+d)}},6063:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"addPathPrefix",{enumerable:!0,get:function(){return addPathPrefix}});let h=f(1156);function addPathPrefix(l,d){if(!l.startsWith("/")||!d)return l;let{pathname:f,query:g,hash:y}=(0,h.parsePath)(l);return""+d+f+g+y}},4233:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"addPathSuffix",{enumerable:!0,get:function(){return addPathSuffix}});let h=f(1156);function addPathSuffix(l,d){if(!l.startsWith("/")||!d)return l;let{pathname:f,query:g,hash:y}=(0,h.parsePath)(l);return""+f+d+g+y}},3090:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{normalizeAppPath:function(){return normalizeAppPath},normalizeRscURL:function(){return normalizeRscURL},normalizePostponedURL:function(){return normalizePostponedURL}});let h=f(504),g=f(6163);function normalizeAppPath(l){return(0,h.ensureLeadingSlash)(l.split("/").reduce((l,d,f,h)=>!d||(0,g.isGroupSegment)(d)||"@"===d[0]||("page"===d||"route"===d)&&f===h.length-1?l:l+"/"+d,""))}function normalizeRscURL(l){return 
/**
 * Shallow-compare two router state objects, treating the `query` property as
 * its own one-level-deep comparison instead of a reference check.
 * Returns true only when both objects have the same keys and equal values.
 */
function compareRouterStates(stateA, stateB) {
  const keysA = Object.keys(stateA);
  if (keysA.length !== Object.keys(stateB).length) {
    return false;
  }

  for (const key of keysA) {
    if (key === "query") {
      // `query` gets its own shallow comparison.
      const queryKeysA = Object.keys(stateA.query);
      if (queryKeysA.length !== Object.keys(stateB.query).length) {
        return false;
      }
      for (const queryKey of queryKeysA) {
        if (
          !stateB.query.hasOwnProperty(queryKey) ||
          stateA.query[queryKey] !== stateB.query[queryKey]
        ) {
          return false;
        }
      }
    } else if (!stateB.hasOwnProperty(key) || stateA[key] !== stateB[key]) {
      return false;
    }
  }
  return true;
}
/**
 * Map a route to its on-disk asset path, optionally appending an extension.
 * "/" maps to "/index", and routes under "/index" are prefixed with "/index"
 * again so they cannot collide with the root index asset.
 */
function getAssetPathFromRoute(route, ext) {
  if (ext === void 0) {
    ext = "";
  }
  let assetPath;
  if (route === "/") {
    assetPath = "/index";
  } else if (/^\/index(\/|$)/.test(route)) {
    assetPath = "/index" + route;
  } else {
    assetPath = "" + route;
  }
  return assetPath + ext;
}
l=d.i18nProvider?d.i18nProvider.analyze(w.pathname):(0,h.normalizeLocalePath)(w.pathname,E.locales);w.locale=l.detectedLocale,w.pathname=null!=(P=l.pathname)?P:w.pathname,!l.detectedLocale&&w.buildId&&(l=d.i18nProvider?d.i18nProvider.analyze(R):(0,h.normalizeLocalePath)(R,E.locales)).detectedLocale&&(w.locale=l.detectedLocale)}return w}},3937:function(l,d){"use strict";function handleSmoothScroll(l,d){if(void 0===d&&(d={}),d.onlyHashChange){l();return}let f=document.documentElement,h=f.style.scrollBehavior;f.style.scrollBehavior="auto",d.dontForceLayout||f.getClientRects(),l(),f.style.scrollBehavior=h}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"handleSmoothScroll",{enumerable:!0,get:function(){return handleSmoothScroll}})},8410:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{getSortedRoutes:function(){return h.getSortedRoutes},isDynamicRoute:function(){return g.isDynamicRoute}});let h=f(2677),g=f(9203)},2969:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"interpolateAs",{enumerable:!0,get:function(){return interpolateAs}});let h=f(2142),g=f(2839);function interpolateAs(l,d,f){let y="",P=(0,g.getRouteRegex)(l),b=P.groups,E=(d!==l?(0,h.getRouteMatcher)(P)(d):"")||f;y=l;let S=Object.keys(b);return S.every(l=>{let d=E[l]||"",{repeat:f,optional:h}=b[l],g="["+(f?"...":"")+l+"]";return h&&(g=(d?"":"/")+"["+g+"]"),f&&!Array.isArray(d)&&(d=[d]),(h||l in E)&&(y=y.replace(g,f?d.map(l=>encodeURIComponent(l)).join("/"):encodeURIComponent(d))||"/")})||(y=""),{params:S,result:y}}},5119:function(l,d){"use strict";function isBot(l){return/Googlebot|Mediapartners-Google|AdsBot-Google|googleweblight|Storebot-Google|Google-PageRenderer|Bingbot|BingPreview|Slurp|DuckDuckBot|baiduspider|yandex|sogou|LinkedInBot|bitlybot|tumblr|vkShare|quora link 
/**
 * Split a path string into { pathname, query, hash }.
 * The query part keeps its leading "?" and the hash keeps its leading "#";
 * a "?" appearing after "#" is treated as part of the hash, not a query.
 */
function parsePath(path) {
  const hashIndex = path.indexOf("#");
  const queryIndex = path.indexOf("?");
  // A real query must start before any hash fragment.
  const hasQuery = queryIndex > -1 && (hashIndex < 0 || queryIndex < hashIndex);

  if (!hasQuery && hashIndex < 0) {
    return { pathname: path, query: "", hash: "" };
  }

  return {
    pathname: path.substring(0, hasQuery ? queryIndex : hashIndex),
    query: hasQuery
      ? path.substring(queryIndex, hashIndex > -1 ? hashIndex : void 0)
      : "",
    hash: hashIndex > -1 ? path.slice(hashIndex) : "",
  };
}
/**
 * Convert a URLSearchParams instance into a plain query object.
 * Keys that appear more than once collect their values into an array.
 */
function searchParamsToUrlQuery(searchParams) {
  const query = {};
  searchParams.forEach((value, key) => {
    if (query[key] === void 0) {
      query[key] = value;
    } else if (Array.isArray(query[key])) {
      query[key].push(value);
    } else {
      query[key] = [query[key], value];
    }
  });
  return query;
}

/**
 * Stringify a single query value. Strings, finite numbers, and booleans pass
 * through String(); everything else (objects, NaN, undefined) becomes "".
 */
function stringifyUrlQueryParam(value) {
  const isStringable =
    typeof value === "string" ||
    (typeof value === "number" && !isNaN(value)) ||
    typeof value === "boolean";
  return isStringable ? String(value) : "";
}

/**
 * Convert a plain query object into URLSearchParams.
 * Array values append one entry per element; scalars are set once.
 */
function urlQueryToSearchParams(query) {
  const searchParams = new URLSearchParams();
  Object.entries(query).forEach(([key, value]) => {
    if (Array.isArray(value)) {
      value.forEach((item) => searchParams.append(key, stringifyUrlQueryParam(item)));
    } else {
      searchParams.set(key, stringifyUrlQueryParam(value));
    }
  });
  return searchParams;
}

/**
 * Merge each source URLSearchParams into `target` in order: any key present
 * in a source first deletes all of the target's entries for that key, then
 * appends the source's entries. Returns the mutated target.
 */
function assign(target) {
  for (var argCount = arguments.length, sources = Array(argCount > 1 ? argCount - 1 : 0), i = 1; i < argCount; i++) {
    sources[i - 1] = arguments[i];
  }
  sources.forEach((searchParams) => {
    Array.from(searchParams.keys()).forEach((key) => target.delete(key));
    searchParams.forEach((value, key) => target.append(key, value));
  });
  return target;
}
/**
 * Parse a dynamic-route segment into { key, repeat, optional }.
 * One level of surrounding brackets marks the segment optional, and a
 * leading "..." marks it as a repeated (catch-all) parameter.
 */
function parseParameter(segment) {
  const optional = segment.startsWith("[") && segment.endsWith("]");
  if (optional) {
    segment = segment.slice(1, -1);
  }
  const repeat = segment.startsWith("...");
  if (repeat) {
    segment = segment.slice(3);
  }
  return { key: segment, repeat: repeat, optional: optional };
}
getSafeKeyFromSegment(l){let{getSafeRouteKey:d,segment:f,routeKeys:h,keyPrefix:g}=l,{key:y,optional:P,repeat:b}=parseParameter(f),E=y.replace(/\W/g,"");g&&(E=""+g+E);let S=!1;return(0===E.length||E.length>30)&&(S=!0),isNaN(parseInt(E.slice(0,1)))||(S=!0),S&&(E=d()),g?h[E]=""+g+y:h[E]=""+y,b?P?"(?:/(?<"+E+">.+?))?":"/(?<"+E+">.+?)":"/(?<"+E+">[^/]+?)"}function getNamedParametrizedRoute(l,d){let f=(0,y.removeTrailingSlash)(l).slice(1).split("/"),P=buildGetSafeRouteKey(),b={};return{namedParameterizedRoute:f.map(l=>{let f=h.INTERCEPTION_ROUTE_MARKERS.some(d=>l.startsWith(d)),y=l.match(/\[((?:\[.*\])|.+)\]/);return f&&y?getSafeKeyFromSegment({getSafeRouteKey:P,segment:y[1],routeKeys:b,keyPrefix:d?"nxtI":void 0}):y?getSafeKeyFromSegment({getSafeRouteKey:P,segment:y[1],routeKeys:b,keyPrefix:d?"nxtP":void 0}):"/"+(0,g.escapeStringRegexp)(l)}).join(""),routeKeys:b}}function getNamedRouteRegex(l,d){let f=getNamedParametrizedRoute(l,d);return{...getRouteRegex(l),namedRegex:"^"+f.namedParameterizedRoute+"(?:/)?$",routeKeys:f.routeKeys}}function getNamedMiddlewareRegex(l,d){let{parameterizedRoute:f}=getParametrizedRoute(l),{catchAll:h=!0}=d;if("/"===f)return{namedRegex:"^/"+(h?".*":"")+"$"};let{namedParameterizedRoute:g}=getNamedParametrizedRoute(l,!1);return{namedRegex:"^"+g+(h?"(?:(/.*)?)":"")+"$"}}},2677:function(l,d){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"getSortedRoutes",{enumerable:!0,get:function(){return getSortedRoutes}});let UrlNode=class UrlNode{insert(l){this._insert(l.split("/").filter(Boolean),[],!1)}smoosh(){return this._smoosh()}_smoosh(l){void 0===l&&(l="/");let d=[...this.children.keys()].sort();null!==this.slugName&&d.splice(d.indexOf("[]"),1),null!==this.restSlugName&&d.splice(d.indexOf("[...]"),1),null!==this.optionalRestSlugName&&d.splice(d.indexOf("[[...]]"),1);let 
f=d.map(d=>this.children.get(d)._smoosh(""+l+d+"/")).reduce((l,d)=>[...l,...d],[]);if(null!==this.slugName&&f.push(...this.children.get("[]")._smoosh(l+"["+this.slugName+"]/")),!this.placeholder){let d="/"===l?"/":l.slice(0,-1);if(null!=this.optionalRestSlugName)throw Error('You cannot define a route with the same specificity as a optional catch-all route ("'+d+'" and "'+d+"[[..."+this.optionalRestSlugName+']]").');f.unshift(d)}return null!==this.restSlugName&&f.push(...this.children.get("[...]")._smoosh(l+"[..."+this.restSlugName+"]/")),null!==this.optionalRestSlugName&&f.push(...this.children.get("[[...]]")._smoosh(l+"[[..."+this.optionalRestSlugName+"]]/")),f}_insert(l,d,f){if(0===l.length){this.placeholder=!1;return}if(f)throw Error("Catch-all must be the last part of the URL.");let h=l[0];if(h.startsWith("[")&&h.endsWith("]")){let g=h.slice(1,-1),y=!1;if(g.startsWith("[")&&g.endsWith("]")&&(g=g.slice(1,-1),y=!0),g.startsWith("...")&&(g=g.substring(3),f=!0),g.startsWith("[")||g.endsWith("]"))throw Error("Segment names may not start or end with extra brackets ('"+g+"').");if(g.startsWith("."))throw Error("Segment names may not start with erroneous periods ('"+g+"').");function handleSlug(l,f){if(null!==l&&l!==f)throw Error("You cannot use different slug names for the same dynamic path ('"+l+"' !== '"+f+"').");d.forEach(l=>{if(l===f)throw Error('You cannot have the same slug name "'+f+'" repeat within a single dynamic path');if(l.replace(/\W/g,"")===h.replace(/\W/g,""))throw Error('You cannot have the slug names "'+l+'" and "'+f+'" differ only by non-word symbols within a single dynamic path')}),d.push(f)}if(f){if(y){if(null!=this.restSlugName)throw Error('You cannot use both an required and optional catch-all route at the same level ("[...'+this.restSlugName+']" and "'+l[0]+'" ).');handleSlug(this.optionalRestSlugName,g),this.optionalRestSlugName=g,h="[[...]]"}else{if(null!=this.optionalRestSlugName)throw Error('You cannot use both an optional and required 
catch-all route at the same level ("[[...'+this.optionalRestSlugName+']]" and "'+l[0]+'").');handleSlug(this.restSlugName,g),this.restSlugName=g,h="[...]"}}else{if(y)throw Error('Optional route parameters are not yet supported ("'+l[0]+'").');handleSlug(this.slugName,g),this.slugName=g,h="[]"}}this.children.has(h)||this.children.set(h,new UrlNode),this.children.get(h)._insert(l.slice(1),d,f)}constructor(){this.placeholder=!0,this.children=new Map,this.slugName=null,this.restSlugName=null,this.optionalRestSlugName=null}};function getSortedRoutes(l){let d=new UrlNode;return l.forEach(l=>d.insert(l)),d.smoosh()}},5612:function(l,d){"use strict";let f;Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{default:function(){return _default},setConfig:function(){return setConfig}});let _default=()=>f;function setConfig(l){f=l}},6163:function(l,d){"use strict";function isGroupSegment(l){return"("===l[0]&&l.endsWith(")")}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"isGroupSegment",{enumerable:!0,get:function(){return isGroupSegment}})},8955:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"default",{enumerable:!0,get:function(){return SideEffect}});let h=f(7294),g=h.useLayoutEffect,y=h.useEffect;function SideEffect(l){let{headManager:d,reduceComponentsToState:f}=l;function emitChange(){if(d&&d.mountedInstances){let g=h.Children.toArray(Array.from(d.mountedInstances).filter(Boolean));d.updateHead(f(g,l))}}return g(()=>{var f;return null==d||null==(f=d.mountedInstances)||f.add(l.children),()=>{var f;null==d||null==(f=d.mountedInstances)||f.delete(l.children)}}),g(()=>(d&&(d._pendingUpdate=emitChange),()=>{d&&(d._pendingUpdate=emitChange)})),y(()=>(d&&d._pendingUpdate&&(d._pendingUpdate(),d._pendingUpdate=null),()=>{d&&d._pendingUpdate&&(d._pendingUpdate(),d._pendingUpdate=null)})),null}},109:function(l,d){"use 
strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{WEB_VITALS:function(){return f},execOnce:function(){return execOnce},isAbsoluteUrl:function(){return isAbsoluteUrl},getLocationOrigin:function(){return getLocationOrigin},getURL:function(){return getURL},getDisplayName:function(){return getDisplayName},isResSent:function(){return isResSent},normalizeRepeatedSlashes:function(){return normalizeRepeatedSlashes},loadGetInitialProps:function(){return loadGetInitialProps},SP:function(){return g},ST:function(){return y},DecodeError:function(){return DecodeError},NormalizeError:function(){return NormalizeError},PageNotFoundError:function(){return PageNotFoundError},MissingStaticPage:function(){return MissingStaticPage},MiddlewareNotFoundError:function(){return MiddlewareNotFoundError},stringifyError:function(){return stringifyError}});let f=["CLS","FCP","FID","INP","LCP","TTFB"];function execOnce(l){let d,f=!1;return function(){for(var h=arguments.length,g=Array(h),y=0;yh.test(l);function getLocationOrigin(){let{protocol:l,hostname:d,port:f}=window.location;return l+"//"+d+(f?":"+f:"")}function getURL(){let{href:l}=window.location,d=getLocationOrigin();return l.substring(d.length)}function getDisplayName(l){return"string"==typeof l?l:l.displayName||l.name||"Unknown"}function isResSent(l){return l.finished||l.headersSent}function normalizeRepeatedSlashes(l){let d=l.split("?"),f=d[0];return f.replace(/\\/g,"/").replace(/\/\/+/g,"/")+(d[1]?"?"+d.slice(1).join("?"):"")}async function loadGetInitialProps(l,d){let f=d.res||d.ctx&&d.ctx.res;if(!l.getInitialProps)return d.ctx&&d.Component?{pageProps:await loadGetInitialProps(d.Component,d.ctx)}:{};let h=await l.getInitialProps(d);if(f&&isResSent(f))return h;if(!h){let d='"'+getDisplayName(l)+'.getInitialProps()" should resolve to an object. 
But found "'+h+'" instead.';throw Error(d)}return h}let g="undefined"!=typeof performance,y=g&&["mark","measure","getEntriesByName"].every(l=>"function"==typeof performance[l]);let DecodeError=class DecodeError extends Error{};let NormalizeError=class NormalizeError extends Error{};let PageNotFoundError=class PageNotFoundError extends Error{constructor(l){super(),this.code="ENOENT",this.name="PageNotFoundError",this.message="Cannot find module for page: "+l}};let MissingStaticPage=class MissingStaticPage extends Error{constructor(l,d){super(),this.message="Failed to load static file for page: "+l+" "+d}};let MiddlewareNotFoundError=class MiddlewareNotFoundError extends Error{constructor(){super(),this.code="ENOENT",this.message="Cannot find the middleware module"}};function stringifyError(l){return JSON.stringify({message:l.message,stack:l.stack})}},1905:function(l,d){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"warnOnce",{enumerable:!0,get:function(){return warnOnce}});let warnOnce=l=>{}},8018:function(l){var d,f,h,g,y,P,b,E,S,w,R,O,j,A,M,C,I,L,x,N,D,k,F,U,H,B,W,q,z,G,V,X,K,Y,Q,$,J,Z,ee,et,er,en,ea,eo,ei,el;(d={}).d=function(l,f){for(var h in f)d.o(f,h)&&!d.o(l,h)&&Object.defineProperty(l,h,{enumerable:!0,get:f[h]})},d.o=function(l,d){return Object.prototype.hasOwnProperty.call(l,d)},d.r=function(l){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(l,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(l,"__esModule",{value:!0})},void 0!==d&&(d.ab="//"),f={},d.r(f),d.d(f,{getCLS:function(){return F},getFCP:function(){return N},getFID:function(){return G},getINP:function(){return en},getLCP:function(){return eo},getTTFB:function(){return el},onCLS:function(){return F},onFCP:function(){return N},onFID:function(){return G},onINP:function(){return en},onLCP:function(){return eo},onTTFB:function(){return 
el}}),E=-1,S=function(l){addEventListener("pageshow",function(d){d.persisted&&(E=d.timeStamp,l(d))},!0)},w=function(){return window.performance&&performance.getEntriesByType&&performance.getEntriesByType("navigation")[0]},R=function(){var l=w();return l&&l.activationStart||0},O=function(l,d){var f=w(),h="navigate";return E>=0?h="back-forward-cache":f&&(h=document.prerendering||R()>0?"prerender":f.type.replace(/_/g,"-")),{name:l,value:void 0===d?-1:d,rating:"good",delta:0,entries:[],id:"v3-".concat(Date.now(),"-").concat(Math.floor(8999999999999*Math.random())+1e12),navigationType:h}},j=function(l,d,f){try{if(PerformanceObserver.supportedEntryTypes.includes(l)){var h=new PerformanceObserver(function(l){d(l.getEntries())});return h.observe(Object.assign({type:l,buffered:!0},f||{})),h}}catch(l){}},A=function(l,d){var T=function t(f){"pagehide"!==f.type&&"hidden"!==document.visibilityState||(l(f),d&&(removeEventListener("visibilitychange",t,!0),removeEventListener("pagehide",t,!0)))};addEventListener("visibilitychange",T,!0),addEventListener("pagehide",T,!0)},M=function(l,d,f,h){var g,y;return function(P){var b;d.value>=0&&(P||h)&&((y=d.value-(g||0))||void 0===g)&&(g=d.value,d.delta=y,d.rating=(b=d.value)>f[1]?"poor":b>f[0]?"needs-improvement":"good",l(d))}},C=-1,I=function(){return"hidden"!==document.visibilityState||document.prerendering?1/0:0},L=function(){A(function(l){C=l.timeStamp},!0)},x=function(){return C<0&&(C=I(),L(),S(function(){setTimeout(function(){C=I(),L()},0)})),{get firstHiddenTime(){return C}}},N=function(l,d){d=d||{};var f,h=[1800,3e3],g=x(),y=O("FCP"),c=function(l){l.forEach(function(l){"first-contentful-paint"===l.name&&(b&&b.disconnect(),l.startTime-1&&l(d)},g=O("CLS",0),y=0,P=[],p=function(l){l.forEach(function(l){if(!l.hadRecentInput){var 
d=P[0],f=P[P.length-1];y&&l.startTime-f.startTime<1e3&&l.startTime-d.startTime<5e3?(y+=l.value,P.push(l)):(y=l.value,P=[l]),y>g.value&&(g.value=y,g.entries=P,h())}})},b=j("layout-shift",p);b&&(h=M(i,g,f,d.reportAllChanges),A(function(){p(b.takeRecords()),h(!0)}),S(function(){y=0,k=-1,h=M(i,g=O("CLS",0),f,d.reportAllChanges)}))},U={passive:!0,capture:!0},H=new Date,B=function(l,d){h||(h=d,g=l,y=new Date,z(removeEventListener),W())},W=function(){if(g>=0&&g1e12?new Date:performance.now())-l.timeStamp;"pointerdown"==l.type?(d=function(){B(g,l),h()},f=function(){h()},h=function(){removeEventListener("pointerup",d,U),removeEventListener("pointercancel",f,U)},addEventListener("pointerup",d,U),addEventListener("pointercancel",f,U)):B(g,l)}},z=function(l){["mousedown","keydown","touchstart","pointerdown"].forEach(function(d){return l(d,q,U)})},G=function(l,d){d=d||{};var f,y=[100,300],b=x(),E=O("FID"),v=function(l){l.startTimed.latency){if(f)f.entries.push(l),f.latency=Math.max(f.latency,l.duration);else{var h={id:l.interactionId,latency:l.duration,entries:[l]};et[h.id]=h,ee.push(h)}ee.sort(function(l,d){return d.latency-l.latency}),ee.splice(10).forEach(function(l){delete et[l.id]})}},en=function(l,d){d=d||{};var f=[200,500];$();var h,g=O("INP"),a=function(l){l.forEach(function(l){l.interactionId&&er(l),"first-input"!==l.entryType||ee.some(function(d){return d.entries.some(function(d){return l.duration===d.duration&&l.startTime===d.startTime})})||er(l)});var d,f=(d=Math.min(ee.length-1,Math.floor(Z()/50)),ee[d]);f&&f.latency!==g.value&&(g.value=f.latency,g.entries=f.entries,h())},y=j("event",a,{durationThreshold:d.durationThreshold||40});h=M(l,g,f,d.reportAllChanges),y&&(y.observe({type:"first-input",buffered:!0}),A(function(){a(y.takeRecords()),g.value<0&&Z()>0&&(g.value=0,g.entries=[]),h(!0)}),S(function(){ee=[],J=Q(),h=M(l,g=O("INP"),f,d.reportAllChanges)}))},ea={},eo=function(l,d){d=d||{};var f,h=[2500,4e3],g=x(),y=O("LCP"),c=function(l){var d=l[l.length-1];if(d){var 
h=d.startTime-R();hperformance.now())return;h.entries=[y],g(!0),S(function(){(g=M(l,h=O("TTFB",0),f,d.reportAllChanges))(!0)})}})},l.exports=f},9423:function(l,d){"use strict";function isAPIRoute(l){return"/api"===l||!!(null==l?void 0:l.startsWith("/api/"))}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"isAPIRoute",{enumerable:!0,get:function(){return isAPIRoute}})},676:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{default:function(){return isError},getProperError:function(){return getProperError}});let h=f(5585);function isError(l){return"object"==typeof l&&null!==l&&"name"in l&&"message"in l}function getProperError(l){return isError(l)?l:Error((0,h.isPlainObject)(l)?JSON.stringify(l):l+"")}},2407:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{INTERCEPTION_ROUTE_MARKERS:function(){return g},isInterceptionRouteAppPath:function(){return isInterceptionRouteAppPath},extractInterceptionRouteInformation:function(){return extractInterceptionRouteInformation}});let h=f(3090),g=["(..)(..)","(.)","(..)","(...)"];function isInterceptionRouteAppPath(l){return void 0!==l.split("/").find(l=>g.find(d=>l.startsWith(d)))}function extractInterceptionRouteInformation(l){let d,f,y;for(let h of l.split("/"))if(f=g.find(l=>h.startsWith(l))){[d,y]=l.split(f,2);break}if(!d||!f||!y)throw Error(`Invalid interception route: ${l}. Must be in the format //(..|...|..)(..)/`);switch(d=(0,h.normalizeAppPath)(d),f){case"(.)":y="/"===d?`/${y}`:d+"/"+y;break;case"(..)":if("/"===d)throw Error(`Invalid interception route: ${l}. Cannot use (..) marker at the root level, use (.) 
instead.`);y=d.split("/").slice(0,-1).concat(y).join("/");break;case"(...)":y="/"+y;break;case"(..)(..)":let P=d.split("/");if(P.length<=2)throw Error(`Invalid interception route: ${l}. Cannot use (..)(..) marker at the root level or one level up.`);y=P.slice(0,-2).concat(y).join("/");break;default:throw Error("Invariant: unexpected marker")}return{interceptingRoute:d,interceptedRoute:y}}},2431:function(){},8754:function(l,d,f){"use strict";function _interop_require_default(l){return l&&l.__esModule?l:{default:l}}f.r(d),f.d(d,{_:function(){return _interop_require_default},_interop_require_default:function(){return _interop_require_default}})},1757:function(l,d,f){"use strict";function _getRequireWildcardCache(l){if("function"!=typeof WeakMap)return null;var d=new WeakMap,f=new WeakMap;return(_getRequireWildcardCache=function(l){return l?f:d})(l)}function _interop_require_wildcard(l,d){if(!d&&l&&l.__esModule)return l;if(null===l||"object"!=typeof l&&"function"!=typeof l)return{default:l};var f=_getRequireWildcardCache(d);if(f&&f.has(l))return f.get(l);var h={},g=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var y in l)if("default"!==y&&Object.prototype.hasOwnProperty.call(l,y)){var P=g?Object.getOwnPropertyDescriptor(l,y):null;P&&(P.get||P.set)?Object.defineProperty(h,y,P):h[y]=l[y]}return h.default=l,f&&f.set(l,h),h}f.r(d),f.d(d,{_:function(){return _interop_require_wildcard},_interop_require_wildcard:function(){return _interop_require_wildcard}})}},function(l){var __webpack_exec__=function(d){return l(l.s=d)};l.O(0,[774],function(){return __webpack_exec__(3143),__webpack_exec__(6003)}),_N_E=l.O()}]);(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[888],{6840:function(e,n,t){(window.__NEXT_P=window.__NEXT_P||[]).push(["/_app",function(){return t(5913)}])},5913:function(e,n,t){"use strict";t.r(n),t.d(n,{default:function(){return MyApp}});var i=t(5893),c=t(9008),s=t.n(c);function 
MyApp(e){let{Component:n,pageProps:t}=e;return(0,i.jsxs)(i.Fragment,{children:[(0,i.jsxs)(s(),{children:[(0,i.jsx)("meta",{charSet:"utf-8"}),(0,i.jsx)("meta",{httpEquiv:"X-UA-Compatible",content:"IE=edge"}),(0,i.jsx)("meta",{name:"viewport",content:"width=device-width,initial-scale=1,minimum-scale=1,maximum-scale=1,user-scalable=no"}),(0,i.jsx)("meta",{name:"description",content:"Description"}),(0,i.jsx)("meta",{name:"keywords",content:"Keywords"}),(0,i.jsx)("title",{children:"Laconic Test PWA"}),(0,i.jsx)("link",{rel:"manifest",href:"/manifest.json"}),(0,i.jsx)("link",{href:"/icons/favicon-16x16.png",rel:"icon",type:"image/png",sizes:"16x16"}),(0,i.jsx)("link",{href:"/icons/favicon-32x32.png",rel:"icon",type:"image/png",sizes:"32x32"}),(0,i.jsx)("link",{rel:"apple-touch-icon",href:"/apple-icon.png"}),(0,i.jsx)("meta",{name:"theme-color",content:"#317EFB"})]}),(0,i.jsx)(n,{...t})]})}t(415)},415:function(){},9008:function(e,n,t){e.exports=t(9201)}},function(e){var __webpack_exec__=function(n){return e(e.s=n)};e.O(0,[774,179],function(){return __webpack_exec__(6840),__webpack_exec__(9974)}),_N_E=e.O()}]);(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[405],{8312:function(e,c,n){(window.__NEXT_P=window.__NEXT_P||[]).push(["/",function(){return n(2627)}])},2627:function(e,c,n){"use strict";n.r(c),n.d(c,{default:function(){return Home}});var _=n(5893),s=n(6612),o=n.n(s);function Home(){return(0,_.jsxs)("div",{className:o().container,children:[(0,_.jsxs)("main",{className:o().main,children:[(0,_.jsxs)("h1",{className:o().title,children:["Welcome to ",(0,_.jsx)("a",{href:"https://www.laconic.com/",children:"Laconic!"})]}),(0,_.jsxs)("div",{className:o().grid,children:[(0,_.jsxs)("p",{className:o().card,children:["CONFIG1 has value: ","this string"]}),(0,_.jsxs)("p",{className:o().card,children:["CONFIG2 has value: ","this different string"]}),(0,_.jsxs)("p",{className:o().card,children:["WEBAPP_DEBUG has value: 
","44ec6317-c911-47ff-86c1-d36c42ae9383"]})]})]}),(0,_.jsx)("footer",{className:o().footer,children:(0,_.jsxs)("a",{href:"https://www.laconic.com/",target:"_blank",rel:"noopener noreferrer",children:["Powered by \xa0",(0,_.jsxs)("svg",{width:"133",height:"24",fill:"none",xmlns:"http://www.w3.org/2000/svg",children:[(0,_.jsx)("path",{d:"M37.761 22.302h9.246v-2.704h-6.155v-17.9h-3.09v20.604ZM59.314 1.697h-5.126l-5.357 20.605h3.194l1.34-5.151h6.618l1.34 5.151h3.348L59.314 1.697Zm-5.306 12.878 2.679-10.663h.103l2.575 10.663h-5.357ZM74.337 9.682h3.606c0-5.873-1.88-8.397-6.259-8.397-4.61 0-6.593 3.194-6.593 10.689 0 7.52 1.983 10.74 6.593 10.74 4.379 0 6.259-2.447 6.285-8.139h-3.606c-.026 4.456-.567 5.563-2.679 5.563-2.42 0-3.013-1.622-2.987-8.164 0-6.516.592-8.14 2.987-8.113 2.112 0 2.653 1.159 2.653 5.82ZM86.689 1.285c4.687.026 6.696 3.245 6.696 10.715 0 7.469-2.009 10.688-6.696 10.714-4.714.026-6.723-3.194-6.723-10.714 0-7.521 2.01-10.74 6.723-10.715ZM83.572 12c0 6.516.618 8.139 3.117 8.139 2.472 0 3.09-1.623 3.09-8.14 0-6.541-.618-8.164-3.09-8.138-2.499.026-3.117 1.648-3.117 8.139ZM99.317 22.276l-3.09.026V1.697h5.434l5.074 16.793h.052V1.697h3.09v20.605h-5.099l-5.409-18.08h-.052v18.054ZM116.615 1.697h-3.091v20.605h3.091V1.697ZM128.652 9.682h3.606c0-5.873-1.881-8.397-6.259-8.397-4.61 0-6.594 3.194-6.594 10.689 0 7.52 1.984 10.74 6.594 10.74 4.378 0 6.259-2.447 6.284-8.139h-3.605c-.026 4.456-.567 5.563-2.679 5.563-2.421 0-3.014-1.622-2.988-8.164 0-6.516.593-8.14 2.988-8.113 2.112 0 2.653 1.159 2.653 5.82Z",fill:"#000000"}),(0,_.jsx)("path",{fillRule:"evenodd",clipRule:"evenodd",d:"M4.05 12.623A15.378 15.378 0 0 0 8.57 1.714C8.573 1.136 8.54.564 8.477 0H0v16.287c0 1.974.752 3.949 2.258 5.454A7.69 7.69 0 0 0 7.714 24L24 24v-8.477a15.636 15.636 0 0 0-1.715-.095c-4.258 0-8.115 1.73-10.908 4.523-2.032 1.981-5.291 1.982-7.299-.026-2.006-2.006-2.007-5.266-.029-7.302Zm18.192-10.86a6.004 6.004 0 0 0-8.485 0 6.003 6.003 0 0 0 0 8.484 6.003 6.003 0 0 0 8.485 0 6.002 6.002 0 0 0 
0-8.485Z",fill:"#000000"})]})]})})]})}},6612:function(e){e.exports={container:"Home_container__d256j",main:"Home_main__VkIEL",footer:"Home_footer__yFiaX",title:"Home_title__hYX6j",description:"Home_description__uXNdx",code:"Home_code__VVrIr",grid:"Home_grid__AVljO",card:"Home_card__E5spL",logo:"Home_logo__IOQAX"}}},function(e){e.O(0,[774,888,179],function(){return e(e.s=8312)}),_N_E=e.O()}]);self.__BUILD_MANIFEST={__rewrites:{afterFiles:[],beforeFiles:[],fallback:[]},"/":["static/css/3571059724d711eb.css","static/chunks/pages/index-08151452ae5af5e0.js"],"/_error":["static/chunks/pages/_error-ee5b5fb91d29d86f.js"],sortedPages:["/","/_app","/_error"]},self.__BUILD_MANIFEST_CB&&self.__BUILD_MANIFEST_CB();self.__SSG_MANIFEST=new Set,self.__SSG_MANIFEST_CB&&self.__SSG_MANIFEST_CB(); \ No newline at end of file From d9bcc088a87e32e88bc58f53e75c396410874d62 Mon Sep 17 00:00:00 2001 From: Thomas E Lackey Date: Tue, 7 Nov 2023 18:27:08 -0600 Subject: [PATCH 15/62] Enable webapp test in GitHub CI. 
(#627) --- .github/workflows/test-webapp.yml | 29 ++++++++++++++++++++++++++++ tests/webapp-test/run-webapp-test.sh | 4 ++-- 2 files changed, 31 insertions(+), 2 deletions(-) create mode 100644 .github/workflows/test-webapp.yml diff --git a/.github/workflows/test-webapp.yml b/.github/workflows/test-webapp.yml new file mode 100644 index 00000000..3b920828 --- /dev/null +++ b/.github/workflows/test-webapp.yml @@ -0,0 +1,29 @@ +name: Webapp Test + +on: + pull_request: + branches: '*' + push: + branches: '*' + +jobs: + test: + name: "Run webapp test suite" + runs-on: ubuntu-latest + steps: + - name: "Clone project repository" + uses: actions/checkout@v3 + - name: "Install Python" + uses: actions/setup-python@v4 + with: + python-version: '3.8' + - name: "Print Python version" + run: python3 --version + - name: "Install shiv" + run: pip install shiv + - name: "Generate build version file" + run: ./scripts/create_build_tag_file.sh + - name: "Build local shiv package" + run: ./scripts/build_shiv_package.sh + - name: "Run webapp tests" + run: ./tests/webapp-test/run-webapp-test.sh diff --git a/tests/webapp-test/run-webapp-test.sh b/tests/webapp-test/run-webapp-test.sh index 75c4cbd1..71b4da16 100755 --- a/tests/webapp-test/run-webapp-test.sh +++ b/tests/webapp-test/run-webapp-test.sh @@ -51,7 +51,7 @@ else echo "BEFORE: PASSED" fi -grep "`uuidgen`" test.after > /dev/null +grep "$UUID" test.after > /dev/null if [ $? -ne 0 ]; then echo "AFTER: FAILED" exit 1 @@ -59,4 +59,4 @@ else echo "AFTER: PASSED" fi -exit 0 \ No newline at end of file +exit 0 From 36e13f71997fbcf048fa4e4b5b811c4b2bc6b002 Mon Sep 17 00:00:00 2001 From: Thomas E Lackey Date: Tue, 7 Nov 2023 23:10:32 -0600 Subject: [PATCH 16/62] Remove test output from tree. 
(#628) --- tests/webapp-test/test.before | 34 ---------------------------------- 1 file changed, 34 deletions(-) delete mode 100644 tests/webapp-test/test.before diff --git a/tests/webapp-test/test.before b/tests/webapp-test/test.before deleted file mode 100644 index e349f10d..00000000 --- a/tests/webapp-test/test.before +++ /dev/null @@ -1,34 +0,0 @@ -Laconic Test PWA

Welcome to Laconic!

CONFIG1 has value: CERC_RUNTIME_ENV_CERC_TEST_WEBAPP_CONFIG1

CONFIG2 has value: CERC_RUNTIME_ENV_CERC_TEST_WEBAPP_CONFIG2

WEBAPP_DEBUG has value: CERC_RUNTIME_ENV_CERC_WEBAPP_DEBUG

body,html{padding:0;margin:0;font-family:-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Fira Sans,Droid Sans,Helvetica Neue,sans-serif}a{color:inherit;text-decoration:none}*{box-sizing:border-box}.Home_container__d256j{min-height:100vh;padding:0 .5rem;flex-direction:column}.Home_container__d256j,.Home_main__VkIEL{display:flex;justify-content:center;align-items:center}.Home_main__VkIEL{padding:5rem 0;flex:1 1;flex-direction:column}.Home_footer__yFiaX{width:100%;height:100px;border-top:1px solid #eaeaea;display:flex;justify-content:center;align-items:center}.Home_footer__yFiaX img{margin-left:.5rem}.Home_footer__yFiaX a{display:flex;justify-content:center;align-items:center}.Home_title__hYX6j a{color:#0070f3;text-decoration:none}.Home_title__hYX6j a:active,.Home_title__hYX6j a:focus,.Home_title__hYX6j a:hover{text-decoration:underline}.Home_title__hYX6j{margin:0;line-height:1.15;font-size:4rem}.Home_description__uXNdx,.Home_title__hYX6j{text-align:center}.Home_description__uXNdx{line-height:1.5;font-size:1.5rem}.Home_code__VVrIr{background:#fafafa;border-radius:5px;padding:.75rem;font-size:1.1rem;font-family:Menlo,Monaco,Lucida Console,Liberation Mono,DejaVu Sans Mono,Bitstream Vera Sans Mono,Courier New,monospace}.Home_grid__AVljO{display:flex;align-items:center;justify-content:center;flex-wrap:wrap;max-width:800px;margin-top:3rem}.Home_card__E5spL{margin:1rem;flex-basis:45%;padding:1.5rem;text-align:left;color:inherit;text-decoration:none;border:1px solid #eaeaea;border-radius:10px;transition:color .15s ease,border-color .15s ease}.Home_card__E5spL:active,.Home_card__E5spL:focus,.Home_card__E5spL:hover{color:#0070f3;border-color:#0070f3}.Home_card__E5spL h3{margin:0 0 1rem;font-size:1.5rem}.Home_card__E5spL p{margin:0;font-size:1.25rem;line-height:1.5}.Home_logo__IOQAX{height:1em}@media (max-width:600px){.Home_grid__AVljO{width:100%;flex-direction:column}}!function(){var t="undefined"!=typeof globalThis?globalThis:"undefined"!=typeof 
window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{};function e(t){var e={exports:{}};return t(e,e.exports),e.exports}var r=function(t){return t&&t.Math==Math&&t},n=r("object"==typeof globalThis&&globalThis)||r("object"==typeof window&&window)||r("object"==typeof self&&self)||r("object"==typeof t&&t)||Function("return this")(),o=function(t){try{return!!t()}catch(t){return!0}},i=!o(function(){return 7!=Object.defineProperty({},1,{get:function(){return 7}})[1]}),a={}.propertyIsEnumerable,u=Object.getOwnPropertyDescriptor,s=u&&!a.call({1:2},1)?function(t){var e=u(this,t);return!!e&&e.enumerable}:a,c={f:s},f=function(t,e){return{enumerable:!(1&t),configurable:!(2&t),writable:!(4&t),value:e}},l={}.toString,h=function(t){return l.call(t).slice(8,-1)},p="".split,d=o(function(){return!Object("z").propertyIsEnumerable(0)})?function(t){return"String"==h(t)?p.call(t,""):Object(t)}:Object,v=function(t){if(null==t)throw TypeError("Can't call method on "+t);return t},g=function(t){return d(v(t))},y=function(t){return"object"==typeof t?null!==t:"function"==typeof t},m=function(t,e){if(!y(t))return t;var r,n;if(e&&"function"==typeof(r=t.toString)&&!y(n=r.call(t)))return n;if("function"==typeof(r=t.valueOf)&&!y(n=r.call(t)))return n;if(!e&&"function"==typeof(r=t.toString)&&!y(n=r.call(t)))return n;throw TypeError("Can't convert object to primitive value")},b={}.hasOwnProperty,w=function(t,e){return b.call(t,e)},S=n.document,E=y(S)&&y(S.createElement),x=function(t){return E?S.createElement(t):{}},A=!i&&!o(function(){return 7!=Object.defineProperty(x("div"),"a",{get:function(){return 7}}).a}),O=Object.getOwnPropertyDescriptor,R={f:i?O:function(t,e){if(t=g(t),e=m(e,!0),A)try{return O(t,e)}catch(t){}if(w(t,e))return f(!c.f.call(t,e),t[e])}},j=function(t){if(!y(t))throw TypeError(String(t)+" is not an object");return t},P=Object.defineProperty,I={f:i?P:function(t,e,r){if(j(t),e=m(e,!0),j(r),A)try{return P(t,e,r)}catch(t){}if("get"in r||"set"in r)throw 
TypeError("Accessors not supported");return"value"in r&&(t[e]=r.value),t}},T=i?function(t,e,r){return I.f(t,e,f(1,r))}:function(t,e,r){return t[e]=r,t},k=function(t,e){try{T(n,t,e)}catch(r){n[t]=e}return e},L="__core-js_shared__",U=n[L]||k(L,{}),M=Function.toString;"function"!=typeof U.inspectSource&&(U.inspectSource=function(t){return M.call(t)});var _,N,C,F=U.inspectSource,B=n.WeakMap,D="function"==typeof B&&/native code/.test(F(B)),q=!1,z=e(function(t){(t.exports=function(t,e){return U[t]||(U[t]=void 0!==e?e:{})})("versions",[]).push({version:"3.6.5",mode:"global",copyright:"© 2020 Denis Pushkarev (zloirock.ru)"})}),W=0,K=Math.random(),G=function(t){return"Symbol("+String(void 0===t?"":t)+")_"+(++W+K).toString(36)},$=z("keys"),V=function(t){return $[t]||($[t]=G(t))},H={};if(D){var X=new(0,n.WeakMap),Y=X.get,J=X.has,Q=X.set;_=function(t,e){return Q.call(X,t,e),e},N=function(t){return Y.call(X,t)||{}},C=function(t){return J.call(X,t)}}else{var Z=V("state");H[Z]=!0,_=function(t,e){return T(t,Z,e),e},N=function(t){return w(t,Z)?t[Z]:{}},C=function(t){return w(t,Z)}}var tt,et={set:_,get:N,has:C,enforce:function(t){return C(t)?N(t):_(t,{})},getterFor:function(t){return function(e){var r;if(!y(e)||(r=N(e)).type!==t)throw TypeError("Incompatible receiver, "+t+" required");return r}}},rt=e(function(t){var e=et.get,r=et.enforce,o=String(String).split("String");(t.exports=function(t,e,i,a){var u=!!a&&!!a.unsafe,s=!!a&&!!a.enumerable,c=!!a&&!!a.noTargetGet;"function"==typeof i&&("string"!=typeof e||w(i,"name")||T(i,"name",e),r(i).source=o.join("string"==typeof e?e:"")),t!==n?(u?!c&&t[e]&&(s=!0):delete t[e],s?t[e]=i:T(t,e,i)):s?t[e]=i:k(e,i)})(Function.prototype,"toString",function(){return"function"==typeof this&&e(this).source||F(this)})}),nt=n,ot=function(t){return"function"==typeof t?t:void 0},it=function(t,e){return arguments.length<2?ot(nt[t])||ot(n[t]):nt[t]&&nt[t][e]||n[t]&&n[t][e]},at=Math.ceil,ut=Math.floor,st=function(t){return 
isNaN(t=+t)?0:(t>0?ut:at)(t)},ct=Math.min,ft=function(t){return t>0?ct(st(t),9007199254740991):0},lt=Math.max,ht=Math.min,pt=function(t,e){var r=st(t);return r<0?lt(r+e,0):ht(r,e)},dt=function(t){return function(e,r,n){var o,i=g(e),a=ft(i.length),u=pt(n,a);if(t&&r!=r){for(;a>u;)if((o=i[u++])!=o)return!0}else for(;a>u;u++)if((t||u in i)&&i[u]===r)return t||u||0;return!t&&-1}},vt={includes:dt(!0),indexOf:dt(!1)},gt=vt.indexOf,yt=function(t,e){var r,n=g(t),o=0,i=[];for(r in n)!w(H,r)&&w(n,r)&&i.push(r);for(;e.length>o;)w(n,r=e[o++])&&(~gt(i,r)||i.push(r));return i},mt=["constructor","hasOwnProperty","isPrototypeOf","propertyIsEnumerable","toLocaleString","toString","valueOf"],bt=mt.concat("length","prototype"),wt={f:Object.getOwnPropertyNames||function(t){return yt(t,bt)}},St={f:Object.getOwnPropertySymbols},Et=it("Reflect","ownKeys")||function(t){var e=wt.f(j(t)),r=St.f;return r?e.concat(r(t)):e},xt=function(t,e){for(var r=Et(e),n=I.f,o=R.f,i=0;i2?arguments[2]:void 0,u=Mt((void 0===a?n:pt(a,n))-i,n-o),s=1;for(i0;)i in r?r[o]=r[i]:delete r[o],o+=s,i+=s;return r},Nt=!!Object.getOwnPropertySymbols&&!o(function(){return!String(Symbol())}),Ct=Nt&&!Symbol.sham&&"symbol"==typeof Symbol.iterator,Ft=z("wks"),Bt=n.Symbol,Dt=Ct?Bt:Bt&&Bt.withoutSetter||G,qt=function(t){return w(Ft,t)||(Ft[t]=Nt&&w(Bt,t)?Bt[t]:Dt("Symbol."+t)),Ft[t]},zt=Object.keys||function(t){return yt(t,mt)},Wt=i?Object.defineProperties:function(t,e){j(t);for(var r,n=zt(e),o=n.length,i=0;o>i;)I.f(t,r=n[i++],e[r]);return t},Kt=it("document","documentElement"),Gt=V("IE_PROTO"),$t=function(){},Vt=function(t){return"",a=a.removeChild(a.firstChild)):"string"==typeof 
o.is?a=z.createElement(i,{is:o.is}):(a=z.createElement(i),"select"===i&&(z=a,o.multiple?z.multiple=!0:o.size&&(z.size=o.size))):a=z.createElementNS(a,i),a[tD]=u,a[tR]=o,s(a,u,!1,!1),u.stateNode=a;e:{switch(z=vb(i,o),i){case"dialog":D("cancel",a),D("close",a),_=o;break;case"iframe":case"object":case"embed":D("load",a),_=o;break;case"video":case"audio":for(_=0;_le&&(u.flags|=128,o=!0,Ej(j,!1),u.lanes=4194304)}}else{if(!o){if(null!==(a=Mh(z))){if(u.flags|=128,o=!0,null!==(i=a.updateQueue)&&(u.updateQueue=i,u.flags|=4),Ej(j,!0),null===j.tail&&"hidden"===j.tailMode&&!z.alternate&&!t8)return S(u),null}else 2*eq()-j.renderingStartTime>le&&1073741824!==i&&(u.flags|=128,o=!0,Ej(j,!1),u.lanes=4194304)}j.isBackwards?(z.sibling=u.child,u.child=z):(null!==(i=j.last)?i.sibling=z:u.child=z,j.last=z)}if(null!==j.tail)return u=j.tail,j.rendering=u,j.tail=u.sibling,j.renderingStartTime=eq(),u.sibling=null,i=rv.current,G(rv,o?1&i|2:1&i),u;return S(u),null;case 22:case 23:return Ij(),o=null!==u.memoizedState,null!==a&&null!==a.memoizedState!==o&&(u.flags|=8192),o&&0!=(1&u.mode)?0!=(1073741824&r0)&&(S(u),6&u.subtreeFlags&&(u.flags|=8192)):S(u),null;case 24:case 25:return null}throw Error(p(156,u.tag))}function Jj(a,u){switch(wg(u),u.tag){case 1:return Zf(u.type)&&$f(),65536&(a=u.flags)?(u.flags=-65537&a|128,u):null;case 3:return Jh(),E(tQ),E(tA),Oh(),0!=(65536&(a=u.flags))&&0==(128&a)?(u.flags=-65537&a|128,u):null;case 5:return Lh(u),null;case 13:if(E(rv),null!==(a=u.memoizedState)&&null!==a.dehydrated){if(null===u.alternate)throw Error(p(340));Ig()}return 65536&(a=u.flags)?(u.flags=-65537&a|128,u):null;case 19:return E(rv),null;case 4:return Jh(),null;case 10:return Rg(u.type._context),null;case 22:case 23:return Ij(),null;default:return null}}s=function(a,u){for(var i=u.child;null!==i;){if(5===i.tag||6===i.tag)a.appendChild(i.stateNode);else 
if(4!==i.tag&&null!==i.child){i.child.return=i,i=i.child;continue}if(i===u)break;for(;null===i.sibling;){if(null===i.return||i.return===u)return;i=i.return}i.sibling.return=i.return,i=i.sibling}},w=function(){},x=function(a,u,i,o){var s=a.memoizedProps;if(s!==o){a=u.stateNode,Hh(rp.current);var w,x=null;switch(i){case"input":s=Ya(a,s),o=Ya(a,o),x=[];break;case"select":s=eS({},s,{value:void 0}),o=eS({},o,{value:void 0}),x=[];break;case"textarea":s=gb(a,s),o=gb(a,o),x=[];break;default:"function"!=typeof s.onClick&&"function"==typeof o.onClick&&(a.onclick=Bf)}for(j in ub(i,o),i=null,s)if(!o.hasOwnProperty(j)&&s.hasOwnProperty(j)&&null!=s[j]){if("style"===j){var C=s[j];for(w in C)C.hasOwnProperty(w)&&(i||(i={}),i[w]="")}else"dangerouslySetInnerHTML"!==j&&"children"!==j&&"suppressContentEditableWarning"!==j&&"suppressHydrationWarning"!==j&&"autoFocus"!==j&&(U.hasOwnProperty(j)?x||(x=[]):(x=x||[]).push(j,null))}for(j in o){var _=o[j];if(C=null!=s?s[j]:void 0,o.hasOwnProperty(j)&&_!==C&&(null!=_||null!=C)){if("style"===j){if(C){for(w in C)!C.hasOwnProperty(w)||_&&_.hasOwnProperty(w)||(i||(i={}),i[w]="");for(w in _)_.hasOwnProperty(w)&&C[w]!==_[w]&&(i||(i={}),i[w]=_[w])}else i||(x||(x=[]),x.push(j,i)),i=_}else"dangerouslySetInnerHTML"===j?(_=_?_.__html:void 0,C=C?C.__html:void 0,null!=_&&C!==_&&(x=x||[]).push(j,_)):"children"===j?"string"!=typeof _&&"number"!=typeof _||(x=x||[]).push(j,""+_):"suppressContentEditableWarning"!==j&&"suppressHydrationWarning"!==j&&(U.hasOwnProperty(j)?(null!=_&&"onScroll"===j&&D("scroll",a),x||C===_||(x=[])):(x=x||[]).push(j,_))}}i&&(x=x||[]).push("style",i);var j=x;(u.updateQueue=j)&&(u.flags|=4)}},C=function(a,u,i,o){i!==o&&(u.flags|=4)};var rU=!1,rV=!1,rW="function"==typeof WeakSet?WeakSet:Set,rA=null;function Mj(a,u){var i=a.ref;if(null!==i){if("function"==typeof i)try{i(null)}catch(i){W(a,u,i)}else i.current=null}}function Nj(a,u,i){try{i()}catch(i){W(a,u,i)}}var rQ=!1;function Pj(a,u){if(tC=nk,Ne(a=Me())){if("selectionStart"in a)var 
i={start:a.selectionStart,end:a.selectionEnd};else e:{var o=(i=(i=a.ownerDocument)&&i.defaultView||window).getSelection&&i.getSelection();if(o&&0!==o.rangeCount){i=o.anchorNode;var s,w=o.anchorOffset,x=o.focusNode;o=o.focusOffset;try{i.nodeType,x.nodeType}catch(a){i=null;break e}var C=0,_=-1,j=-1,z=0,P=0,U=a,V=null;n:for(;;){for(;U!==i||0!==w&&3!==U.nodeType||(_=C+w),U!==x||0!==o&&3!==U.nodeType||(j=C+o),3===U.nodeType&&(C+=U.nodeValue.length),null!==(s=U.firstChild);)V=U,U=s;for(;;){if(U===a)break n;if(V===i&&++z===w&&(_=C),V===x&&++P===o&&(j=C),null!==(s=U.nextSibling))break;V=(U=V).parentNode}U=s}i=-1===_||-1===j?null:{start:_,end:j}}else i=null}i=i||{start:0,end:0}}else i=null;for(t_={focusedElem:a,selectionRange:i},nk=!1,rA=u;null!==rA;)if(a=(u=rA).child,0!=(1028&u.subtreeFlags)&&null!==a)a.return=u,rA=a;else for(;null!==rA;){u=rA;try{var B=u.alternate;if(0!=(1024&u.flags))switch(u.tag){case 0:case 11:case 15:case 5:case 6:case 4:case 17:break;case 1:if(null!==B){var $=B.memoizedProps,Y=B.memoizedState,Z=u.stateNode,X=Z.getSnapshotBeforeUpdate(u.elementType===u.type?$:Lg(u.type,$),Y);Z.__reactInternalSnapshotBeforeUpdate=X}break;case 3:var ee=u.stateNode.containerInfo;1===ee.nodeType?ee.textContent="":9===ee.nodeType&&ee.documentElement&&ee.removeChild(ee.documentElement);break;default:throw Error(p(163))}}catch(a){W(u,u.return,a)}if(null!==(a=u.sibling)){a.return=u.return,rA=a;break}rA=u.return}return B=rQ,rQ=!1,B}function Qj(a,u,i){var o=u.updateQueue;if(null!==(o=null!==o?o.lastEffect:null)){var s=o=o.next;do{if((s.tag&a)===a){var w=s.destroy;s.destroy=void 0,void 0!==w&&Nj(u,i,w)}s=s.next}while(s!==o)}}function Rj(a,u){if(null!==(u=null!==(u=u.updateQueue)?u.lastEffect:null)){var i=u=u.next;do{if((i.tag&a)===a){var o=i.create;i.destroy=o()}i=i.next}while(i!==u)}}function Sj(a){var u=a.ref;if(null!==u){var i=a.stateNode;a.tag,a=i,"function"==typeof u?u(a):u.current=a}}function Tj(a){var 
u=a.alternate;null!==u&&(a.alternate=null,Tj(u)),a.child=null,a.deletions=null,a.sibling=null,5===a.tag&&null!==(u=a.stateNode)&&(delete u[tD],delete u[tR],delete u[tI],delete u[tO],delete u[tF]),a.stateNode=null,a.return=null,a.dependencies=null,a.memoizedProps=null,a.memoizedState=null,a.pendingProps=null,a.stateNode=null,a.updateQueue=null}function Uj(a){return 5===a.tag||3===a.tag||4===a.tag}function Vj(a){e:for(;;){for(;null===a.sibling;){if(null===a.return||Uj(a.return))return null;a=a.return}for(a.sibling.return=a.return,a=a.sibling;5!==a.tag&&6!==a.tag&&18!==a.tag;){if(2&a.flags||null===a.child||4===a.tag)continue e;a.child.return=a,a=a.child}if(!(2&a.flags))return a.stateNode}}function Wj(a,u,i){var o=a.tag;if(5===o||6===o)a=a.stateNode,u?8===i.nodeType?i.parentNode.insertBefore(a,u):i.insertBefore(a,u):(8===i.nodeType?(u=i.parentNode).insertBefore(a,i):(u=i).appendChild(a),null!=(i=i._reactRootContainer)||null!==u.onclick||(u.onclick=Bf));else if(4!==o&&null!==(a=a.child))for(Wj(a,u,i),a=a.sibling;null!==a;)Wj(a,u,i),a=a.sibling}function Xj(a,u,i){var o=a.tag;if(5===o||6===o)a=a.stateNode,u?i.insertBefore(a,u):i.appendChild(a);else if(4!==o&&null!==(a=a.child))for(Xj(a,u,i),a=a.sibling;null!==a;)Xj(a,u,i),a=a.sibling}var rB=null,r$=!1;function Zj(a,u,i){for(i=i.child;null!==i;)ak(a,u,i),i=i.sibling}function ak(a,u,i){if(e2&&"function"==typeof e2.onCommitFiberUnmount)try{e2.onCommitFiberUnmount(e1,i)}catch(a){}switch(i.tag){case 5:rV||Mj(i,u);case 6:var o=rB,s=r$;rB=null,Zj(a,u,i),rB=o,r$=s,null!==rB&&(r$?(a=rB,i=i.stateNode,8===a.nodeType?a.parentNode.removeChild(i):a.removeChild(i)):rB.removeChild(i.stateNode));break;case 18:null!==rB&&(r$?(a=rB,i=i.stateNode,8===a.nodeType?Kf(a.parentNode,i):1===a.nodeType&&Kf(a,i),bd(a)):Kf(rB,i.stateNode));break;case 4:o=rB,s=r$,rB=i.stateNode.containerInfo,r$=!0,Zj(a,u,i),rB=o,r$=s;break;case 0:case 11:case 14:case 15:if(!rV&&null!==(o=i.updateQueue)&&null!==(o=o.lastEffect)){s=o=o.next;do{var 
w=s,x=w.destroy;w=w.tag,void 0!==x&&(0!=(2&w)?Nj(i,u,x):0!=(4&w)&&Nj(i,u,x)),s=s.next}while(s!==o)}Zj(a,u,i);break;case 1:if(!rV&&(Mj(i,u),"function"==typeof(o=i.stateNode).componentWillUnmount))try{o.props=i.memoizedProps,o.state=i.memoizedState,o.componentWillUnmount()}catch(a){W(i,u,a)}Zj(a,u,i);break;case 21:default:Zj(a,u,i);break;case 22:1&i.mode?(rV=(o=rV)||null!==i.memoizedState,Zj(a,u,i),rV=o):Zj(a,u,i)}}function bk(a){var u=a.updateQueue;if(null!==u){a.updateQueue=null;var i=a.stateNode;null===i&&(i=a.stateNode=new rW),u.forEach(function(u){var o=ck.bind(null,a,u);i.has(u)||(i.add(u),u.then(o,o))})}}function dk(a,u){var i=u.deletions;if(null!==i)for(var o=0;os&&(s=x),o&=~w}if(o=s,10<(o=(120>(o=eq()-o)?120:480>o?480:1080>o?1080:1920>o?1920:3e3>o?3e3:4320>o?4320:1960*rH(o/1960))-o)){a.timeoutHandle=tL(Qk.bind(null,a,r9,ln),o);break}Qk(a,r9,ln);break;default:throw Error(p(329))}}}return Ek(a,eq()),a.callbackNode===i?Hk.bind(null,a):null}function Ok(a,u){var i=r6;return a.current.memoizedState.isDehydrated&&(Lk(a,u).flags|=256),2!==(a=Jk(a,u))&&(u=r9,r9=i,null!==u&&Gj(u)),a}function Gj(a){null===r9?r9=a:r9.push.apply(r9,a)}function Pk(a){for(var u=a;;){if(16384&u.flags){var i=u.updateQueue;if(null!==i&&null!==(i=i.stores))for(var o=0;oa?16:a,null===lu)var o=!1;else{if(a=lu,lu=null,lo=0,0!=(6&rY))throw Error(p(331));var s=rY;for(rY|=4,rA=a.current;null!==rA;){var w=rA,x=w.child;if(0!=(16&rA.flags)){var C=w.deletions;if(null!==C){for(var _=0;_eq()-r7?Lk(a,0):r5|=i),Ek(a,u)}function Zk(a,u){0===u&&(0==(1&a.mode)?u=1:(u=e6,0==(130023424&(e6<<=1))&&(e6=4194304)));var i=L();null!==(a=Zg(a,u))&&(Ac(a,u,i),Ek(a,i))}function vj(a){var u=a.memoizedState,i=0;null!==u&&(i=u.retryLane),Zk(a,i)}function ck(a,u){var i=0;switch(a.tag){case 13:var o=a.stateNode,s=a.memoizedState;null!==s&&(i=s.retryLane);break;case 19:o=a.stateNode;break;default:throw Error(p(314))}null!==o&&o.delete(u),Zk(a,i)}function 
al(a,u,i,o){this.tag=a,this.key=i,this.sibling=this.child=this.return=this.stateNode=this.type=this.elementType=null,this.index=0,this.ref=null,this.pendingProps=u,this.dependencies=this.memoizedState=this.updateQueue=this.memoizedProps=null,this.mode=o,this.subtreeFlags=this.flags=0,this.deletions=null,this.childLanes=this.lanes=0,this.alternate=null}function Bg(a,u,i,o){return new al(a,u,i,o)}function bj(a){return!(!(a=a.prototype)||!a.isReactComponent)}function $k(a){if("function"==typeof a)return bj(a)?1:0;if(null!=a){if((a=a.$$typeof)===ef)return 11;if(a===em)return 14}return 2}function wh(a,u){var i=a.alternate;return null===i?((i=Bg(a.tag,u,a.key,a.mode)).elementType=a.elementType,i.type=a.type,i.stateNode=a.stateNode,i.alternate=a,a.alternate=i):(i.pendingProps=u,i.type=a.type,i.flags=0,i.subtreeFlags=0,i.deletions=null),i.flags=14680064&a.flags,i.childLanes=a.childLanes,i.lanes=a.lanes,i.child=a.child,i.memoizedProps=a.memoizedProps,i.memoizedState=a.memoizedState,i.updateQueue=a.updateQueue,u=a.dependencies,i.dependencies=null===u?null:{lanes:u.lanes,firstContext:u.firstContext},i.sibling=a.sibling,i.index=a.index,i.ref=a.ref,i}function yh(a,u,i,o,s,w){var x=2;if(o=a,"function"==typeof a)bj(a)&&(x=1);else if("string"==typeof a)x=5;else e:switch(a){case ea:return Ah(i.children,s,w,u);case eu:x=8,s|=8;break;case eo:return(a=Bg(12,i,u,2|s)).elementType=eo,a.lanes=w,a;case ep:return(a=Bg(13,i,u,s)).elementType=ep,a.lanes=w,a;case eg:return(a=Bg(19,i,u,s)).elementType=eg,a.lanes=w,a;case eb:return qj(i,s,w,u);default:if("object"==typeof a&&null!==a)switch(a.$$typeof){case es:x=10;break e;case ec:x=9;break e;case ef:x=11;break e;case em:x=14;break e;case ev:x=16,o=null;break e}throw Error(p(130,null==a?a:typeof a,""))}return(u=Bg(x,i,u,s)).elementType=a,u.type=o,u.lanes=w,u}function Ah(a,u,i,o){return(a=Bg(7,a,o,u)).lanes=i,a}function qj(a,u,i,o){return(a=Bg(22,a,o,u)).elementType=eb,a.lanes=i,a.stateNode={isHidden:!1},a}function 
xh(a,u,i){return(a=Bg(6,a,null,u)).lanes=i,a}function zh(a,u,i){return(u=Bg(4,null!==a.children?a.children:[],a.key,u)).lanes=i,u.stateNode={containerInfo:a.containerInfo,pendingChildren:null,implementation:a.implementation},u}function bl(a,u,i,o,s){this.tag=u,this.containerInfo=a,this.finishedWork=this.pingCache=this.current=this.pendingChildren=null,this.timeoutHandle=-1,this.callbackNode=this.pendingContext=this.context=null,this.callbackPriority=0,this.eventTimes=zc(0),this.expirationTimes=zc(-1),this.entangledLanes=this.finishedLanes=this.mutableReadLanes=this.expiredLanes=this.pingedLanes=this.suspendedLanes=this.pendingLanes=0,this.entanglements=zc(0),this.identifierPrefix=o,this.onRecoverableError=s,this.mutableSourceEagerHydrationData=null}function cl(a,u,i,o,s,w,x,C,_){return a=new bl(a,u,i,C,_),1===u?(u=1,!0===w&&(u|=8)):u=0,w=Bg(3,null,null,u),a.current=w,w.stateNode=a,w.memoizedState={element:o,isDehydrated:i,cache:null,transitions:null,pendingSuspenseBoundaries:null},ah(w),a}function dl(a,u,i){var o=3>>1,s=a[o];if(0>>1;og(C,i))_g(j,C)?(a[o]=j,a[_]=i,o=_):(a[o]=C,a[x]=i,o=x);else if(_g(j,i))a[o]=j,a[_]=i,o=_;else break}}return u}function g(a,u){var i=a.sortIndex-u.sortIndex;return 0!==i?i:a.id-u.id}if("object"==typeof performance&&"function"==typeof performance.now){var i,o=performance;u.unstable_now=function(){return o.now()}}else{var s=Date,w=s.now();u.unstable_now=function(){return s.now()-w}}var x=[],C=[],_=1,j=null,z=3,P=!1,U=!1,V=!1,B="function"==typeof setTimeout?setTimeout:null,$="function"==typeof clearTimeout?clearTimeout:null,Y="undefined"!=typeof setImmediate?setImmediate:null;function G(a){for(var u=h(C);null!==u;){if(null===u.callback)k(C);else if(u.startTime<=a)k(C),u.sortIndex=u.expirationTime,f(x,u);else break;u=h(C)}}function H(a){if(V=!1,G(a),!U){if(null!==h(x))U=!0,I(J);else{var u=h(C);null!==u&&K(H,u.startTime-a)}}}function J(a,i){U=!1,V&&(V=!1,$(ee),ee=-1),P=!0;var 
o=z;try{for(G(i),j=h(x);null!==j&&(!(j.expirationTime>i)||a&&!M());){var s=j.callback;if("function"==typeof s){j.callback=null,z=j.priorityLevel;var w=s(j.expirationTime<=i);i=u.unstable_now(),"function"==typeof w?j.callback=w:j===h(x)&&k(x),G(i)}else k(x);j=h(x)}if(null!==j)var _=!0;else{var B=h(C);null!==B&&K(H,B.startTime-i),_=!1}return _}finally{j=null,z=o,P=!1}}"undefined"!=typeof navigator&&void 0!==navigator.scheduling&&void 0!==navigator.scheduling.isInputPending&&navigator.scheduling.isInputPending.bind(navigator.scheduling);var Z=!1,X=null,ee=-1,en=5,et=-1;function M(){return!(u.unstable_now()-eta||125s?(a.sortIndex=o,f(C,a),null===h(x)&&a===h(C)&&(V?($(ee),ee=-1):V=!0,K(H,o-s))):(a.sortIndex=w,f(x,a),U||P||(U=!0,I(J))),a},u.unstable_shouldYield=M,u.unstable_wrapCallback=function(a){var u=z;return function(){var i=z;z=u;try{return a.apply(this,arguments)}finally{z=i}}}},3840:function(a,u,i){a.exports=i(53)}}]);(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[179],{3143:function(){"use strict";try{self["workbox:window:6.5.4"]&&_()}catch(l){}function n(l,d){return new Promise(function(f){var h=new MessageChannel;h.port1.onmessage=function(l){f(l.data)},l.postMessage(d,[h.port2])})}function t(l,d){for(var f=0;fl.length)&&(d=l.length);for(var f=0,h=Array(d);f=l.length?{done:!0}:{done:!1,value:l[h++]}}}throw TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}return(f=l[Symbol.iterator]()).next.bind(f)}try{self["workbox:core:6.5.4"]&&_()}catch(l){}var i=function(){var l=this;this.promise=new Promise(function(d,f){l.resolve=d,l.reject=f})};function o(l,d){var f=location.href;return new URL(l,f).href===new URL(d,f).href}var u=function(l,d){this.type=l,Object.assign(this,d)};function a(l,d,f){return f?d?d(l):l:(l&&l.then||(l=Promise.resolve(l)),d?l.then(d):l)}function c(){}var l={type:"SKIP_WAITING"};function s(l,d){if(!d)return 
l&&l.then?l.then(c):Promise.resolve()}var d=function(d){function v(l,f){var h,g;return void 0===f&&(f={}),(h=d.call(this)||this).nn={},h.tn=0,h.rn=new i,h.en=new i,h.on=new i,h.un=0,h.an=new Set,h.cn=function(){var l=h.fn,d=l.installing;h.tn>0||!o(d.scriptURL,h.sn.toString())||performance.now()>h.un+6e4?(h.vn=d,l.removeEventListener("updatefound",h.cn)):(h.hn=d,h.an.add(d),h.rn.resolve(d)),++h.tn,d.addEventListener("statechange",h.ln)},h.ln=function(l){var d=h.fn,f=l.target,g=f.state,y=f===h.vn,P={sw:f,isExternal:y,originalEvent:l};!y&&h.mn&&(P.isUpdate=!0),h.dispatchEvent(new u(g,P)),"installed"===g?h.wn=self.setTimeout(function(){"installed"===g&&d.waiting===f&&h.dispatchEvent(new u("waiting",P))},200):"activating"===g&&(clearTimeout(h.wn),y||h.en.resolve(f))},h.dn=function(l){var d=h.hn,f=d!==navigator.serviceWorker.controller;h.dispatchEvent(new u("controlling",{isExternal:f,originalEvent:l,sw:d,isUpdate:h.mn})),f||h.on.resolve(d)},h.gn=(g=function(l){var d=l.data,f=l.ports,g=l.source;return a(h.getSW(),function(){h.an.has(g)&&h.dispatchEvent(new u("message",{data:d,originalEvent:l,ports:f,sw:g}))})},function(){for(var l=[],d=0;dl.put("/",new Response("",{status:200})))}),window.workbox=new d(window.location.origin+"/sw.js",{scope:"/"}),window.workbox.addEventListener("installed",async({isUpdate:l})=>{if(!l){let l=await caches.open("start-url"),d=await fetch("/"),f=d;d.redirected&&(f=new Response(d.body,{status:200,statusText:"OK",headers:d.headers})),await l.put("/",f)}}),window.workbox.addEventListener("installed",async()=>{let l=window.performance.getEntriesByType("resource").map(l=>l.name).filter(l=>l.startsWith(`${window.location.origin}/_next/data/`)&&l.endsWith(".json")),d=await caches.open("next-data");l.forEach(l=>d.add(l))}),window.workbox.register();{let cacheOnFrontEndNav=function(l){if(window.navigator.onLine&&"/"===l)return fetch("/").then(function(l){return 
l.redirected?Promise.resolve():caches.open("start-url").then(d=>d.put("/",l))})},l=history.pushState;history.pushState=function(){l.apply(history,arguments),cacheOnFrontEndNav(arguments[2])};let d=history.replaceState;history.replaceState=function(){d.apply(history,arguments),cacheOnFrontEndNav(arguments[2])},window.addEventListener("online",()=>{cacheOnFrontEndNav(window.location.pathname)})}window.addEventListener("online",()=>{location.reload()})}},4878:function(l,d){"use strict";function getDeploymentIdQueryOrEmptyString(){return""}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"getDeploymentIdQueryOrEmptyString",{enumerable:!0,get:function(){return getDeploymentIdQueryOrEmptyString}})},37:function(){"trimStart"in String.prototype||(String.prototype.trimStart=String.prototype.trimLeft),"trimEnd"in String.prototype||(String.prototype.trimEnd=String.prototype.trimRight),"description"in Symbol.prototype||Object.defineProperty(Symbol.prototype,"description",{configurable:!0,get:function(){var l=/\((.*)\)/.exec(this.toString());return l?l[1]:void 0}}),Array.prototype.flat||(Array.prototype.flat=function(l,d){return d=this.concat.apply([],this),l>1&&d.some(Array.isArray)?d.flat(l-1):d},Array.prototype.flatMap=function(l,d){return this.map(l,d).flat()}),Promise.prototype.finally||(Promise.prototype.finally=function(l){if("function"!=typeof l)return this.then(l,l);var d=this.constructor||Promise;return this.then(function(f){return d.resolve(l()).then(function(){return f})},function(f){return d.resolve(l()).then(function(){throw f})})}),Object.fromEntries||(Object.fromEntries=function(l){return Array.from(l).reduce(function(l,d){return l[d[0]]=d[1],l},{})}),Array.prototype.at||(Array.prototype.at=function(l){var d=Math.trunc(l)||0;if(d<0&&(d+=this.length),!(d<0||d>=this.length))return this[d]})},7192:function(l,d,f){"use 
strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"addBasePath",{enumerable:!0,get:function(){return addBasePath}});let h=f(6063),g=f(2866);function addBasePath(l,d){return(0,g.normalizePathTrailingSlash)((0,h.addPathPrefix)(l,""))}("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},3607:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"addLocale",{enumerable:!0,get:function(){return addLocale}}),f(2866);let addLocale=function(l){for(var d=arguments.length,f=Array(d>1?d-1:0),h=1;h25){window.location.reload();return}clearTimeout(d),d=setTimeout(init,g>5?5e3:1e3)}f&&f.close();let{hostname:y,port:P}=location,b=getSocketProtocol(l.assetPrefix||""),E=l.assetPrefix.replace(/^\/+/,""),S=b+"://"+y+":"+P+(E?"/"+E:"");E.startsWith("http")&&(S=b+"://"+E.split("://",2)[1]),(f=new window.WebSocket(""+S+l.path)).onopen=handleOnline,f.onerror=handleDisconnect,f.onclose=handleDisconnect,f.onmessage=handleMessage}init()}("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},6864:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"hasBasePath",{enumerable:!0,get:function(){return hasBasePath}});let h=f(387);function hasBasePath(l){return(0,h.pathHasPrefix)(l,"")}("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},6623:function(l,d){"use strict";let f;Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in 
d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{DOMAttributeNames:function(){return h},isEqualNode:function(){return isEqualNode},default:function(){return initHeadManager}});let h={acceptCharset:"accept-charset",className:"class",htmlFor:"for",httpEquiv:"http-equiv",noModule:"noModule"};function reactElementToDOM(l){let{type:d,props:f}=l,g=document.createElement(d);for(let l in f){if(!f.hasOwnProperty(l)||"children"===l||"dangerouslySetInnerHTML"===l||void 0===f[l])continue;let y=h[l]||l.toLowerCase();"script"===d&&("async"===y||"defer"===y||"noModule"===y)?g[y]=!!f[l]:g.setAttribute(y,f[l])}let{children:y,dangerouslySetInnerHTML:P}=f;return P?g.innerHTML=P.__html||"":y&&(g.textContent="string"==typeof y?y:Array.isArray(y)?y.join(""):""),g}function isEqualNode(l,d){if(l instanceof HTMLElement&&d instanceof HTMLElement){let f=d.getAttribute("nonce");if(f&&!l.getAttribute("nonce")){let h=d.cloneNode(!0);return h.setAttribute("nonce",""),h.nonce=f,f===l.nonce&&l.isEqualNode(h)}}return l.isEqualNode(d)}function initHeadManager(){return{mountedInstances:new Set,updateHead:l=>{let d={};l.forEach(l=>{if("link"===l.type&&l.props["data-optimized-fonts"]){if(document.querySelector('style[data-href="'+l.props["data-href"]+'"]'))return;l.props.href=l.props["data-href"],l.props["data-href"]=void 0}let f=d[l.type]||[];f.push(l),d[l.type]=f});let h=d.title?d.title[0]:null,g="";if(h){let{children:l}=h.props;g="string"==typeof l?l:Array.isArray(l)?l.join(""):""}g!==document.title&&(document.title=g),["meta","base","link","style","script"].forEach(l=>{f(l,d[l]||[])})}}}f=(l,d)=>{let f=document.getElementsByTagName("head")[0],h=f.querySelector("meta[name=next-head-count]"),g=Number(h.content),y=[];for(let d=0,f=h.previousElementSibling;d{for(let d=0,f=y.length;d{var d;return null==(d=l.parentNode)?void 0:d.removeChild(l)}),b.forEach(l=>f.insertBefore(l,h)),h.content=(g-y.length+b.length).toString()},("function"==typeof d.default||"object"==typeof 
d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},1078:function(l,d,f){"use strict";let h,g,y,P,b,E,S,w,R,O,j,A;Object.defineProperty(d,"__esModule",{value:!0});let M=f(1757);Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{version:function(){return ea},router:function(){return h},emitter:function(){return eo},initialize:function(){return initialize},hydrate:function(){return hydrate}});let C=f(8754);f(37);let I=C._(f(7294)),L=C._(f(745)),x=f(6734),N=C._(f(6860)),D=f(1823),k=f(3937),F=f(9203),U=f(5980),H=f(5612),B=f(109),W=f(4511),q=C._(f(6623)),z=C._(f(804)),G=C._(f(2891)),V=f(8099),X=f(9974),K=f(676),Y=f(869),Q=f(8961),$=f(6864),J=f(9031),Z=f(9642),ee=f(1593),et=C._(f(80)),er=C._(f(5944)),en=C._(f(5677)),ea="14.0.1",eo=(0,N.default)(),looseToArray=l=>[].slice.call(l),ei=!1;let Container=class Container extends I.default.Component{componentDidCatch(l,d){this.props.fn(l,d)}componentDidMount(){this.scrollToHash(),h.isSsr&&(g.isFallback||g.nextExport&&((0,F.isDynamicRoute)(h.pathname)||location.search||ei)||g.props&&g.props.__N_SSG&&(location.search||ei))&&h.replace(h.pathname+"?"+String((0,U.assign)((0,U.urlQueryToSearchParams)(h.query),new URLSearchParams(location.search))),y,{_h:1,shallow:!g.isFallback&&!ei}).catch(l=>{if(!l.cancelled)throw l})}componentDidUpdate(){this.scrollToHash()}scrollToHash(){let{hash:l}=location;if(!(l=l&&l.substring(1)))return;let d=document.getElementById(l);d&&setTimeout(()=>d.scrollIntoView(),0)}render(){return this.props.children}};async function initialize(l){void 0===l&&(l={}),er.default.onSpanEnd(en.default),g=JSON.parse(document.getElementById("__NEXT_DATA__").textContent),window.__NEXT_DATA__=g,A=g.defaultLocale;let 
d=g.assetPrefix||"";if(self.__next_set_public_path__(""+d+"/_next/"),(0,H.setConfig)({serverRuntimeConfig:{},publicRuntimeConfig:g.runtimeConfig||{}}),y=(0,B.getURL)(),(0,$.hasBasePath)(y)&&(y=(0,Q.removeBasePath)(y)),g.scriptLoader){let{initScriptLoader:l}=f(5354);l(g.scriptLoader)}P=new z.default(g.buildId,d);let register=l=>{let[d,f]=l;return P.routeLoader.onEntrypoint(d,f)};return window.__NEXT_P&&window.__NEXT_P.map(l=>setTimeout(()=>register(l),0)),window.__NEXT_P=[],window.__NEXT_P.push=register,(E=(0,q.default)()).getIsSsr=()=>h.isSsr,b=document.getElementById("__next"),{assetPrefix:d}}function renderApp(l,d){return I.default.createElement(l,d)}function AppContainer(l){var d;let{children:f}=l,g=I.default.useMemo(()=>(0,Z.adaptForAppRouterInstance)(h),[]);return I.default.createElement(Container,{fn:l=>renderError({App:R,err:l}).catch(l=>console.error("Error rendering page: ",l))},I.default.createElement(J.AppRouterContext.Provider,{value:g},I.default.createElement(ee.SearchParamsContext.Provider,{value:(0,Z.adaptForSearchParams)(h)},I.default.createElement(Z.PathnameContextProviderAdapter,{router:h,isAutoExport:null!=(d=self.__NEXT_DATA__.autoExport)&&d},I.default.createElement(ee.PathParamsContext.Provider,{value:(0,Z.adaptForPathParams)(h)},I.default.createElement(D.RouterContext.Provider,{value:(0,X.makePublicRouterInstance)(h)},I.default.createElement(x.HeadManagerContext.Provider,{value:E},I.default.createElement(Y.ImageConfigContext.Provider,{value:{deviceSizes:[640,750,828,1080,1200,1920,2048,3840],imageSizes:[16,32,48,64,96,128,256,384],path:"/_next/image",loader:"default",dangerouslyAllowSVG:!1,unoptimized:!1}},f))))))))}let wrapApp=l=>d=>{let f={...d,Component:j,err:g.err,router:h};return I.default.createElement(AppContainer,null,renderApp(l,f))};function renderError(l){let{App:d,err:b}=l;return console.error(b),console.error("A client-side exception has occurred, see here for more info: 
https://nextjs.org/docs/messages/client-side-exception-occurred"),P.loadPage("/_error").then(h=>{let{page:g,styleSheets:y}=h;return(null==S?void 0:S.Component)===g?Promise.resolve().then(()=>M._(f(6908))).then(h=>Promise.resolve().then(()=>M._(f(1337))).then(f=>(d=f.default,l.App=d,h))).then(l=>({ErrorComponent:l.default,styleSheets:[]})):{ErrorComponent:g,styleSheets:y}}).then(f=>{var P;let{ErrorComponent:E,styleSheets:S}=f,w=wrapApp(d),R={Component:E,AppTree:w,router:h,ctx:{err:b,pathname:g.page,query:g.query,asPath:y,AppTree:w}};return Promise.resolve((null==(P=l.props)?void 0:P.err)?l.props:(0,B.loadGetInitialProps)(d,R)).then(d=>doRender({...l,err:b,Component:E,styleSheets:S,props:d}))})}function Head(l){let{callback:d}=l;return I.default.useLayoutEffect(()=>d(),[d]),null}let el={navigationStart:"navigationStart",beforeRender:"beforeRender",afterRender:"afterRender",afterHydrate:"afterHydrate",routeChange:"routeChange"},eu={hydration:"Next.js-hydration",beforeHydration:"Next.js-before-hydration",routeChangeToRender:"Next.js-route-change-to-render",render:"Next.js-render"},es=null,ec=!0;function clearMarks(){[el.beforeRender,el.afterHydrate,el.afterRender,el.routeChange].forEach(l=>performance.clearMarks(l))}function markHydrateComplete(){if(!B.ST)return;performance.mark(el.afterHydrate);let l=performance.getEntriesByName(el.beforeRender,"mark").length;l&&(performance.measure(eu.beforeHydration,el.navigationStart,el.beforeRender),performance.measure(eu.hydration,el.beforeRender,el.afterHydrate)),O&&performance.getEntriesByName(eu.hydration).forEach(O),clearMarks()}function markRenderComplete(){if(!B.ST)return;performance.mark(el.afterRender);let l=performance.getEntriesByName(el.routeChange,"mark");if(!l.length)return;let 
d=performance.getEntriesByName(el.beforeRender,"mark").length;d&&(performance.measure(eu.routeChangeToRender,l[0].name,el.beforeRender),performance.measure(eu.render,el.beforeRender,el.afterRender),O&&(performance.getEntriesByName(eu.render).forEach(O),performance.getEntriesByName(eu.routeChangeToRender).forEach(O))),clearMarks(),[eu.routeChangeToRender,eu.render].forEach(l=>performance.clearMeasures(l))}function renderReactElement(l,d){B.ST&&performance.mark(el.beforeRender);let f=d(ec?markHydrateComplete:markRenderComplete);if(es){let l=I.default.startTransition;l(()=>{es.render(f)})}else es=L.default.hydrateRoot(l,f,{onRecoverableError:et.default}),ec=!1}function Root(l){let{callbacks:d,children:f}=l;return I.default.useLayoutEffect(()=>d.forEach(l=>l()),[d]),I.default.useEffect(()=>{(0,G.default)(O)},[]),f}function doRender(l){let d,{App:f,Component:g,props:y,err:P}=l,E="initial"in l?void 0:l.styleSheets;g=g||S.Component,y=y||S.props;let R={...y,Component:g,err:P,router:h};S=R;let O=!1,j=new Promise((l,f)=>{w&&w(),d=()=>{w=null,l()},w=()=>{O=!0,w=null;let l=Error("Cancel rendering route");l.cancelled=!0,f(l)}});function onHeadCommit(){if(E&&!O){let l=new Set(E.map(l=>l.href)),d=looseToArray(document.querySelectorAll("style[data-n-href]")),f=d.map(l=>l.getAttribute("data-n-href"));for(let h=0;h{let{href:d}=l,f=document.querySelector('style[data-n-href="'+d+'"]');f&&(h.parentNode.insertBefore(f,h.nextSibling),h=f)}),looseToArray(document.querySelectorAll("link[data-n-p]")).forEach(l=>{l.parentNode.removeChild(l)})}if(l.scroll){let{x:d,y:f}=l.scroll;(0,k.handleSmoothScroll)(()=>{window.scrollTo(d,f)})}}function onRootCommit(){d()}!function(){if(!E)return;let l=looseToArray(document.querySelectorAll("style[data-n-href]")),d=new Set(l.map(l=>l.getAttribute("data-n-href"))),f=document.querySelector("noscript[data-n-css]"),h=null==f?void 0:f.getAttribute("data-n-css");E.forEach(l=>{let{href:f,text:g}=l;if(!d.has(f)){let 
l=document.createElement("style");l.setAttribute("data-n-href",f),l.setAttribute("media","x"),h&&l.setAttribute("nonce",h),document.head.appendChild(l),l.appendChild(document.createTextNode(g))}})}();let A=I.default.createElement(I.default.Fragment,null,I.default.createElement(Head,{callback:onHeadCommit}),I.default.createElement(AppContainer,null,renderApp(f,R),I.default.createElement(W.Portal,{type:"next-route-announcer"},I.default.createElement(V.RouteAnnouncer,null))));return renderReactElement(b,l=>I.default.createElement(Root,{callbacks:[l,onRootCommit]},A)),j}async function render(l){if(l.err){await renderError(l);return}try{await doRender(l)}catch(f){let d=(0,K.getProperError)(f);if(d.cancelled)throw d;await renderError({...l,err:d})}}async function hydrate(l){let d=g.err;try{let l=await P.routeLoader.whenEntrypoint("/_app");if("error"in l)throw l.error;let{component:d,exports:f}=l;R=d,f&&f.reportWebVitals&&(O=l=>{let d,{id:h,name:g,startTime:y,value:P,duration:b,entryType:E,entries:S,attribution:w}=l,R=Date.now()+"-"+(Math.floor(Math.random()*(9e12-1))+1e12);S&&S.length&&(d=S[0].startTime);let O={id:h||R,name:g,startTime:y||d,value:null==P?b:P,label:"mark"===E||"measure"===E?"custom":"web-vital"};w&&(O.attribution=w),f.reportWebVitals(O)});let h=await P.routeLoader.whenEntrypoint(g.page);if("error"in h)throw h.error;j=h.component}catch(l){d=(0,K.getProperError)(l)}window.__NEXT_PRELOADREADY&&await window.__NEXT_PRELOADREADY(g.dynamicIds),h=(0,X.createRouter)(g.page,g.query,y,{initialProps:g.props,pageLoader:P,App:R,Component:j,wrapApp,err:d,isFallback:!!g.isFallback,subscription:(l,d,f)=>render(Object.assign({},l,{App:d,scroll:f})),locale:g.locale,locales:g.locales,defaultLocale:A,domainLocales:g.domainLocales,isPreview:g.isPreview}),ei=await h._initialMatchesMiddlewarePromise;let f={App:R,initial:!0,Component:j,props:g.props,err:d};(null==l?void 0:l.beforeRender)&&await l.beforeRender(),render(f)}("function"==typeof d.default||"object"==typeof 
d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},6003:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),f(3737);let h=f(1078);window.next={version:h.version,get router(){return h.router},emitter:h.emitter},(0,h.initialize)({}).then(()=>(0,h.hydrate)()).catch(console.error),("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},2866:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"normalizePathTrailingSlash",{enumerable:!0,get:function(){return normalizePathTrailingSlash}});let h=f(7425),g=f(1156),normalizePathTrailingSlash=l=>{if(!l.startsWith("/"))return l;let{pathname:d,query:f,hash:y}=(0,g.parsePath)(l);return""+(0,h.removeTrailingSlash)(d)+f+y};("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},80:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"default",{enumerable:!0,get:function(){return onRecoverableError}});let h=f(6146);function onRecoverableError(l){let d="function"==typeof reportError?reportError:l=>{window.console.error(l)};l.digest!==h.NEXT_DYNAMIC_NO_SSR_CODE&&d(l)}("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},804:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"default",{enumerable:!0,get:function(){return PageLoader}});let 
h=f(8754),g=f(7192),y=f(2969),P=h._(f(8356)),b=f(3607),E=f(9203),S=f(1748),w=f(7425),R=f(769);f(2338);let PageLoader=class PageLoader{getPageList(){return(0,R.getClientBuildManifest)().then(l=>l.sortedPages)}getMiddleware(){return window.__MIDDLEWARE_MATCHERS=[],window.__MIDDLEWARE_MATCHERS}getDataHref(l){let{asPath:d,href:f,locale:h}=l,{pathname:R,query:O,search:j}=(0,S.parseRelativeUrl)(f),{pathname:A}=(0,S.parseRelativeUrl)(d),M=(0,w.removeTrailingSlash)(R);if("/"!==M[0])throw Error('Route name should start with a "/", got "'+M+'"');return(l=>{let d=(0,P.default)((0,w.removeTrailingSlash)((0,b.addLocale)(l,h)),".json");return(0,g.addBasePath)("/_next/data/"+this.buildId+d+j,!0)})(l.skipInterpolation?A:(0,E.isDynamicRoute)(M)?(0,y.interpolateAs)(R,A,O).result:M)}_isSsg(l){return this.promisedSsgManifest.then(d=>d.has(l))}loadPage(l){return this.routeLoader.loadRoute(l).then(l=>{if("component"in l)return{page:l.component,mod:l.exports,styleSheets:l.styles.map(l=>({href:l.href,text:l.content}))};throw l.error})}prefetch(l){return this.routeLoader.prefetch(l)}constructor(l,d){this.routeLoader=(0,R.createRouteLoader)(d),this.buildId=l,this.assetPrefix=d,this.promisedSsgManifest=new Promise(l=>{window.__SSG_MANIFEST?l(window.__SSG_MANIFEST):window.__SSG_MANIFEST_CB=()=>{l(window.__SSG_MANIFEST)}})}};("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},2891:function(l,d,f){"use strict";let h;Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"default",{enumerable:!0,get:function(){return _default}});let g=["CLS","FCP","FID","INP","LCP","TTFB"];location.href;let y=!1;function onReport(l){h&&h(l)}let _default=l=>{if(h=l,!y)for(let l of(y=!0,g))try{let d;d||(d=f(8018)),d["on"+l](onReport)}catch(d){console.warn("Failed to track "+l+" web-vital",d)}};("function"==typeof 
d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},4511:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"Portal",{enumerable:!0,get:function(){return Portal}});let h=f(7294),g=f(3935),Portal=l=>{let{children:d,type:f}=l,[y,P]=(0,h.useState)(null);return(0,h.useEffect)(()=>{let l=document.createElement(f);return document.body.appendChild(l),P(l),()=>{document.body.removeChild(l)}},[f]),y?(0,g.createPortal)(d,y):null};("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},8961:function(l,d,f){"use strict";function removeBasePath(l){return l}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"removeBasePath",{enumerable:!0,get:function(){return removeBasePath}}),f(6864),("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},5637:function(l,d,f){"use strict";function removeLocale(l,d){return l}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"removeLocale",{enumerable:!0,get:function(){return removeLocale}}),f(1156),("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},3436:function(l,d){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{requestIdleCallback:function(){return f},cancelIdleCallback:function(){return h}});let f="undefined"!=typeof 
self&&self.requestIdleCallback&&self.requestIdleCallback.bind(window)||function(l){let d=Date.now();return self.setTimeout(function(){l({didTimeout:!1,timeRemaining:function(){return Math.max(0,50-(Date.now()-d))}})},1)},h="undefined"!=typeof self&&self.cancelIdleCallback&&self.cancelIdleCallback.bind(window)||function(l){return clearTimeout(l)};("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},4450:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"resolveHref",{enumerable:!0,get:function(){return resolveHref}});let h=f(5980),g=f(4364),y=f(6455),P=f(109),b=f(2866),E=f(2227),S=f(8410),w=f(2969);function resolveHref(l,d,f){let R;let O="string"==typeof d?d:(0,g.formatWithValidation)(d),j=O.match(/^[a-zA-Z]{1,}:\/\//),A=j?O.slice(j[0].length):O,M=A.split("?",1);if((M[0]||"").match(/(\/\/|\\)/)){console.error("Invalid href '"+O+"' passed to next/router in page: '"+l.pathname+"'. 
Repeated forward-slashes (//) or backslashes \\ are not valid in the href.");let d=(0,P.normalizeRepeatedSlashes)(A);O=(j?j[0]:"")+d}if(!(0,E.isLocalURL)(O))return f?[O]:O;try{R=new URL(O.startsWith("#")?l.asPath:l.pathname,"http://n")}catch(l){R=new URL("/","http://n")}try{let l=new URL(O,R);l.pathname=(0,b.normalizePathTrailingSlash)(l.pathname);let d="";if((0,S.isDynamicRoute)(l.pathname)&&l.searchParams&&f){let f=(0,h.searchParamsToUrlQuery)(l.searchParams),{result:P,params:b}=(0,w.interpolateAs)(l.pathname,l.pathname,f);P&&(d=(0,g.formatWithValidation)({pathname:P,hash:l.hash,query:(0,y.omit)(f,b)}))}let P=l.origin===R.origin?l.href.slice(l.origin.length):l.href;return f?[P,d||P]:P}catch(l){return f?[O]:O}}("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},8099:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{RouteAnnouncer:function(){return RouteAnnouncer},default:function(){return b}});let h=f(8754),g=h._(f(7294)),y=f(9974),P={border:0,clip:"rect(0 0 0 0)",height:"1px",margin:"-1px",overflow:"hidden",padding:0,position:"absolute",top:0,width:"1px",whiteSpace:"nowrap",wordWrap:"normal"},RouteAnnouncer=()=>{let{asPath:l}=(0,y.useRouter)(),[d,f]=g.default.useState(""),h=g.default.useRef(l);return g.default.useEffect(()=>{if(h.current!==l){if(h.current=l,document.title)f(document.title);else{var d;let h=document.querySelector("h1"),g=null!=(d=null==h?void 0:h.innerText)?d:null==h?void 0:h.textContent;f(g||l)}}},[l]),g.default.createElement("p",{"aria-live":"assertive",id:"__next-route-announcer__",role:"alert",style:P},d)},b=RouteAnnouncer;("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 
0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},769:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{markAssetError:function(){return markAssetError},isAssetError:function(){return isAssetError},getClientBuildManifest:function(){return getClientBuildManifest},createRouteLoader:function(){return createRouteLoader}}),f(8754),f(8356);let h=f(6912),g=f(3436),y=f(4878);function withFuture(l,d,f){let h,g=d.get(l);if(g)return"future"in g?g.future:Promise.resolve(g);let y=new Promise(l=>{h=l});return d.set(l,g={resolve:h,future:y}),f?f().then(l=>(h(l),l)).catch(f=>{throw d.delete(l),f}):y}let P=Symbol("ASSET_LOAD_ERROR");function markAssetError(l){return Object.defineProperty(l,P,{})}function isAssetError(l){return l&&P in l}function hasPrefetch(l){try{return l=document.createElement("link"),!!window.MSInputMethodContext&&!!document.documentMode||l.relList.supports("prefetch")}catch(l){return!1}}let b=hasPrefetch(),getAssetQueryString=()=>(0,y.getDeploymentIdQueryOrEmptyString)();function prefetchViaDom(l,d,f){return new Promise((h,g)=>{let y='\n link[rel="prefetch"][href^="'+l+'"],\n link[rel="preload"][href^="'+l+'"],\n script[src^="'+l+'"]';if(document.querySelector(y))return h();f=document.createElement("link"),d&&(f.as=d),f.rel="prefetch",f.crossOrigin=void 0,f.onload=h,f.onerror=()=>g(markAssetError(Error("Failed to prefetch: "+l))),f.href=l,document.head.appendChild(f)})}function appendScript(l,d){return new Promise((f,h)=>{(d=document.createElement("script")).onload=f,d.onerror=()=>h(markAssetError(Error("Failed to load script: "+l))),d.crossOrigin=void 0,d.src=l,document.body.appendChild(d)})}function resolvePromiseWithTimeout(l,d,f){return new Promise((h,y)=>{let P=!1;l.then(l=>{P=!0,h(l)}).catch(y),(0,g.requestIdleCallback)(()=>setTimeout(()=>{P||y(f)},d))})}function 
getClientBuildManifest(){if(self.__BUILD_MANIFEST)return Promise.resolve(self.__BUILD_MANIFEST);let l=new Promise(l=>{let d=self.__BUILD_MANIFEST_CB;self.__BUILD_MANIFEST_CB=()=>{l(self.__BUILD_MANIFEST),d&&d()}});return resolvePromiseWithTimeout(l,3800,markAssetError(Error("Failed to load client build manifest")))}function getFilesForRoute(l,d){return getClientBuildManifest().then(f=>{if(!(d in f))throw markAssetError(Error("Failed to lookup route: "+d));let g=f[d].map(d=>l+"/_next/"+encodeURI(d));return{scripts:g.filter(l=>l.endsWith(".js")).map(l=>(0,h.__unsafeCreateTrustedScriptURL)(l)+getAssetQueryString()),css:g.filter(l=>l.endsWith(".css")).map(l=>l+getAssetQueryString())}})}function createRouteLoader(l){let d=new Map,f=new Map,h=new Map,y=new Map;function maybeExecuteScript(l){{let d=f.get(l.toString());return d||(document.querySelector('script[src^="'+l+'"]')?Promise.resolve():(f.set(l.toString(),d=appendScript(l)),d))}}function fetchStyleSheet(l){let d=h.get(l);return d||h.set(l,d=fetch(l).then(d=>{if(!d.ok)throw Error("Failed to load stylesheet: "+l);return d.text().then(d=>({href:l,content:d}))}).catch(l=>{throw markAssetError(l)})),d}return{whenEntrypoint:l=>withFuture(l,d),onEntrypoint(l,f){(f?Promise.resolve().then(()=>f()).then(l=>({component:l&&l.default||l,exports:l}),l=>({error:l})):Promise.resolve(void 0)).then(f=>{let h=d.get(l);h&&"resolve"in h?f&&(d.set(l,f),h.resolve(f)):(f?d.set(l,f):d.delete(l),y.delete(l))})},loadRoute(f,h){return withFuture(f,y,()=>{let g;return resolvePromiseWithTimeout(getFilesForRoute(l,f).then(l=>{let{scripts:h,css:g}=l;return Promise.all([d.has(f)?[]:Promise.all(h.map(maybeExecuteScript)),Promise.all(g.map(fetchStyleSheet))])}).then(l=>this.whenEntrypoint(f).then(d=>({entrypoint:d,styles:l[1]}))),3800,markAssetError(Error("Route did not complete loading: "+f))).then(l=>{let{entrypoint:d,styles:f}=l,h=Object.assign({styles:f},d);return"error"in d?d:h}).catch(l=>{if(h)throw l;return{error:l}}).finally(()=>null==g?void 
0:g())})},prefetch(d){let f;return(f=navigator.connection)&&(f.saveData||/2g/.test(f.effectiveType))?Promise.resolve():getFilesForRoute(l,d).then(l=>Promise.all(b?l.scripts.map(l=>prefetchViaDom(l.toString(),"script")):[])).then(()=>{(0,g.requestIdleCallback)(()=>this.loadRoute(d,!0).catch(()=>{}))}).catch(()=>{})}}}("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},9974:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{Router:function(){return y.default},default:function(){return O},withRouter:function(){return E.default},useRouter:function(){return useRouter},createRouter:function(){return createRouter},makePublicRouterInstance:function(){return makePublicRouterInstance}});let h=f(8754),g=h._(f(7294)),y=h._(f(2997)),P=f(1823),b=h._(f(676)),E=h._(f(3591)),S={router:null,readyCallbacks:[],ready(l){if(this.router)return l();this.readyCallbacks.push(l)}},w=["pathname","route","query","asPath","components","isFallback","basePath","locale","locales","defaultLocale","isReady","isPreview","isLocaleDomain","domainLocales"],R=["push","replace","reload","back","prefetch","beforePopState"];function getRouter(){if(!S.router)throw Error('No router instance found.\nYou should only use "next/router" on the client side of your app.\n');return S.router}Object.defineProperty(S,"events",{get:()=>y.default.events}),w.forEach(l=>{Object.defineProperty(S,l,{get(){let d=getRouter();return d[l]}})}),R.forEach(l=>{S[l]=function(){for(var d=arguments.length,f=Array(d),h=0;h{S.ready(()=>{y.default.events.on(l,function(){for(var d=arguments.length,f=Array(d),h=0;hl()),S.readyCallbacks=[],S.router}function makePublicRouterInstance(l){let d={};for(let f of w){if("object"==typeof 
l[f]){d[f]=Object.assign(Array.isArray(l[f])?[]:{},l[f]);continue}d[f]=l[f]}return d.events=y.default.events,R.forEach(f=>{d[f]=function(){for(var d=arguments.length,h=Array(d),g=0;g{if(y.default.preinit){l.forEach(l=>{y.default.preinit(l,{as:"style"})});return}{let d=document.head;l.forEach(l=>{let f=document.createElement("link");f.type="text/css",f.rel="stylesheet",f.href=l,d.appendChild(f)})}},loadScript=l=>{let{src:d,id:f,onLoad:h=()=>{},onReady:g=null,dangerouslySetInnerHTML:y,children:P="",strategy:b="afterInteractive",onError:S,stylesheets:j}=l,A=f||d;if(A&&R.has(A))return;if(w.has(d)){R.add(A),w.get(d).then(h,S);return}let afterLoad=()=>{g&&g(),R.add(A)},M=document.createElement("script"),C=new Promise((l,d)=>{M.addEventListener("load",function(d){l(),h&&h.call(this,d),afterLoad()}),M.addEventListener("error",function(l){d(l)})}).catch(function(l){S&&S(l)});for(let[f,h]of(y?(M.innerHTML=y.__html||"",afterLoad()):P?(M.textContent="string"==typeof P?P:Array.isArray(P)?P.join(""):"",afterLoad()):d&&(M.src=d,w.set(d,C)),Object.entries(l))){if(void 0===h||O.includes(f))continue;let l=E.DOMAttributeNames[f]||f.toLowerCase();M.setAttribute(l,h)}"worker"===b&&M.setAttribute("type","text/partytown"),M.setAttribute("data-nscript",b),j&&insertStylesheets(j),document.body.appendChild(M)};function handleClientScriptLoad(l){let{strategy:d="afterInteractive"}=l;"lazyOnload"===d?window.addEventListener("load",()=>{(0,S.requestIdleCallback)(()=>loadScript(l))}):loadScript(l)}function loadLazyScript(l){"complete"===document.readyState?(0,S.requestIdleCallback)(()=>loadScript(l)):window.addEventListener("load",()=>{(0,S.requestIdleCallback)(()=>loadScript(l))})}function addBeforeInteractiveToCache(){let l=[...document.querySelectorAll('[data-nscript="beforeInteractive"]'),...document.querySelectorAll('[data-nscript="beforePageRender"]')];l.forEach(l=>{let d=l.id||l.getAttribute("src");R.add(d)})}function 
initScriptLoader(l){l.forEach(handleClientScriptLoad),addBeforeInteractiveToCache()}function Script(l){let{id:d,src:f="",onLoad:h=()=>{},onReady:g=null,strategy:E="afterInteractive",onError:S,stylesheets:w,...O}=l,{updateScripts:j,scripts:A,getIsSsr:M,appDir:C,nonce:I}=(0,P.useContext)(b.HeadManagerContext),L=(0,P.useRef)(!1);(0,P.useEffect)(()=>{let l=d||f;L.current||(g&&l&&R.has(l)&&g(),L.current=!0)},[g,d,f]);let x=(0,P.useRef)(!1);if((0,P.useEffect)(()=>{x.current||("afterInteractive"===E?loadScript(l):"lazyOnload"===E&&loadLazyScript(l),x.current=!0)},[l,E]),("beforeInteractive"===E||"worker"===E)&&(j?(A[E]=(A[E]||[]).concat([{id:d,src:f,onLoad:h,onReady:g,onError:S,...O}]),j(A)):M&&M()?R.add(d||f):M&&!M()&&loadScript(l)),C){if(w&&w.forEach(l=>{y.default.preinit(l,{as:"style"})}),"beforeInteractive"===E)return f?(y.default.preload(f,O.integrity?{as:"script",integrity:O.integrity}:{as:"script"}),P.default.createElement("script",{nonce:I,dangerouslySetInnerHTML:{__html:"(self.__next_s=self.__next_s||[]).push("+JSON.stringify([f])+")"}})):(O.dangerouslySetInnerHTML&&(O.children=O.dangerouslySetInnerHTML.__html,delete O.dangerouslySetInnerHTML),P.default.createElement("script",{nonce:I,dangerouslySetInnerHTML:{__html:"(self.__next_s=self.__next_s||[]).push("+JSON.stringify([0,{...O}])+")"}}));"afterInteractive"===E&&f&&y.default.preload(f,O.integrity?{as:"script",integrity:O.integrity}:{as:"script"})}return null}Object.defineProperty(Script,"__nextScript",{value:!0});let j=Script;("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},5677:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"default",{enumerable:!0,get:function(){return reportToSocket}});let h=f(2114);function reportToSocket(l){if("ended"!==l.state.state)throw Error("Expected span to be 
ended");(0,h.sendMessage)(JSON.stringify({event:"span-end",startTime:l.startTime,endTime:l.state.endTime,spanName:l.name,attributes:l.attributes}))}("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},5944:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"default",{enumerable:!0,get:function(){return y}});let h=f(8754),g=h._(f(6860));let Span=class Span{end(l){if("ended"===this.state.state)throw Error("Span has already ended");this.state={state:"ended",endTime:null!=l?l:Date.now()},this.onSpanEnd(this)}constructor(l,d,f){var h,g;this.name=l,this.attributes=null!=(h=d.attributes)?h:{},this.startTime=null!=(g=d.startTime)?g:Date.now(),this.onSpanEnd=f,this.state={state:"inprogress"}}};let Tracer=class Tracer{startSpan(l,d){return new Span(l,d,this.handleSpanEnd)}onSpanEnd(l){return this._emitter.on("spanend",l),()=>{this._emitter.off("spanend",l)}}constructor(){this._emitter=(0,g.default)(),this.handleSpanEnd=l=>{this._emitter.emit("spanend",l)}}};let y=new Tracer;("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},6912:function(l,d){"use strict";let f;function getPolicy(){if(void 0===f){var l;f=(null==(l=window.trustedTypes)?void 0:l.createPolicy("nextjs",{createHTML:l=>l,createScript:l=>l,createScriptURL:l=>l}))||null}return f}function __unsafeCreateTrustedScriptURL(l){var d;return(null==(d=getPolicy())?void 0:d.createScriptURL(l))||l}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"__unsafeCreateTrustedScriptURL",{enumerable:!0,get:function(){return __unsafeCreateTrustedScriptURL}}),("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 
0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},3737:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),f(4878),self.__next_set_public_path__=l=>{f.p=l},("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},3591:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"default",{enumerable:!0,get:function(){return withRouter}});let h=f(8754),g=h._(f(7294)),y=f(9974);function withRouter(l){function WithRouterWrapper(d){return g.default.createElement(l,{router:(0,y.useRouter)(),...d})}return WithRouterWrapper.getInitialProps=l.getInitialProps,WithRouterWrapper.origGetInitialProps=l.origGetInitialProps,WithRouterWrapper}("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},1337:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"default",{enumerable:!0,get:function(){return App}});let h=f(8754),g=h._(f(7294)),y=f(109);async function appGetInitialProps(l){let{Component:d,ctx:f}=l,h=await (0,y.loadGetInitialProps)(d,f);return{pageProps:h}}let App=class App extends g.default.Component{render(){let{Component:l,pageProps:d}=this.props;return g.default.createElement(l,d)}};App.origGetInitialProps=appGetInitialProps,App.getInitialProps=appGetInitialProps,("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},6908:function(l,d,f){"use 
strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"default",{enumerable:!0,get:function(){return Error}});let h=f(8754),g=h._(f(7294)),y=h._(f(9201)),P={400:"Bad Request",404:"This page could not be found",405:"Method Not Allowed",500:"Internal Server Error"};function _getInitialProps(l){let{res:d,err:f}=l,h=d&&d.statusCode?d.statusCode:f?f.statusCode:404;return{statusCode:h}}let b={error:{fontFamily:'system-ui,"Segoe UI",Roboto,Helvetica,Arial,sans-serif,"Apple Color Emoji","Segoe UI Emoji"',height:"100vh",textAlign:"center",display:"flex",flexDirection:"column",alignItems:"center",justifyContent:"center"},desc:{lineHeight:"48px"},h1:{display:"inline-block",margin:"0 20px 0 0",paddingRight:23,fontSize:24,fontWeight:500,verticalAlign:"top"},h2:{fontSize:14,fontWeight:400,lineHeight:"28px"},wrap:{display:"inline-block"}};let Error=class Error extends g.default.Component{render(){let{statusCode:l,withDarkMode:d=!0}=this.props,f=this.props.title||P[l]||"An unexpected error has occurred";return g.default.createElement("div",{style:b.error},g.default.createElement(y.default,null,g.default.createElement("title",null,l?l+": "+f:"Application error: a client-side exception has occurred")),g.default.createElement("div",{style:b.desc},g.default.createElement("style",{dangerouslySetInnerHTML:{__html:"body{color:#000;background:#fff;margin:0}.next-error-h1{border-right:1px solid rgba(0,0,0,.3)}"+(d?"@media (prefers-color-scheme:dark){body{color:#fff;background:#000}.next-error-h1{border-right:1px solid rgba(255,255,255,.3)}}":"")}}),l?g.default.createElement("h1",{className:"next-error-h1",style:b.h1},l):null,g.default.createElement("div",{style:b.wrap},g.default.createElement("h2",{style:b.h2},this.props.title||l?f:g.default.createElement(g.default.Fragment,null,"Application error: a client-side exception has occurred (see the browser console for more 
information)"),"."))))}};Error.displayName="ErrorPage",Error.getInitialProps=_getInitialProps,Error.origGetInitialProps=_getInitialProps,("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},6861:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"AmpStateContext",{enumerable:!0,get:function(){return y}});let h=f(8754),g=h._(f(7294)),y=g.default.createContext({})},7543:function(l,d){"use strict";function isInAmpMode(l){let{ampFirst:d=!1,hybrid:f=!1,hasQuery:h=!1}=void 0===l?{}:l;return d||f&&h}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"isInAmpMode",{enumerable:!0,get:function(){return isInAmpMode}})},9031:function(l,d,f){"use strict";var h,g;Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{CacheStates:function(){return h},AppRouterContext:function(){return b},LayoutRouterContext:function(){return E},GlobalLayoutRouterContext:function(){return S},TemplateContext:function(){return w}});let y=f(8754),P=y._(f(7294));(g=h||(h={})).LAZY_INITIALIZED="LAZYINITIALIZED",g.DATA_FETCH="DATAFETCH",g.READY="READY";let b=P.default.createContext(null),E=P.default.createContext(null),S=P.default.createContext(null),w=P.default.createContext(null)},684:function(l,d){"use strict";function murmurhash2(l){let d=0;for(let f=0;f>>13,d=Math.imul(d,1540483477)}return d>>>0}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"BloomFilter",{enumerable:!0,get:function(){return BloomFilter}});let BloomFilter=class BloomFilter{static from(l,d){void 0===d&&(d=.01);let f=new BloomFilter(l.length,d);for(let d of l)f.add(d);return f}export(){let 
l={numItems:this.numItems,errorRate:this.errorRate,numBits:this.numBits,numHashes:this.numHashes,bitArray:this.bitArray};return l}import(l){this.numItems=l.numItems,this.errorRate=l.errorRate,this.numBits=l.numBits,this.numHashes=l.numHashes,this.bitArray=l.bitArray}add(l){let d=this.getHashValues(l);d.forEach(l=>{this.bitArray[l]=1})}contains(l){let d=this.getHashValues(l);return d.every(l=>this.bitArray[l])}getHashValues(l){let d=[];for(let f=1;f<=this.numHashes;f++){let h=murmurhash2(""+l+f)%this.numBits;d.push(h)}return d}constructor(l,d){this.numItems=l,this.errorRate=d,this.numBits=Math.ceil(-(l*Math.log(d))/(Math.log(2)*Math.log(2))),this.numHashes=Math.ceil(this.numBits/l*Math.log(2)),this.bitArray=Array(this.numBits).fill(0)}}},2338:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{MODERN_BROWSERSLIST_TARGET:function(){return g.default},COMPILER_NAMES:function(){return y},INTERNAL_HEADERS:function(){return P},COMPILER_INDEXES:function(){return b},PHASE_EXPORT:function(){return E},PHASE_PRODUCTION_BUILD:function(){return S},PHASE_PRODUCTION_SERVER:function(){return w},PHASE_DEVELOPMENT_SERVER:function(){return R},PHASE_TEST:function(){return O},PHASE_INFO:function(){return j},PAGES_MANIFEST:function(){return A},APP_PATHS_MANIFEST:function(){return M},APP_PATH_ROUTES_MANIFEST:function(){return C},BUILD_MANIFEST:function(){return I},APP_BUILD_MANIFEST:function(){return L},FUNCTIONS_CONFIG_MANIFEST:function(){return x},SUBRESOURCE_INTEGRITY_MANIFEST:function(){return N},NEXT_FONT_MANIFEST:function(){return D},EXPORT_MARKER:function(){return k},EXPORT_DETAIL:function(){return F},PRERENDER_MANIFEST:function(){return U},ROUTES_MANIFEST:function(){return H},IMAGES_MANIFEST:function(){return B},SERVER_FILES_MANIFEST:function(){return W},DEV_CLIENT_PAGES_MANIFEST:function(){return q},MIDDLEWARE_MANIFEST:function(){return 
z},DEV_MIDDLEWARE_MANIFEST:function(){return G},REACT_LOADABLE_MANIFEST:function(){return V},FONT_MANIFEST:function(){return X},SERVER_DIRECTORY:function(){return K},CONFIG_FILES:function(){return Y},BUILD_ID_FILE:function(){return Q},BLOCKED_PAGES:function(){return $},CLIENT_PUBLIC_FILES_PATH:function(){return J},CLIENT_STATIC_FILES_PATH:function(){return Z},STRING_LITERAL_DROP_BUNDLE:function(){return ee},NEXT_BUILTIN_DOCUMENT:function(){return et},BARREL_OPTIMIZATION_PREFIX:function(){return er},CLIENT_REFERENCE_MANIFEST:function(){return en},SERVER_REFERENCE_MANIFEST:function(){return ea},MIDDLEWARE_BUILD_MANIFEST:function(){return eo},MIDDLEWARE_REACT_LOADABLE_MANIFEST:function(){return ei},CLIENT_STATIC_FILES_RUNTIME_MAIN:function(){return el},CLIENT_STATIC_FILES_RUNTIME_MAIN_APP:function(){return eu},APP_CLIENT_INTERNALS:function(){return es},CLIENT_STATIC_FILES_RUNTIME_REACT_REFRESH:function(){return ec},CLIENT_STATIC_FILES_RUNTIME_AMP:function(){return ed},CLIENT_STATIC_FILES_RUNTIME_WEBPACK:function(){return ef},CLIENT_STATIC_FILES_RUNTIME_POLYFILLS:function(){return ep},CLIENT_STATIC_FILES_RUNTIME_POLYFILLS_SYMBOL:function(){return eh},EDGE_RUNTIME_WEBPACK:function(){return em},TEMPORARY_REDIRECT_STATUS:function(){return eg},PERMANENT_REDIRECT_STATUS:function(){return e_},STATIC_PROPS_ID:function(){return ey},SERVER_PROPS_ID:function(){return ev},PAGE_SEGMENT_KEY:function(){return eP},GOOGLE_FONT_PROVIDER:function(){return eb},OPTIMIZED_FONT_PROVIDERS:function(){return eE},DEFAULT_SERIF_FONT:function(){return eS},DEFAULT_SANS_SERIF_FONT:function(){return ew},STATIC_STATUS_PAGES:function(){return eR},TRACE_OUTPUT_VERSION:function(){return eO},TURBO_TRACE_DEFAULT_MEMORY_LIMIT:function(){return ej},RSC_MODULE_TYPES:function(){return eA},EDGE_UNSUPPORTED_NODE_APIS:function(){return eT},SYSTEM_ENTRYPOINTS:function(){return eM}});let 
h=f(8754),g=h._(f(8855)),y={client:"client",server:"server",edgeServer:"edge-server"},P=["x-invoke-path","x-invoke-status","x-invoke-error","x-invoke-query","x-middleware-invoke"],b={[y.client]:0,[y.server]:1,[y.edgeServer]:2},E="phase-export",S="phase-production-build",w="phase-production-server",R="phase-development-server",O="phase-test",j="phase-info",A="pages-manifest.json",M="app-paths-manifest.json",C="app-path-routes-manifest.json",I="build-manifest.json",L="app-build-manifest.json",x="functions-config-manifest.json",N="subresource-integrity-manifest",D="next-font-manifest",k="export-marker.json",F="export-detail.json",U="prerender-manifest.json",H="routes-manifest.json",B="images-manifest.json",W="required-server-files.json",q="_devPagesManifest.json",z="middleware-manifest.json",G="_devMiddlewareManifest.json",V="react-loadable-manifest.json",X="font-manifest.json",K="server",Y=["next.config.js","next.config.mjs"],Q="BUILD_ID",$=["/_document","/_app","/_error"],J="public",Z="static",ee="__NEXT_DROP_CLIENT_FILE__",et="__NEXT_BUILTIN_DOCUMENT__",er="__barrel_optimize__",en="client-reference-manifest",ea="server-reference-manifest",eo="middleware-build-manifest",ei="middleware-react-loadable-manifest",el="main",eu=""+el+"-app",es="app-pages-internals",ec="react-refresh",ed="amp",ef="webpack",ep="polyfills",eh=Symbol(ep),em="edge-runtime-webpack",eg=307,e_=308,ey="__N_SSG",ev="__N_SSP",eP="__PAGE__",eb="https://fonts.googleapis.com/",eE=[{url:eb,preconnect:"https://fonts.gstatic.com"},{url:"https://use.typekit.net",preconnect:"https://use.typekit.net"}],eS={name:"Times New 
Roman",xAvgCharWidth:821,azAvgWidth:854.3953488372093,unitsPerEm:2048},ew={name:"Arial",xAvgCharWidth:904,azAvgWidth:934.5116279069767,unitsPerEm:2048},eR=["/500"],eO=1,ej=6e3,eA={client:"client",server:"server"},eT=["clearImmediate","setImmediate","BroadcastChannel","ByteLengthQueuingStrategy","CompressionStream","CountQueuingStrategy","DecompressionStream","DomException","MessageChannel","MessageEvent","MessagePort","ReadableByteStreamController","ReadableStreamBYOBRequest","ReadableStreamDefaultController","TransformStreamDefaultController","WritableStreamDefaultController"],eM=new Set([el,ec,ed,eu]);("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},997:function(l,d){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"escapeStringRegexp",{enumerable:!0,get:function(){return escapeStringRegexp}});let f=/[|\\{}()[\]^$+*?.-]/,h=/[|\\{}()[\]^$+*?.-]/g;function escapeStringRegexp(l){return f.test(l)?l.replace(h,"\\$&"):l}},6734:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"HeadManagerContext",{enumerable:!0,get:function(){return y}});let h=f(8754),g=h._(f(7294)),y=g.default.createContext({})},9201:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{defaultHead:function(){return defaultHead},default:function(){return R}});let h=f(8754),g=f(1757),y=g._(f(7294)),P=h._(f(8955)),b=f(6861),E=f(6734),S=f(7543);function defaultHead(l){void 0===l&&(l=!1);let d=[y.default.createElement("meta",{charSet:"utf-8"})];return l||d.push(y.default.createElement("meta",{name:"viewport",content:"width=device-width"})),d}function onlyReactElement(l,d){return"string"==typeof d||"number"==typeof 
d?l:d.type===y.default.Fragment?l.concat(y.default.Children.toArray(d.props.children).reduce((l,d)=>"string"==typeof d||"number"==typeof d?l:l.concat(d),[])):l.concat(d)}f(1905);let w=["name","httpEquiv","charSet","itemProp"];function unique(){let l=new Set,d=new Set,f=new Set,h={};return g=>{let y=!0,P=!1;if(g.key&&"number"!=typeof g.key&&g.key.indexOf("$")>0){P=!0;let d=g.key.slice(g.key.indexOf("$")+1);l.has(d)?y=!1:l.add(d)}switch(g.type){case"title":case"base":d.has(g.type)?y=!1:d.add(g.type);break;case"meta":for(let l=0,d=w.length;l{let h=l.key||d;if(!f&&"link"===l.type&&l.props.href&&["https://fonts.googleapis.com/css","https://use.typekit.net/"].some(d=>l.props.href.startsWith(d))){let d={...l.props||{}};return d["data-href"]=d.href,d.href=void 0,d["data-optimized-fonts"]=!0,y.default.cloneElement(l,d)}return y.default.cloneElement(l,{key:h})})}function Head(l){let{children:d}=l,f=(0,y.useContext)(b.AmpStateContext),h=(0,y.useContext)(E.HeadManagerContext);return y.default.createElement(P.default,{reduceComponentsToState:reduceComponents,headManager:h,inAmpMode:(0,S.isInAmpMode)(f)},d)}let R=Head;("function"==typeof d.default||"object"==typeof d.default&&null!==d.default)&&void 0===d.default.__esModule&&(Object.defineProperty(d.default,"__esModule",{value:!0}),Object.assign(d.default,d),l.exports=d.default)},1593:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{SearchParamsContext:function(){return g},PathnameContext:function(){return y},PathParamsContext:function(){return P}});let h=f(7294),g=(0,h.createContext)(null),y=(0,h.createContext)(null),P=(0,h.createContext)(null)},1774:function(l,d){"use strict";function normalizeLocalePath(l,d){let f;let 
h=l.split("/");return(d||[]).some(d=>!!h[1]&&h[1].toLowerCase()===d.toLowerCase()&&(f=d,h.splice(1,1),l=h.join("/")||"/",!0)),{pathname:l,detectedLocale:f}}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"normalizeLocalePath",{enumerable:!0,get:function(){return normalizeLocalePath}})},869:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"ImageConfigContext",{enumerable:!0,get:function(){return P}});let h=f(8754),g=h._(f(7294)),y=f(5494),P=g.default.createContext(y.imageConfigDefault)},5494:function(l,d){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{VALID_LOADERS:function(){return f},imageConfigDefault:function(){return h}});let f=["default","imgix","cloudinary","akamai","custom"],h={deviceSizes:[640,750,828,1080,1200,1920,2048,3840],imageSizes:[16,32,48,64,96,128,256,384],path:"/_next/image",loader:"default",loaderFile:"",domains:[],disableStaticImages:!1,minimumCacheTTL:60,formats:["image/webp"],dangerouslyAllowSVG:!1,contentSecurityPolicy:"script-src 'none'; frame-src 'none'; sandbox;",contentDispositionType:"inline",remotePatterns:[],unoptimized:!1}},5585:function(l,d){"use strict";function getObjectClassLabel(l){return Object.prototype.toString.call(l)}function isPlainObject(l){if("[object Object]"!==getObjectClassLabel(l))return!1;let d=Object.getPrototypeOf(l);return null===d||d.hasOwnProperty("isPrototypeOf")}Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{getObjectClassLabel:function(){return getObjectClassLabel},isPlainObject:function(){return isPlainObject}})},6146:function(l,d){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{NEXT_DYNAMIC_NO_SSR_CODE:function(){return 
f},throwWithNoSSR:function(){return throwWithNoSSR}});let f="NEXT_DYNAMIC_NO_SSR_CODE";function throwWithNoSSR(){let l=Error(f);throw l.digest=f,l}},6860:function(l,d){"use strict";function mitt(){let l=Object.create(null);return{on(d,f){(l[d]||(l[d]=[])).push(f)},off(d,f){l[d]&&l[d].splice(l[d].indexOf(f)>>>0,1)},emit(d){for(var f=arguments.length,h=Array(f>1?f-1:0),g=1;g{l(...h)})}}}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"default",{enumerable:!0,get:function(){return mitt}})},8855:function(l){"use strict";l.exports=["chrome 64","edge 79","firefox 67","opera 51","safari 12"]},3035:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"denormalizePagePath",{enumerable:!0,get:function(){return denormalizePagePath}});let h=f(8410),g=f(9153);function denormalizePagePath(l){let d=(0,g.normalizePathSep)(l);return d.startsWith("/index/")&&!(0,h.isDynamicRoute)(d)?d.slice(6):"/index"!==d?d:"/"}},504:function(l,d){"use strict";function ensureLeadingSlash(l){return l.startsWith("/")?l:"/"+l}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"ensureLeadingSlash",{enumerable:!0,get:function(){return ensureLeadingSlash}})},9153:function(l,d){"use strict";function normalizePathSep(l){return l.replace(/\\/g,"/")}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"normalizePathSep",{enumerable:!0,get:function(){return normalizePathSep}})},1823:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"RouterContext",{enumerable:!0,get:function(){return y}});let h=f(8754),g=h._(f(7294)),y=g.default.createContext(null)},9642:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{adaptForAppRouterInstance:function(){return adaptForAppRouterInstance},adaptForSearchParams:function(){return 
adaptForSearchParams},adaptForPathParams:function(){return adaptForPathParams},PathnameContextProviderAdapter:function(){return PathnameContextProviderAdapter}});let h=f(1757),g=h._(f(7294)),y=f(1593),P=f(8410),b=f(106),E=f(2839);function adaptForAppRouterInstance(l){return{back(){l.back()},forward(){l.forward()},refresh(){l.reload()},push(d,f){let{scroll:h}=void 0===f?{}:f;l.push(d,void 0,{scroll:h})},replace(d,f){let{scroll:h}=void 0===f?{}:f;l.replace(d,void 0,{scroll:h})},prefetch(d){l.prefetch(d)}}}function adaptForSearchParams(l){return l.isReady&&l.query?(0,b.asPathToSearchParams)(l.asPath):new URLSearchParams}function adaptForPathParams(l){if(!l.isReady||!l.query)return null;let d={},f=(0,E.getRouteRegex)(l.pathname),h=Object.keys(f.groups);for(let f of h)d[f]=l.query[f];return d}function PathnameContextProviderAdapter(l){let{children:d,router:f,...h}=l,b=(0,g.useRef)(h.isAutoExport),E=(0,g.useMemo)(()=>{let l;let d=b.current;if(d&&(b.current=!1),(0,P.isDynamicRoute)(f.pathname)&&(f.isFallback||d&&!f.isReady))return null;try{l=new URL(f.asPath,"http://f")}catch(l){return"/"}return l.pathname},[f.asPath,f.isFallback,f.isReady,f.pathname]);return g.default.createElement(y.PathnameContext.Provider,{value:E},d)}},2997:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{default:function(){return Router},matchesMiddleware:function(){return matchesMiddleware},createKey:function(){return createKey}});let h=f(8754),g=f(1757),y=f(7425),P=f(769),b=f(5354),E=g._(f(676)),S=f(3035),w=f(1774),R=h._(f(6860)),O=f(109),j=f(9203),A=f(1748);f(2431);let M=f(2142),C=f(2839),I=f(4364);f(6728);let L=f(1156),x=f(3607),N=f(5637),D=f(8961),k=f(7192),F=f(6864),U=f(4450),H=f(9423),B=f(7007),W=f(7841),q=f(7763),z=f(2227),G=f(5119),V=f(6455),X=f(2969),K=f(3937);function buildCancellationError(){return Object.assign(Error("Route Cancelled"),{cancelled:!0})}async function 
matchesMiddleware(l){let d=await Promise.resolve(l.router.pageLoader.getMiddleware());if(!d)return!1;let{pathname:f}=(0,L.parsePath)(l.asPath),h=(0,F.hasBasePath)(f)?(0,D.removeBasePath)(f):f,g=(0,k.addBasePath)((0,x.addLocale)(h,l.locale));return d.some(l=>new RegExp(l.regexp).test(g))}function stripOrigin(l){let d=(0,O.getLocationOrigin)();return l.startsWith(d)?l.substring(d.length):l}function prepareUrlAs(l,d,f){let[h,g]=(0,U.resolveHref)(l,d,!0),y=(0,O.getLocationOrigin)(),P=h.startsWith(y),b=g&&g.startsWith(y);h=stripOrigin(h),g=g?stripOrigin(g):g;let E=P?h:(0,k.addBasePath)(h),S=f?stripOrigin((0,U.resolveHref)(l,f)):g||h;return{url:E,as:b?S:(0,k.addBasePath)(S)}}function resolveDynamicRoute(l,d){let f=(0,y.removeTrailingSlash)((0,S.denormalizePagePath)(l));return"/404"===f||"/_error"===f?l:(d.includes(f)||d.some(d=>{if((0,j.isDynamicRoute)(d)&&(0,C.getRouteRegex)(d).re.test(f))return l=d,!0}),(0,y.removeTrailingSlash)(l))}function getMiddlewareData(l,d,f){let h={basePath:f.router.basePath,i18n:{locales:f.router.locales},trailingSlash:!1},g=d.headers.get("x-nextjs-rewrite"),b=g||d.headers.get("x-nextjs-matched-path"),E=d.headers.get("x-matched-path");if(!E||b||E.includes("__next_data_catchall")||E.includes("/_error")||E.includes("/404")||(b=E),b){if(b.startsWith("/")){let d=(0,A.parseRelativeUrl)(b),E=(0,B.getNextPathnameInfo)(d.pathname,{nextConfig:h,parseData:!0}),S=(0,y.removeTrailingSlash)(E.pathname);return Promise.all([f.router.pageLoader.getPageList(),(0,P.getClientBuildManifest)()]).then(y=>{let[P,{__rewrites:b}]=y,R=(0,x.addLocale)(E.pathname,E.locale);if((0,j.isDynamicRoute)(R)||!g&&P.includes((0,w.normalizeLocalePath)((0,D.removeBasePath)(R),f.router.locales).pathname)){let f=(0,B.getNextPathnameInfo)((0,A.parseRelativeUrl)(l).pathname,{nextConfig:h,parseData:!0});R=(0,k.addBasePath)(f.pathname),d.pathname=R}if(!P.includes(S)){let l=resolveDynamicRoute(S,P);l!==S&&(S=l)}let 
O=P.includes(S)?S:resolveDynamicRoute((0,w.normalizeLocalePath)((0,D.removeBasePath)(d.pathname),f.router.locales).pathname,P);if((0,j.isDynamicRoute)(O)){let l=(0,M.getRouteMatcher)((0,C.getRouteRegex)(O))(R);Object.assign(d.query,l||{})}return{type:"rewrite",parsedAs:d,resolvedHref:O}})}let d=(0,L.parsePath)(l),E=(0,W.formatNextPathnameInfo)({...(0,B.getNextPathnameInfo)(d.pathname,{nextConfig:h,parseData:!0}),defaultLocale:f.router.defaultLocale,buildId:""});return Promise.resolve({type:"redirect-external",destination:""+E+d.query+d.hash})}let S=d.headers.get("x-nextjs-redirect");if(S){if(S.startsWith("/")){let l=(0,L.parsePath)(S),d=(0,W.formatNextPathnameInfo)({...(0,B.getNextPathnameInfo)(l.pathname,{nextConfig:h,parseData:!0}),defaultLocale:f.router.defaultLocale,buildId:""});return Promise.resolve({type:"redirect-internal",newAs:""+d+l.query+l.hash,newUrl:""+d+l.query+l.hash})}return Promise.resolve({type:"redirect-external",destination:S})}return Promise.resolve({type:"next"})}async function withMiddlewareEffects(l){let d=await matchesMiddleware(l);if(!d||!l.fetchData)return null;try{let d=await l.fetchData(),f=await getMiddlewareData(d.dataHref,d.response,l);return{dataHref:d.dataHref,json:d.json,response:d.response,text:d.text,cacheKey:d.cacheKey,effect:f}}catch(l){return null}}let Y=Symbol("SSG_DATA_NOT_FOUND");function fetchRetry(l,d,f){return fetch(l,{credentials:"same-origin",method:f.method||"GET",headers:Object.assign({},f.headers,{"x-nextjs-data":"1"})}).then(h=>!h.ok&&d>1&&h.status>=500?fetchRetry(l,d-1,f):h)}function tryToParseAsJSON(l){try{return JSON.parse(l)}catch(l){return null}}function fetchNextData(l){var d;let{dataHref:f,inflightCache:h,isPrefetch:g,hasMiddleware:y,isServerRender:b,parseJSON:E,persistCache:S,isBackground:w,unstable_skipClientCache:R}=l,{href:O}=new 
URL(f,window.location.href),getData=l=>fetchRetry(f,b?3:1,{headers:Object.assign({},g?{purpose:"prefetch"}:{},g&&y?{"x-middleware-prefetch":"1"}:{}),method:null!=(d=null==l?void 0:l.method)?d:"GET"}).then(d=>d.ok&&(null==l?void 0:l.method)==="HEAD"?{dataHref:f,response:d,text:"",json:{},cacheKey:O}:d.text().then(l=>{if(!d.ok){if(y&&[301,302,307,308].includes(d.status))return{dataHref:f,response:d,text:l,json:{},cacheKey:O};if(404===d.status){var h;if(null==(h=tryToParseAsJSON(l))?void 0:h.notFound)return{dataHref:f,json:{notFound:Y},response:d,text:l,cacheKey:O}}let g=Error("Failed to load static props");throw b||(0,P.markAssetError)(g),g}return{dataHref:f,json:E?tryToParseAsJSON(l):null,response:d,text:l,cacheKey:O}})).then(l=>(S&&"no-cache"!==l.response.headers.get("x-middleware-cache")||delete h[O],l)).catch(l=>{throw R||delete h[O],("Failed to fetch"===l.message||"NetworkError when attempting to fetch resource."===l.message||"Load failed"===l.message)&&(0,P.markAssetError)(l),l});return R&&S?getData({}).then(l=>(h[O]=Promise.resolve(l),l)):void 0!==h[O]?h[O]:h[O]=getData(w?{method:"HEAD"}:{})}function createKey(){return Math.random().toString(36).slice(2,10)}function handleHardNavigation(l){let{url:d,router:f}=l;if(d===(0,k.addBasePath)((0,x.addLocale)(f.asPath,f.locale)))throw Error("Invariant: attempted to hard navigate to the same URL "+d+" "+location.href);window.location.href=d}let getCancelledHandler=l=>{let{route:d,router:f}=l,h=!1,g=f.clc=()=>{h=!0};return()=>{if(h){let l=Error('Abort fetching component for route: "'+d+'"');throw l.cancelled=!0,l}g===f.clc&&(f.clc=null)}};let Router=class Router{reload(){window.location.reload()}back(){window.history.back()}forward(){window.history.forward()}push(l,d,f){return void 0===f&&(f={}),{url:l,as:d}=prepareUrlAs(this,l,d),this.change("pushState",l,d,f)}replace(l,d,f){return void 0===f&&(f={}),{url:l,as:d}=prepareUrlAs(this,l,d),this.change("replaceState",l,d,f)}async _bfl(l,d,f,h){{let E=!1,S=!1;for(let w 
of[l,d])if(w){let d=(0,y.removeTrailingSlash)(new URL(w,"http://n").pathname),R=(0,k.addBasePath)((0,x.addLocale)(d,f||this.locale));if(d!==(0,y.removeTrailingSlash)(new URL(this.asPath,"http://n").pathname)){var g,P,b;for(let l of(E=E||!!(null==(g=this._bfl_s)?void 0:g.contains(d))||!!(null==(P=this._bfl_s)?void 0:P.contains(R)),[d,R])){let d=l.split("/");for(let l=0;!S&&l{})}}}}return!1}async change(l,d,f,h,g){var S,w,R,U,H,B,W,G,K;let Q,$;if(!(0,z.isLocalURL)(d))return handleHardNavigation({url:d,router:this}),!1;let J=1===h._h;J||h.shallow||await this._bfl(f,void 0,h.locale);let Z=J||h._shouldResolveHref||(0,L.parsePath)(d).pathname===(0,L.parsePath)(f).pathname,ee={...this.state},et=!0!==this.isReady;this.isReady=!0;let er=this.isSsr;if(J||(this.isSsr=!1),J&&this.clc)return!1;let en=ee.locale;O.ST&&performance.mark("routeChange");let{shallow:ea=!1,scroll:eo=!0}=h,ei={shallow:ea};this._inFlightRoute&&this.clc&&(er||Router.events.emit("routeChangeError",buildCancellationError(),this._inFlightRoute,ei),this.clc(),this.clc=null),f=(0,k.addBasePath)((0,x.addLocale)((0,F.hasBasePath)(f)?(0,D.removeBasePath)(f):f,h.locale,this.defaultLocale));let el=(0,N.removeLocale)((0,F.hasBasePath)(f)?(0,D.removeBasePath)(f):f,ee.locale);this._inFlightRoute=f;let eu=en!==ee.locale;if(!J&&this.onlyAHashChange(el)&&!eu){ee.asPath=el,Router.events.emit("hashChangeStart",f,ei),this.changeState(l,d,f,{...h,scroll:!1}),eo&&this.scrollToHash(el);try{await this.set(ee,this.components[ee.route],null)}catch(l){throw(0,E.default)(l)&&l.cancelled&&Router.events.emit("routeChangeError",l,el,ei),l}return Router.events.emit("hashChangeComplete",f,ei),!0}let es=(0,A.parseRelativeUrl)(d),{pathname:ec,query:ed}=es;if(null==(S=this.components[ec])?void 0:S.__appRouter)return handleHardNavigation({url:f,router:this}),new Promise(()=>{});try{[Q,{__rewrites:$}]=await Promise.all([this.pageLoader.getPageList(),(0,P.getClientBuildManifest)(),this.pageLoader.getMiddleware()])}catch(l){return 
handleHardNavigation({url:f,router:this}),!1}this.urlIsNew(el)||eu||(l="replaceState");let ef=f;ec=ec?(0,y.removeTrailingSlash)((0,D.removeBasePath)(ec)):ec;let ep=(0,y.removeTrailingSlash)(ec),eh=f.startsWith("/")&&(0,A.parseRelativeUrl)(f).pathname,em=!!(eh&&ep!==eh&&(!(0,j.isDynamicRoute)(ep)||!(0,M.getRouteMatcher)((0,C.getRouteRegex)(ep))(eh))),eg=!h.shallow&&await matchesMiddleware({asPath:f,locale:ee.locale,router:this});if(J&&eg&&(Z=!1),Z&&"/_error"!==ec&&(h._shouldResolveHref=!0,es.pathname=resolveDynamicRoute(ec,Q),es.pathname===ec||(ec=es.pathname,es.pathname=(0,k.addBasePath)(ec),eg||(d=(0,I.formatWithValidation)(es)))),!(0,z.isLocalURL)(f))return handleHardNavigation({url:f,router:this}),!1;ef=(0,N.removeLocale)((0,D.removeBasePath)(ef),ee.locale),ep=(0,y.removeTrailingSlash)(ec);let e_=!1;if((0,j.isDynamicRoute)(ep)){let l=(0,A.parseRelativeUrl)(ef),h=l.pathname,g=(0,C.getRouteRegex)(ep);e_=(0,M.getRouteMatcher)(g)(h);let y=ep===h,P=y?(0,X.interpolateAs)(ep,h,ed):{};if(e_&&(!y||P.result))y?f=(0,I.formatWithValidation)(Object.assign({},l,{pathname:P.result,query:(0,V.omit)(ed,P.params)})):Object.assign(ed,e_);else{let l=Object.keys(g.groups).filter(l=>!ed[l]&&!g.groups[l].optional);if(l.length>0&&!eg)throw Error((y?"The provided `href` ("+d+") value is missing query values ("+l.join(", ")+") to be interpolated properly. ":"The provided `as` value ("+h+") is incompatible with the `href` value ("+ep+"). 
")+"Read more: https://nextjs.org/docs/messages/"+(y?"href-interpolation-failed":"incompatible-href-as"))}}J||Router.events.emit("routeChangeStart",f,ei);let ey="/404"===this.pathname||"/_error"===this.pathname;try{let y=await this.getRouteInfo({route:ep,pathname:ec,query:ed,as:f,resolvedAs:ef,routeProps:ei,locale:ee.locale,isPreview:ee.isPreview,hasMiddleware:eg,unstable_skipClientCache:h.unstable_skipClientCache,isQueryUpdating:J&&!this.isFallback,isMiddlewareRewrite:em});if(J||h.shallow||await this._bfl(f,"resolvedAs"in y?y.resolvedAs:void 0,ee.locale),"route"in y&&eg){ep=ec=y.route||ep,ei.shallow||(ed=Object.assign({},y.query||{},ed));let l=(0,F.hasBasePath)(es.pathname)?(0,D.removeBasePath)(es.pathname):es.pathname;if(e_&&ec!==l&&Object.keys(e_).forEach(l=>{e_&&ed[l]===e_[l]&&delete ed[l]}),(0,j.isDynamicRoute)(ec)){let l=!ei.shallow&&y.resolvedAs?y.resolvedAs:(0,k.addBasePath)((0,x.addLocale)(new URL(f,location.href).pathname,ee.locale),!0),d=l;(0,F.hasBasePath)(d)&&(d=(0,D.removeBasePath)(d));let h=(0,C.getRouteRegex)(ec),g=(0,M.getRouteMatcher)(h)(new URL(d,location.href).pathname);g&&Object.assign(ed,g)}}if("type"in y){if("redirect-internal"===y.type)return this.change(l,y.newUrl,y.newAs,h);return handleHardNavigation({url:y.destination,router:this}),new Promise(()=>{})}let P=y.Component;if(P&&P.unstable_scriptLoader){let l=[].concat(P.unstable_scriptLoader());l.forEach(l=>{(0,b.handleClientScriptLoad)(l.props)})}if((y.__N_SSG||y.__N_SSP)&&y.props){if(y.props.pageProps&&y.props.pageProps.__N_REDIRECT){h.locale=!1;let d=y.props.pageProps.__N_REDIRECT;if(d.startsWith("/")&&!1!==y.props.pageProps.__N_REDIRECT_BASE_PATH){let f=(0,A.parseRelativeUrl)(d);f.pathname=resolveDynamicRoute(f.pathname,Q);let{url:g,as:y}=prepareUrlAs(this,d,d);return this.change(l,g,y,h)}return handleHardNavigation({url:d,router:this}),new Promise(()=>{})}if(ee.isPreview=!!y.props.__N_PREVIEW,y.props.notFound===Y){let l;try{await 
this.fetchComponent("/404"),l="/404"}catch(d){l="/_error"}if(y=await this.getRouteInfo({route:l,pathname:l,query:ed,as:f,resolvedAs:ef,routeProps:{shallow:!1},locale:ee.locale,isPreview:ee.isPreview,isNotFound:!0}),"type"in y)throw Error("Unexpected middleware effect on /404")}}J&&"/_error"===this.pathname&&(null==(R=self.__NEXT_DATA__.props)?void 0:null==(w=R.pageProps)?void 0:w.statusCode)===500&&(null==(U=y.props)?void 0:U.pageProps)&&(y.props.pageProps.statusCode=500);let S=h.shallow&&ee.route===(null!=(H=y.route)?H:ep),O=null!=(B=h.scroll)?B:!J&&!S,I=null!=g?g:O?{x:0,y:0}:null,L={...ee,route:ep,pathname:ec,query:ed,asPath:el,isFallback:!1};if(J&&ey){if(y=await this.getRouteInfo({route:this.pathname,pathname:this.pathname,query:ed,as:f,resolvedAs:ef,routeProps:{shallow:!1},locale:ee.locale,isPreview:ee.isPreview,isQueryUpdating:J&&!this.isFallback}),"type"in y)throw Error("Unexpected middleware effect on "+this.pathname);"/_error"===this.pathname&&(null==(G=self.__NEXT_DATA__.props)?void 0:null==(W=G.pageProps)?void 0:W.statusCode)===500&&(null==(K=y.props)?void 0:K.pageProps)&&(y.props.pageProps.statusCode=500);try{await this.set(L,y,I)}catch(l){throw(0,E.default)(l)&&l.cancelled&&Router.events.emit("routeChangeError",l,el,ei),l}return!0}Router.events.emit("beforeHistoryChange",f,ei),this.changeState(l,d,f,h);let N=J&&!I&&!et&&!eu&&(0,q.compareRouterStates)(L,this.state);if(!N){try{await this.set(L,y,I)}catch(l){if(l.cancelled)y.error=y.error||l;else throw l}if(y.error)throw J||Router.events.emit("routeChangeError",y.error,el,ei),y.error;J||Router.events.emit("routeChangeComplete",f,ei),O&&/#.+$/.test(f)&&this.scrollToHash(f)}return!0}catch(l){if((0,E.default)(l)&&l.cancelled)return!1;throw l}}changeState(l,d,f,h){void 0===h&&(h={}),("pushState"!==l||(0,O.getURL)()!==f)&&(this._shallow=h.shallow,window.history[l]({url:d,as:f,options:h,__N:!0,key:this._key="pushState"!==l?this._key:createKey()},"",f))}async 
handleRouteInfoError(l,d,f,h,g,y){if(console.error(l),l.cancelled)throw l;if((0,P.isAssetError)(l)||y)throw Router.events.emit("routeChangeError",l,h,g),handleHardNavigation({url:h,router:this}),buildCancellationError();try{let h;let{page:g,styleSheets:y}=await this.fetchComponent("/_error"),P={props:h,Component:g,styleSheets:y,err:l,error:l};if(!P.props)try{P.props=await this.getInitialProps(g,{err:l,pathname:d,query:f})}catch(l){console.error("Error in error page `getInitialProps`: ",l),P.props={}}return P}catch(l){return this.handleRouteInfoError((0,E.default)(l)?l:Error(l+""),d,f,h,g,!0)}}async getRouteInfo(l){let{route:d,pathname:f,query:h,as:g,resolvedAs:P,routeProps:b,locale:S,hasMiddleware:R,isPreview:O,unstable_skipClientCache:j,isQueryUpdating:A,isMiddlewareRewrite:M,isNotFound:C}=l,L=d;try{var x,N,k,F;let l=getCancelledHandler({route:L,router:this}),d=this.components[L];if(b.shallow&&d&&this.route===L)return d;R&&(d=void 0);let E=!d||"initial"in d?void 0:d,U={dataHref:this.pageLoader.getDataHref({href:(0,I.formatWithValidation)({pathname:f,query:h}),skipInterpolation:!0,asPath:C?"/404":P,locale:S}),hasMiddleware:!0,isServerRender:this.isSsr,parseJSON:!0,inflightCache:A?this.sbc:this.sdc,persistCache:!O,isPrefetch:!1,unstable_skipClientCache:j,isBackground:A},B=A&&!M?null:await withMiddlewareEffects({fetchData:()=>fetchNextData(U),asPath:C?"/404":P,locale:S,router:this}).catch(l=>{if(A)return null;throw l});if(B&&("/_error"===f||"/404"===f)&&(B.effect=void 0),A&&(B?B.json=self.__NEXT_DATA__.props:B={json:self.__NEXT_DATA__.props}),l(),(null==B?void 0:null==(x=B.effect)?void 0:x.type)==="redirect-internal"||(null==B?void 0:null==(N=B.effect)?void 0:N.type)==="redirect-external")return B.effect;if((null==B?void 0:null==(k=B.effect)?void 0:k.type)==="rewrite"){let l=(0,y.removeTrailingSlash)(B.effect.resolvedHref),g=await 
this.pageLoader.getPageList();if((!A||g.includes(l))&&(L=l,f=B.effect.resolvedHref,h={...h,...B.effect.parsedAs.query},P=(0,D.removeBasePath)((0,w.normalizeLocalePath)(B.effect.parsedAs.pathname,this.locales).pathname),d=this.components[L],b.shallow&&d&&this.route===L&&!R))return{...d,route:L}}if((0,H.isAPIRoute)(L))return handleHardNavigation({url:g,router:this}),new Promise(()=>{});let W=E||await this.fetchComponent(L).then(l=>({Component:l.page,styleSheets:l.styleSheets,__N_SSG:l.mod.__N_SSG,__N_SSP:l.mod.__N_SSP})),q=null==B?void 0:null==(F=B.response)?void 0:F.headers.get("x-middleware-skip"),z=W.__N_SSG||W.__N_SSP;q&&(null==B?void 0:B.dataHref)&&delete this.sdc[B.dataHref];let{props:G,cacheKey:V}=await this._getData(async()=>{if(z){if((null==B?void 0:B.json)&&!q)return{cacheKey:B.cacheKey,props:B.json};let l=(null==B?void 0:B.dataHref)?B.dataHref:this.pageLoader.getDataHref({href:(0,I.formatWithValidation)({pathname:f,query:h}),asPath:P,locale:S}),d=await fetchNextData({dataHref:l,isServerRender:this.isSsr,parseJSON:!0,inflightCache:q?{}:this.sdc,persistCache:!O,isPrefetch:!1,unstable_skipClientCache:j});return{cacheKey:d.cacheKey,props:d.json||{}}}return{headers:{},props:await this.getInitialProps(W.Component,{pathname:f,query:h,asPath:g,locale:S,locales:this.locales,defaultLocale:this.defaultLocale})}});return W.__N_SSP&&U.dataHref&&V&&delete this.sdc[V],this.isPreview||!W.__N_SSG||A||fetchNextData(Object.assign({},U,{isBackground:!0,persistCache:!1,inflightCache:this.sbc})).catch(()=>{}),G.pageProps=Object.assign({},G.pageProps),W.props=G,W.route=L,W.query=h,W.resolvedAs=P,this.components[L]=W,W}catch(l){return this.handleRouteInfoError((0,E.getProperError)(l),f,h,g,b)}}set(l,d,f){return 
this.state=l,this.sub(d,this.components["/_app"].Component,f)}beforePopState(l){this._bps=l}onlyAHashChange(l){if(!this.asPath)return!1;let[d,f]=this.asPath.split("#",2),[h,g]=l.split("#",2);return!!g&&d===h&&f===g||d===h&&f!==g}scrollToHash(l){let[,d=""]=l.split("#",2);(0,K.handleSmoothScroll)(()=>{if(""===d||"top"===d){window.scrollTo(0,0);return}let l=decodeURIComponent(d),f=document.getElementById(l);if(f){f.scrollIntoView();return}let h=document.getElementsByName(l)[0];h&&h.scrollIntoView()},{onlyHashChange:this.onlyAHashChange(l)})}urlIsNew(l){return this.asPath!==l}async prefetch(l,d,f){if(void 0===d&&(d=l),void 0===f&&(f={}),(0,G.isBot)(window.navigator.userAgent))return;let h=(0,A.parseRelativeUrl)(l),g=h.pathname,{pathname:P,query:b}=h,E=P,S=await this.pageLoader.getPageList(),w=d,R=void 0!==f.locale?f.locale||void 0:this.locale,O=await matchesMiddleware({asPath:d,locale:R,router:this});h.pathname=resolveDynamicRoute(h.pathname,S),(0,j.isDynamicRoute)(h.pathname)&&(P=h.pathname,h.pathname=P,Object.assign(b,(0,M.getRouteMatcher)((0,C.getRouteRegex)(h.pathname))((0,L.parsePath)(d).pathname)||{}),O||(l=(0,I.formatWithValidation)(h)));let x=await withMiddlewareEffects({fetchData:()=>fetchNextData({dataHref:this.pageLoader.getDataHref({href:(0,I.formatWithValidation)({pathname:E,query:b}),skipInterpolation:!0,asPath:w,locale:R}),hasMiddleware:!0,isServerRender:this.isSsr,parseJSON:!0,inflightCache:this.sdc,persistCache:!this.isPreview,isPrefetch:!0}),asPath:d,locale:R,router:this});if((null==x?void 0:x.effect.type)==="rewrite"&&(h.pathname=x.effect.resolvedHref,P=x.effect.resolvedHref,b={...b,...x.effect.parsedAs.query},w=x.effect.parsedAs.pathname,l=(0,I.formatWithValidation)(h)),(null==x?void 0:x.effect.type)==="redirect-external")return;let N=(0,y.removeTrailingSlash)(P);await this._bfl(d,w,f.locale,!0)&&(this.components[g]={__appRouter:!0}),await Promise.all([this.pageLoader._isSsg(N).then(d=>!!d&&fetchNextData({dataHref:(null==x?void 
0:x.json)?null==x?void 0:x.dataHref:this.pageLoader.getDataHref({href:l,asPath:w,locale:R}),isServerRender:!1,parseJSON:!0,inflightCache:this.sdc,persistCache:!this.isPreview,isPrefetch:!0,unstable_skipClientCache:f.unstable_skipClientCache||f.priority&&!0}).then(()=>!1).catch(()=>!1)),this.pageLoader[f.priority?"loadPage":"prefetch"](N)])}async fetchComponent(l){let d=getCancelledHandler({route:l,router:this});try{let f=await this.pageLoader.loadPage(l);return d(),f}catch(l){throw d(),l}}_getData(l){let d=!1,cancel=()=>{d=!0};return this.clc=cancel,l().then(l=>{if(cancel===this.clc&&(this.clc=null),d){let l=Error("Loading initial props cancelled");throw l.cancelled=!0,l}return l})}_getFlightData(l){return fetchNextData({dataHref:l,isServerRender:!0,parseJSON:!1,inflightCache:this.sdc,persistCache:!1,isPrefetch:!1}).then(l=>{let{text:d}=l;return{data:d}})}getInitialProps(l,d){let{Component:f}=this.components["/_app"],h=this._wrapApp(f);return d.AppTree=h,(0,O.loadGetInitialProps)(f,{AppTree:h,Component:l,router:this,ctx:d})}get route(){return this.state.route}get pathname(){return this.state.pathname}get query(){return this.state.query}get asPath(){return this.state.asPath}get locale(){return this.state.locale}get isFallback(){return this.state.isFallback}get isPreview(){return this.state.isPreview}constructor(l,d,h,{initialProps:g,pageLoader:P,App:b,wrapApp:E,Component:S,err:w,subscription:R,isFallback:M,locale:C,locales:L,defaultLocale:x,domainLocales:N,isPreview:D}){this.sdc={},this.sbc={},this.isFirstPopStateEvent=!0,this._key=createKey(),this.onPopState=l=>{let d;let{isFirstPopStateEvent:f}=this;this.isFirstPopStateEvent=!1;let 
h=l.state;if(!h){let{pathname:l,query:d}=this;this.changeState("replaceState",(0,I.formatWithValidation)({pathname:(0,k.addBasePath)(l),query:d}),(0,O.getURL)());return}if(h.__NA){window.location.reload();return}if(!h.__N||f&&this.locale===h.options.locale&&h.as===this.asPath)return;let{url:g,as:y,options:P,key:b}=h;this._key=b;let{pathname:E}=(0,A.parseRelativeUrl)(g);(!this.isSsr||y!==(0,k.addBasePath)(this.asPath)||E!==(0,k.addBasePath)(this.pathname))&&(!this._bps||this._bps(h))&&this.change("replaceState",g,y,Object.assign({},P,{shallow:P.shallow&&this._shallow,locale:P.locale||this.defaultLocale,_h:0}),d)};let F=(0,y.removeTrailingSlash)(l);this.components={},"/_error"!==l&&(this.components[F]={Component:S,initial:!0,props:g,err:w,__N_SSG:g&&g.__N_SSG,__N_SSP:g&&g.__N_SSP}),this.components["/_app"]={Component:b,styleSheets:[]};{let{BloomFilter:l}=f(684),d={numItems:0,errorRate:.01,numBits:0,numHashes:null,bitArray:[]},h={numItems:0,errorRate:.01,numBits:0,numHashes:null,bitArray:[]};(null==d?void 0:d.numHashes)&&(this._bfl_s=new l(d.numItems,d.errorRate),this._bfl_s.import(d)),(null==h?void 0:h.numHashes)&&(this._bfl_d=new l(h.numItems,h.errorRate),this._bfl_d.import(h))}this.events=Router.events,this.pageLoader=P;let U=(0,j.isDynamicRoute)(l)&&self.__NEXT_DATA__.autoExport;if(this.basePath="",this.sub=R,this.clc=null,this._wrapApp=E,this.isSsr=!0,this.isLocaleDomain=!1,this.isReady=!!(self.__NEXT_DATA__.gssp||self.__NEXT_DATA__.gip||self.__NEXT_DATA__.isExperimentalCompile||self.__NEXT_DATA__.appGip&&!self.__NEXT_DATA__.gsp||!U&&!self.location.search),this.state={route:F,pathname:l,query:d,asPath:U?l:h,isPreview:!!D,locale:void 0,isFallback:M},this._initialMatchesMiddlewarePromise=Promise.resolve(!1),!h.startsWith("//")){let 
f={locale:C},g=(0,O.getURL)();this._initialMatchesMiddlewarePromise=matchesMiddleware({router:this,locale:C,asPath:g}).then(y=>(f._shouldResolveHref=h!==l,this.changeState("replaceState",y?g:(0,I.formatWithValidation)({pathname:(0,k.addBasePath)(l),query:d}),g,f),y))}window.addEventListener("popstate",this.onPopState)}};Router.events=(0,R.default)()},7699:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"addLocale",{enumerable:!0,get:function(){return addLocale}});let h=f(6063),g=f(387);function addLocale(l,d,f,y){if(!d||d===f)return l;let P=l.toLowerCase();return!y&&((0,g.pathHasPrefix)(P,"/api")||(0,g.pathHasPrefix)(P,"/"+d.toLowerCase()))?l:(0,h.addPathPrefix)(l,"/"+d)}},6063:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"addPathPrefix",{enumerable:!0,get:function(){return addPathPrefix}});let h=f(1156);function addPathPrefix(l,d){if(!l.startsWith("/")||!d)return l;let{pathname:f,query:g,hash:y}=(0,h.parsePath)(l);return""+d+f+g+y}},4233:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"addPathSuffix",{enumerable:!0,get:function(){return addPathSuffix}});let h=f(1156);function addPathSuffix(l,d){if(!l.startsWith("/")||!d)return l;let{pathname:f,query:g,hash:y}=(0,h.parsePath)(l);return""+f+d+g+y}},3090:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{normalizeAppPath:function(){return normalizeAppPath},normalizeRscURL:function(){return normalizeRscURL},normalizePostponedURL:function(){return normalizePostponedURL}});let h=f(504),g=f(6163);function normalizeAppPath(l){return(0,h.ensureLeadingSlash)(l.split("/").reduce((l,d,f,h)=>!d||(0,g.isGroupSegment)(d)||"@"===d[0]||("page"===d||"route"===d)&&f===h.length-1?l:l+"/"+d,""))}function normalizeRscURL(l){return 
l.replace(/\.rsc($|\?)/,"$1")}function normalizePostponedURL(l){let d=new URL(l),{pathname:f}=d;return f&&f.startsWith("/_next/postponed")?(d.pathname=f.substring(16)||"/",d.toString()):l}},106:function(l,d){"use strict";function asPathToSearchParams(l){return new URL(l,"http://n").searchParams}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"asPathToSearchParams",{enumerable:!0,get:function(){return asPathToSearchParams}})},7763:function(l,d){"use strict";function compareRouterStates(l,d){let f=Object.keys(l);if(f.length!==Object.keys(d).length)return!1;for(let h=f.length;h--;){let g=f[h];if("query"===g){let f=Object.keys(l.query);if(f.length!==Object.keys(d.query).length)return!1;for(let h=f.length;h--;){let g=f[h];if(!d.query.hasOwnProperty(g)||l.query[g]!==d.query[g])return!1}}else if(!d.hasOwnProperty(g)||l[g]!==d[g])return!1}return!0}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"compareRouterStates",{enumerable:!0,get:function(){return compareRouterStates}})},7841:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"formatNextPathnameInfo",{enumerable:!0,get:function(){return formatNextPathnameInfo}});let h=f(7425),g=f(6063),y=f(4233),P=f(7699);function formatNextPathnameInfo(l){let d=(0,P.addLocale)(l.pathname,l.locale,l.buildId?void 0:l.defaultLocale,l.ignorePrefix);return(l.buildId||!l.trailingSlash)&&(d=(0,h.removeTrailingSlash)(d)),l.buildId&&(d=(0,y.addPathSuffix)((0,g.addPathPrefix)(d,"/_next/data/"+l.buildId),"/"===l.pathname?"index.json":".json")),d=(0,g.addPathPrefix)(d,l.basePath),!l.buildId&&l.trailingSlash?d.endsWith("/")?d:(0,y.addPathSuffix)(d,"/"):(0,h.removeTrailingSlash)(d)}},4364:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{formatUrl:function(){return formatUrl},urlObjectKeys:function(){return 
P},formatWithValidation:function(){return formatWithValidation}});let h=f(1757),g=h._(f(5980)),y=/https?|ftp|gopher|file/;function formatUrl(l){let{auth:d,hostname:f}=l,h=l.protocol||"",P=l.pathname||"",b=l.hash||"",E=l.query||"",S=!1;d=d?encodeURIComponent(d).replace(/%3A/i,":")+"@":"",l.host?S=d+l.host:f&&(S=d+(~f.indexOf(":")?"["+f+"]":f),l.port&&(S+=":"+l.port)),E&&"object"==typeof E&&(E=String(g.urlQueryToSearchParams(E)));let w=l.search||E&&"?"+E||"";return h&&!h.endsWith(":")&&(h+=":"),l.slashes||(!h||y.test(h))&&!1!==S?(S="//"+(S||""),P&&"/"!==P[0]&&(P="/"+P)):S||(S=""),b&&"#"!==b[0]&&(b="#"+b),w&&"?"!==w[0]&&(w="?"+w),""+h+S+(P=P.replace(/[?#]/g,encodeURIComponent))+(w=w.replace("#","%23"))+b}let P=["auth","hash","host","hostname","href","path","pathname","port","protocol","query","search","slashes"];function formatWithValidation(l){return formatUrl(l)}},8356:function(l,d){"use strict";function getAssetPathFromRoute(l,d){void 0===d&&(d="");let f="/"===l?"/index":/^\/index(\/|$)/.test(l)?"/index"+l:""+l;return f+d}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"default",{enumerable:!0,get:function(){return getAssetPathFromRoute}})},7007:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"getNextPathnameInfo",{enumerable:!0,get:function(){return getNextPathnameInfo}});let h=f(1774),g=f(2531),y=f(387);function getNextPathnameInfo(l,d){var f,P;let{basePath:b,i18n:E,trailingSlash:S}=null!=(f=d.nextConfig)?f:{},w={pathname:l,trailingSlash:"/"!==l?l.endsWith("/"):S};b&&(0,y.pathHasPrefix)(w.pathname,b)&&(w.pathname=(0,g.removePathPrefix)(w.pathname,b),w.basePath=b);let R=w.pathname;if(w.pathname.startsWith("/_next/data/")&&w.pathname.endsWith(".json")){let l=w.pathname.replace(/^\/_next\/data\//,"").replace(/\.json$/,"").split("/"),f=l[0];w.buildId=f,R="index"!==l[1]?"/"+l.slice(1).join("/"):"/",!0===d.parseData&&(w.pathname=R)}if(E){let 
l=d.i18nProvider?d.i18nProvider.analyze(w.pathname):(0,h.normalizeLocalePath)(w.pathname,E.locales);w.locale=l.detectedLocale,w.pathname=null!=(P=l.pathname)?P:w.pathname,!l.detectedLocale&&w.buildId&&(l=d.i18nProvider?d.i18nProvider.analyze(R):(0,h.normalizeLocalePath)(R,E.locales)).detectedLocale&&(w.locale=l.detectedLocale)}return w}},3937:function(l,d){"use strict";function handleSmoothScroll(l,d){if(void 0===d&&(d={}),d.onlyHashChange){l();return}let f=document.documentElement,h=f.style.scrollBehavior;f.style.scrollBehavior="auto",d.dontForceLayout||f.getClientRects(),l(),f.style.scrollBehavior=h}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"handleSmoothScroll",{enumerable:!0,get:function(){return handleSmoothScroll}})},8410:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{getSortedRoutes:function(){return h.getSortedRoutes},isDynamicRoute:function(){return g.isDynamicRoute}});let h=f(2677),g=f(9203)},2969:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"interpolateAs",{enumerable:!0,get:function(){return interpolateAs}});let h=f(2142),g=f(2839);function interpolateAs(l,d,f){let y="",P=(0,g.getRouteRegex)(l),b=P.groups,E=(d!==l?(0,h.getRouteMatcher)(P)(d):"")||f;y=l;let S=Object.keys(b);return S.every(l=>{let d=E[l]||"",{repeat:f,optional:h}=b[l],g="["+(f?"...":"")+l+"]";return h&&(g=(d?"":"/")+"["+g+"]"),f&&!Array.isArray(d)&&(d=[d]),(h||l in E)&&(y=y.replace(g,f?d.map(l=>encodeURIComponent(l)).join("/"):encodeURIComponent(d))||"/")})||(y=""),{params:S,result:y}}},5119:function(l,d){"use strict";function isBot(l){return/Googlebot|Mediapartners-Google|AdsBot-Google|googleweblight|Storebot-Google|Google-PageRenderer|Bingbot|BingPreview|Slurp|DuckDuckBot|baiduspider|yandex|sogou|LinkedInBot|bitlybot|tumblr|vkShare|quora link 
preview|facebookexternalhit|facebookcatalog|Twitterbot|applebot|redditbot|Slackbot|Discordbot|WhatsApp|SkypeUriPreview|ia_archiver/i.test(l)}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"isBot",{enumerable:!0,get:function(){return isBot}})},9203:function(l,d){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"isDynamicRoute",{enumerable:!0,get:function(){return isDynamicRoute}});let f=/\/\[[^/]+?\](?=\/|$)/;function isDynamicRoute(l){return f.test(l)}},2227:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"isLocalURL",{enumerable:!0,get:function(){return isLocalURL}});let h=f(109),g=f(6864);function isLocalURL(l){if(!(0,h.isAbsoluteUrl)(l))return!0;try{let d=(0,h.getLocationOrigin)(),f=new URL(l,d);return f.origin===d&&(0,g.hasBasePath)(f.pathname)}catch(l){return!1}}},6455:function(l,d){"use strict";function omit(l,d){let f={};return Object.keys(l).forEach(h=>{d.includes(h)||(f[h]=l[h])}),f}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"omit",{enumerable:!0,get:function(){return omit}})},1156:function(l,d){"use strict";function parsePath(l){let d=l.indexOf("#"),f=l.indexOf("?"),h=f>-1&&(d<0||f-1?{pathname:l.substring(0,h?f:d),query:h?l.substring(f,d>-1?d:void 0):"",hash:d>-1?l.slice(d):""}:{pathname:l,query:"",hash:""}}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"parsePath",{enumerable:!0,get:function(){return parsePath}})},1748:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"parseRelativeUrl",{enumerable:!0,get:function(){return parseRelativeUrl}});let h=f(109),g=f(5980);function parseRelativeUrl(l,d){let f=new URL((0,h.getLocationOrigin)()),y=d?new URL(d,f):l.startsWith(".")?new URL(window.location.href):f,{pathname:P,searchParams:b,search:E,hash:S,href:w,origin:R}=new URL(l,y);if(R!==f.origin)throw Error("invariant: invalid relative URL, 
router received "+l);return{pathname:P,query:(0,g.searchParamsToUrlQuery)(b),search:E,hash:S,href:w.slice(f.origin.length)}}},387:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"pathHasPrefix",{enumerable:!0,get:function(){return pathHasPrefix}});let h=f(1156);function pathHasPrefix(l,d){if("string"!=typeof l)return!1;let{pathname:f}=(0,h.parsePath)(l);return f===d||f.startsWith(d+"/")}},5980:function(l,d){"use strict";function searchParamsToUrlQuery(l){let d={};return l.forEach((l,f)=>{void 0===d[f]?d[f]=l:Array.isArray(d[f])?d[f].push(l):d[f]=[d[f],l]}),d}function stringifyUrlQueryParam(l){return"string"!=typeof l&&("number"!=typeof l||isNaN(l))&&"boolean"!=typeof l?"":String(l)}function urlQueryToSearchParams(l){let d=new URLSearchParams;return Object.entries(l).forEach(l=>{let[f,h]=l;Array.isArray(h)?h.forEach(l=>d.append(f,stringifyUrlQueryParam(l))):d.set(f,stringifyUrlQueryParam(h))}),d}function assign(l){for(var d=arguments.length,f=Array(d>1?d-1:0),h=1;h{Array.from(d.keys()).forEach(d=>l.delete(d)),d.forEach((d,f)=>l.append(f,d))}),l}Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{searchParamsToUrlQuery:function(){return searchParamsToUrlQuery},urlQueryToSearchParams:function(){return urlQueryToSearchParams},assign:function(){return assign}})},2531:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"removePathPrefix",{enumerable:!0,get:function(){return removePathPrefix}});let h=f(387);function removePathPrefix(l,d){if(!(0,h.pathHasPrefix)(l,d))return l;let f=l.slice(d.length);return f.startsWith("/")?f:"/"+f}},7425:function(l,d){"use strict";function removeTrailingSlash(l){return l.replace(/\/$/,"")||"/"}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"removeTrailingSlash",{enumerable:!0,get:function(){return 
removeTrailingSlash}})},2142:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"getRouteMatcher",{enumerable:!0,get:function(){return getRouteMatcher}});let h=f(109);function getRouteMatcher(l){let{re:d,groups:f}=l;return l=>{let g=d.exec(l);if(!g)return!1;let decode=l=>{try{return decodeURIComponent(l)}catch(l){throw new h.DecodeError("failed to decode param")}},y={};return Object.keys(f).forEach(l=>{let d=f[l],h=g[d.pos];void 0!==h&&(y[l]=~h.indexOf("/")?h.split("/").map(l=>decode(l)):d.repeat?[decode(h)]:decode(h))}),y}}},2839:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{getRouteRegex:function(){return getRouteRegex},getNamedRouteRegex:function(){return getNamedRouteRegex},getNamedMiddlewareRegex:function(){return getNamedMiddlewareRegex}});let h=f(2407),g=f(997),y=f(7425);function parseParameter(l){let d=l.startsWith("[")&&l.endsWith("]");d&&(l=l.slice(1,-1));let f=l.startsWith("...");return f&&(l=l.slice(3)),{key:l,repeat:f,optional:d}}function getParametrizedRoute(l){let d=(0,y.removeTrailingSlash)(l).slice(1).split("/"),f={},P=1;return{parameterizedRoute:d.map(l=>{let d=h.INTERCEPTION_ROUTE_MARKERS.find(d=>l.startsWith(d)),y=l.match(/\[((?:\[.*\])|.+)\]/);if(d&&y){let{key:l,optional:h,repeat:b}=parseParameter(y[1]);return f[l]={pos:P++,repeat:b,optional:h},"/"+(0,g.escapeStringRegexp)(d)+"([^/]+?)"}if(!y)return"/"+(0,g.escapeStringRegexp)(l);{let{key:l,repeat:d,optional:h}=parseParameter(y[1]);return f[l]={pos:P++,repeat:d,optional:h},d?h?"(?:/(.+?))?":"/(.+?)":"/([^/]+?)"}}).join(""),groups:f}}function getRouteRegex(l){let{parameterizedRoute:d,groups:f}=getParametrizedRoute(l);return{re:RegExp("^"+d+"(?:/)?$"),groups:f}}function buildGetSafeRouteKey(){let l=0;return()=>{let d="",f=++l;for(;f>0;)d+=String.fromCharCode(97+(f-1)%26),f=Math.floor((f-1)/26);return d}}function 
getSafeKeyFromSegment(l){let{getSafeRouteKey:d,segment:f,routeKeys:h,keyPrefix:g}=l,{key:y,optional:P,repeat:b}=parseParameter(f),E=y.replace(/\W/g,"");g&&(E=""+g+E);let S=!1;return(0===E.length||E.length>30)&&(S=!0),isNaN(parseInt(E.slice(0,1)))||(S=!0),S&&(E=d()),g?h[E]=""+g+y:h[E]=""+y,b?P?"(?:/(?<"+E+">.+?))?":"/(?<"+E+">.+?)":"/(?<"+E+">[^/]+?)"}function getNamedParametrizedRoute(l,d){let f=(0,y.removeTrailingSlash)(l).slice(1).split("/"),P=buildGetSafeRouteKey(),b={};return{namedParameterizedRoute:f.map(l=>{let f=h.INTERCEPTION_ROUTE_MARKERS.some(d=>l.startsWith(d)),y=l.match(/\[((?:\[.*\])|.+)\]/);return f&&y?getSafeKeyFromSegment({getSafeRouteKey:P,segment:y[1],routeKeys:b,keyPrefix:d?"nxtI":void 0}):y?getSafeKeyFromSegment({getSafeRouteKey:P,segment:y[1],routeKeys:b,keyPrefix:d?"nxtP":void 0}):"/"+(0,g.escapeStringRegexp)(l)}).join(""),routeKeys:b}}function getNamedRouteRegex(l,d){let f=getNamedParametrizedRoute(l,d);return{...getRouteRegex(l),namedRegex:"^"+f.namedParameterizedRoute+"(?:/)?$",routeKeys:f.routeKeys}}function getNamedMiddlewareRegex(l,d){let{parameterizedRoute:f}=getParametrizedRoute(l),{catchAll:h=!0}=d;if("/"===f)return{namedRegex:"^/"+(h?".*":"")+"$"};let{namedParameterizedRoute:g}=getNamedParametrizedRoute(l,!1);return{namedRegex:"^"+g+(h?"(?:(/.*)?)":"")+"$"}}},2677:function(l,d){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"getSortedRoutes",{enumerable:!0,get:function(){return getSortedRoutes}});let UrlNode=class UrlNode{insert(l){this._insert(l.split("/").filter(Boolean),[],!1)}smoosh(){return this._smoosh()}_smoosh(l){void 0===l&&(l="/");let d=[...this.children.keys()].sort();null!==this.slugName&&d.splice(d.indexOf("[]"),1),null!==this.restSlugName&&d.splice(d.indexOf("[...]"),1),null!==this.optionalRestSlugName&&d.splice(d.indexOf("[[...]]"),1);let 
f=d.map(d=>this.children.get(d)._smoosh(""+l+d+"/")).reduce((l,d)=>[...l,...d],[]);if(null!==this.slugName&&f.push(...this.children.get("[]")._smoosh(l+"["+this.slugName+"]/")),!this.placeholder){let d="/"===l?"/":l.slice(0,-1);if(null!=this.optionalRestSlugName)throw Error('You cannot define a route with the same specificity as a optional catch-all route ("'+d+'" and "'+d+"[[..."+this.optionalRestSlugName+']]").');f.unshift(d)}return null!==this.restSlugName&&f.push(...this.children.get("[...]")._smoosh(l+"[..."+this.restSlugName+"]/")),null!==this.optionalRestSlugName&&f.push(...this.children.get("[[...]]")._smoosh(l+"[[..."+this.optionalRestSlugName+"]]/")),f}_insert(l,d,f){if(0===l.length){this.placeholder=!1;return}if(f)throw Error("Catch-all must be the last part of the URL.");let h=l[0];if(h.startsWith("[")&&h.endsWith("]")){let g=h.slice(1,-1),y=!1;if(g.startsWith("[")&&g.endsWith("]")&&(g=g.slice(1,-1),y=!0),g.startsWith("...")&&(g=g.substring(3),f=!0),g.startsWith("[")||g.endsWith("]"))throw Error("Segment names may not start or end with extra brackets ('"+g+"').");if(g.startsWith("."))throw Error("Segment names may not start with erroneous periods ('"+g+"').");function handleSlug(l,f){if(null!==l&&l!==f)throw Error("You cannot use different slug names for the same dynamic path ('"+l+"' !== '"+f+"').");d.forEach(l=>{if(l===f)throw Error('You cannot have the same slug name "'+f+'" repeat within a single dynamic path');if(l.replace(/\W/g,"")===h.replace(/\W/g,""))throw Error('You cannot have the slug names "'+l+'" and "'+f+'" differ only by non-word symbols within a single dynamic path')}),d.push(f)}if(f){if(y){if(null!=this.restSlugName)throw Error('You cannot use both an required and optional catch-all route at the same level ("[...'+this.restSlugName+']" and "'+l[0]+'" ).');handleSlug(this.optionalRestSlugName,g),this.optionalRestSlugName=g,h="[[...]]"}else{if(null!=this.optionalRestSlugName)throw Error('You cannot use both an optional and required 
catch-all route at the same level ("[[...'+this.optionalRestSlugName+']]" and "'+l[0]+'").');handleSlug(this.restSlugName,g),this.restSlugName=g,h="[...]"}}else{if(y)throw Error('Optional route parameters are not yet supported ("'+l[0]+'").');handleSlug(this.slugName,g),this.slugName=g,h="[]"}}this.children.has(h)||this.children.set(h,new UrlNode),this.children.get(h)._insert(l.slice(1),d,f)}constructor(){this.placeholder=!0,this.children=new Map,this.slugName=null,this.restSlugName=null,this.optionalRestSlugName=null}};function getSortedRoutes(l){let d=new UrlNode;return l.forEach(l=>d.insert(l)),d.smoosh()}},5612:function(l,d){"use strict";let f;Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{default:function(){return _default},setConfig:function(){return setConfig}});let _default=()=>f;function setConfig(l){f=l}},6163:function(l,d){"use strict";function isGroupSegment(l){return"("===l[0]&&l.endsWith(")")}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"isGroupSegment",{enumerable:!0,get:function(){return isGroupSegment}})},8955:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"default",{enumerable:!0,get:function(){return SideEffect}});let h=f(7294),g=h.useLayoutEffect,y=h.useEffect;function SideEffect(l){let{headManager:d,reduceComponentsToState:f}=l;function emitChange(){if(d&&d.mountedInstances){let g=h.Children.toArray(Array.from(d.mountedInstances).filter(Boolean));d.updateHead(f(g,l))}}return g(()=>{var f;return null==d||null==(f=d.mountedInstances)||f.add(l.children),()=>{var f;null==d||null==(f=d.mountedInstances)||f.delete(l.children)}}),g(()=>(d&&(d._pendingUpdate=emitChange),()=>{d&&(d._pendingUpdate=emitChange)})),y(()=>(d&&d._pendingUpdate&&(d._pendingUpdate(),d._pendingUpdate=null),()=>{d&&d._pendingUpdate&&(d._pendingUpdate(),d._pendingUpdate=null)})),null}},109:function(l,d){"use 
strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{WEB_VITALS:function(){return f},execOnce:function(){return execOnce},isAbsoluteUrl:function(){return isAbsoluteUrl},getLocationOrigin:function(){return getLocationOrigin},getURL:function(){return getURL},getDisplayName:function(){return getDisplayName},isResSent:function(){return isResSent},normalizeRepeatedSlashes:function(){return normalizeRepeatedSlashes},loadGetInitialProps:function(){return loadGetInitialProps},SP:function(){return g},ST:function(){return y},DecodeError:function(){return DecodeError},NormalizeError:function(){return NormalizeError},PageNotFoundError:function(){return PageNotFoundError},MissingStaticPage:function(){return MissingStaticPage},MiddlewareNotFoundError:function(){return MiddlewareNotFoundError},stringifyError:function(){return stringifyError}});let f=["CLS","FCP","FID","INP","LCP","TTFB"];function execOnce(l){let d,f=!1;return function(){for(var h=arguments.length,g=Array(h),y=0;yh.test(l);function getLocationOrigin(){let{protocol:l,hostname:d,port:f}=window.location;return l+"//"+d+(f?":"+f:"")}function getURL(){let{href:l}=window.location,d=getLocationOrigin();return l.substring(d.length)}function getDisplayName(l){return"string"==typeof l?l:l.displayName||l.name||"Unknown"}function isResSent(l){return l.finished||l.headersSent}function normalizeRepeatedSlashes(l){let d=l.split("?"),f=d[0];return f.replace(/\\/g,"/").replace(/\/\/+/g,"/")+(d[1]?"?"+d.slice(1).join("?"):"")}async function loadGetInitialProps(l,d){let f=d.res||d.ctx&&d.ctx.res;if(!l.getInitialProps)return d.ctx&&d.Component?{pageProps:await loadGetInitialProps(d.Component,d.ctx)}:{};let h=await l.getInitialProps(d);if(f&&isResSent(f))return h;if(!h){let d='"'+getDisplayName(l)+'.getInitialProps()" should resolve to an object. 
But found "'+h+'" instead.';throw Error(d)}return h}let g="undefined"!=typeof performance,y=g&&["mark","measure","getEntriesByName"].every(l=>"function"==typeof performance[l]);let DecodeError=class DecodeError extends Error{};let NormalizeError=class NormalizeError extends Error{};let PageNotFoundError=class PageNotFoundError extends Error{constructor(l){super(),this.code="ENOENT",this.name="PageNotFoundError",this.message="Cannot find module for page: "+l}};let MissingStaticPage=class MissingStaticPage extends Error{constructor(l,d){super(),this.message="Failed to load static file for page: "+l+" "+d}};let MiddlewareNotFoundError=class MiddlewareNotFoundError extends Error{constructor(){super(),this.code="ENOENT",this.message="Cannot find the middleware module"}};function stringifyError(l){return JSON.stringify({message:l.message,stack:l.stack})}},1905:function(l,d){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"warnOnce",{enumerable:!0,get:function(){return warnOnce}});let warnOnce=l=>{}},8018:function(l){var d,f,h,g,y,P,b,E,S,w,R,O,j,A,M,C,I,L,x,N,D,k,F,U,H,B,W,q,z,G,V,X,K,Y,Q,$,J,Z,ee,et,er,en,ea,eo,ei,el;(d={}).d=function(l,f){for(var h in f)d.o(f,h)&&!d.o(l,h)&&Object.defineProperty(l,h,{enumerable:!0,get:f[h]})},d.o=function(l,d){return Object.prototype.hasOwnProperty.call(l,d)},d.r=function(l){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(l,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(l,"__esModule",{value:!0})},void 0!==d&&(d.ab="//"),f={},d.r(f),d.d(f,{getCLS:function(){return F},getFCP:function(){return N},getFID:function(){return G},getINP:function(){return en},getLCP:function(){return eo},getTTFB:function(){return el},onCLS:function(){return F},onFCP:function(){return N},onFID:function(){return G},onINP:function(){return en},onLCP:function(){return eo},onTTFB:function(){return 
el}}),E=-1,S=function(l){addEventListener("pageshow",function(d){d.persisted&&(E=d.timeStamp,l(d))},!0)},w=function(){return window.performance&&performance.getEntriesByType&&performance.getEntriesByType("navigation")[0]},R=function(){var l=w();return l&&l.activationStart||0},O=function(l,d){var f=w(),h="navigate";return E>=0?h="back-forward-cache":f&&(h=document.prerendering||R()>0?"prerender":f.type.replace(/_/g,"-")),{name:l,value:void 0===d?-1:d,rating:"good",delta:0,entries:[],id:"v3-".concat(Date.now(),"-").concat(Math.floor(8999999999999*Math.random())+1e12),navigationType:h}},j=function(l,d,f){try{if(PerformanceObserver.supportedEntryTypes.includes(l)){var h=new PerformanceObserver(function(l){d(l.getEntries())});return h.observe(Object.assign({type:l,buffered:!0},f||{})),h}}catch(l){}},A=function(l,d){var T=function t(f){"pagehide"!==f.type&&"hidden"!==document.visibilityState||(l(f),d&&(removeEventListener("visibilitychange",t,!0),removeEventListener("pagehide",t,!0)))};addEventListener("visibilitychange",T,!0),addEventListener("pagehide",T,!0)},M=function(l,d,f,h){var g,y;return function(P){var b;d.value>=0&&(P||h)&&((y=d.value-(g||0))||void 0===g)&&(g=d.value,d.delta=y,d.rating=(b=d.value)>f[1]?"poor":b>f[0]?"needs-improvement":"good",l(d))}},C=-1,I=function(){return"hidden"!==document.visibilityState||document.prerendering?1/0:0},L=function(){A(function(l){C=l.timeStamp},!0)},x=function(){return C<0&&(C=I(),L(),S(function(){setTimeout(function(){C=I(),L()},0)})),{get firstHiddenTime(){return C}}},N=function(l,d){d=d||{};var f,h=[1800,3e3],g=x(),y=O("FCP"),c=function(l){l.forEach(function(l){"first-contentful-paint"===l.name&&(b&&b.disconnect(),l.startTime-1&&l(d)},g=O("CLS",0),y=0,P=[],p=function(l){l.forEach(function(l){if(!l.hadRecentInput){var 
d=P[0],f=P[P.length-1];y&&l.startTime-f.startTime<1e3&&l.startTime-d.startTime<5e3?(y+=l.value,P.push(l)):(y=l.value,P=[l]),y>g.value&&(g.value=y,g.entries=P,h())}})},b=j("layout-shift",p);b&&(h=M(i,g,f,d.reportAllChanges),A(function(){p(b.takeRecords()),h(!0)}),S(function(){y=0,k=-1,h=M(i,g=O("CLS",0),f,d.reportAllChanges)}))},U={passive:!0,capture:!0},H=new Date,B=function(l,d){h||(h=d,g=l,y=new Date,z(removeEventListener),W())},W=function(){if(g>=0&&g1e12?new Date:performance.now())-l.timeStamp;"pointerdown"==l.type?(d=function(){B(g,l),h()},f=function(){h()},h=function(){removeEventListener("pointerup",d,U),removeEventListener("pointercancel",f,U)},addEventListener("pointerup",d,U),addEventListener("pointercancel",f,U)):B(g,l)}},z=function(l){["mousedown","keydown","touchstart","pointerdown"].forEach(function(d){return l(d,q,U)})},G=function(l,d){d=d||{};var f,y=[100,300],b=x(),E=O("FID"),v=function(l){l.startTimed.latency){if(f)f.entries.push(l),f.latency=Math.max(f.latency,l.duration);else{var h={id:l.interactionId,latency:l.duration,entries:[l]};et[h.id]=h,ee.push(h)}ee.sort(function(l,d){return d.latency-l.latency}),ee.splice(10).forEach(function(l){delete et[l.id]})}},en=function(l,d){d=d||{};var f=[200,500];$();var h,g=O("INP"),a=function(l){l.forEach(function(l){l.interactionId&&er(l),"first-input"!==l.entryType||ee.some(function(d){return d.entries.some(function(d){return l.duration===d.duration&&l.startTime===d.startTime})})||er(l)});var d,f=(d=Math.min(ee.length-1,Math.floor(Z()/50)),ee[d]);f&&f.latency!==g.value&&(g.value=f.latency,g.entries=f.entries,h())},y=j("event",a,{durationThreshold:d.durationThreshold||40});h=M(l,g,f,d.reportAllChanges),y&&(y.observe({type:"first-input",buffered:!0}),A(function(){a(y.takeRecords()),g.value<0&&Z()>0&&(g.value=0,g.entries=[]),h(!0)}),S(function(){ee=[],J=Q(),h=M(l,g=O("INP"),f,d.reportAllChanges)}))},ea={},eo=function(l,d){d=d||{};var f,h=[2500,4e3],g=x(),y=O("LCP"),c=function(l){var d=l[l.length-1];if(d){var 
h=d.startTime-R();hperformance.now())return;h.entries=[y],g(!0),S(function(){(g=M(l,h=O("TTFB",0),f,d.reportAllChanges))(!0)})}})},l.exports=f},9423:function(l,d){"use strict";function isAPIRoute(l){return"/api"===l||!!(null==l?void 0:l.startsWith("/api/"))}Object.defineProperty(d,"__esModule",{value:!0}),Object.defineProperty(d,"isAPIRoute",{enumerable:!0,get:function(){return isAPIRoute}})},676:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{default:function(){return isError},getProperError:function(){return getProperError}});let h=f(5585);function isError(l){return"object"==typeof l&&null!==l&&"name"in l&&"message"in l}function getProperError(l){return isError(l)?l:Error((0,h.isPlainObject)(l)?JSON.stringify(l):l+"")}},2407:function(l,d,f){"use strict";Object.defineProperty(d,"__esModule",{value:!0}),function(l,d){for(var f in d)Object.defineProperty(l,f,{enumerable:!0,get:d[f]})}(d,{INTERCEPTION_ROUTE_MARKERS:function(){return g},isInterceptionRouteAppPath:function(){return isInterceptionRouteAppPath},extractInterceptionRouteInformation:function(){return extractInterceptionRouteInformation}});let h=f(3090),g=["(..)(..)","(.)","(..)","(...)"];function isInterceptionRouteAppPath(l){return void 0!==l.split("/").find(l=>g.find(d=>l.startsWith(d)))}function extractInterceptionRouteInformation(l){let d,f,y;for(let h of l.split("/"))if(f=g.find(l=>h.startsWith(l))){[d,y]=l.split(f,2);break}if(!d||!f||!y)throw Error(`Invalid interception route: ${l}. Must be in the format //(..|...|..)(..)/`);switch(d=(0,h.normalizeAppPath)(d),f){case"(.)":y="/"===d?`/${y}`:d+"/"+y;break;case"(..)":if("/"===d)throw Error(`Invalid interception route: ${l}. Cannot use (..) marker at the root level, use (.) 
instead.`);y=d.split("/").slice(0,-1).concat(y).join("/");break;case"(...)":y="/"+y;break;case"(..)(..)":let P=d.split("/");if(P.length<=2)throw Error(`Invalid interception route: ${l}. Cannot use (..)(..) marker at the root level or one level up.`);y=P.slice(0,-2).concat(y).join("/");break;default:throw Error("Invariant: unexpected marker")}return{interceptingRoute:d,interceptedRoute:y}}},2431:function(){},8754:function(l,d,f){"use strict";function _interop_require_default(l){return l&&l.__esModule?l:{default:l}}f.r(d),f.d(d,{_:function(){return _interop_require_default},_interop_require_default:function(){return _interop_require_default}})},1757:function(l,d,f){"use strict";function _getRequireWildcardCache(l){if("function"!=typeof WeakMap)return null;var d=new WeakMap,f=new WeakMap;return(_getRequireWildcardCache=function(l){return l?f:d})(l)}function _interop_require_wildcard(l,d){if(!d&&l&&l.__esModule)return l;if(null===l||"object"!=typeof l&&"function"!=typeof l)return{default:l};var f=_getRequireWildcardCache(d);if(f&&f.has(l))return f.get(l);var h={},g=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var y in l)if("default"!==y&&Object.prototype.hasOwnProperty.call(l,y)){var P=g?Object.getOwnPropertyDescriptor(l,y):null;P&&(P.get||P.set)?Object.defineProperty(h,y,P):h[y]=l[y]}return h.default=l,f&&f.set(l,h),h}f.r(d),f.d(d,{_:function(){return _interop_require_wildcard},_interop_require_wildcard:function(){return _interop_require_wildcard}})}},function(l){var __webpack_exec__=function(d){return l(l.s=d)};l.O(0,[774],function(){return __webpack_exec__(3143),__webpack_exec__(6003)}),_N_E=l.O()}]);(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[888],{6840:function(e,n,t){(window.__NEXT_P=window.__NEXT_P||[]).push(["/_app",function(){return t(5913)}])},5913:function(e,n,t){"use strict";t.r(n),t.d(n,{default:function(){return MyApp}});var i=t(5893),c=t(9008),s=t.n(c);function 
MyApp(e){let{Component:n,pageProps:t}=e;return(0,i.jsxs)(i.Fragment,{children:[(0,i.jsxs)(s(),{children:[(0,i.jsx)("meta",{charSet:"utf-8"}),(0,i.jsx)("meta",{httpEquiv:"X-UA-Compatible",content:"IE=edge"}),(0,i.jsx)("meta",{name:"viewport",content:"width=device-width,initial-scale=1,minimum-scale=1,maximum-scale=1,user-scalable=no"}),(0,i.jsx)("meta",{name:"description",content:"Description"}),(0,i.jsx)("meta",{name:"keywords",content:"Keywords"}),(0,i.jsx)("title",{children:"Laconic Test PWA"}),(0,i.jsx)("link",{rel:"manifest",href:"/manifest.json"}),(0,i.jsx)("link",{href:"/icons/favicon-16x16.png",rel:"icon",type:"image/png",sizes:"16x16"}),(0,i.jsx)("link",{href:"/icons/favicon-32x32.png",rel:"icon",type:"image/png",sizes:"32x32"}),(0,i.jsx)("link",{rel:"apple-touch-icon",href:"/apple-icon.png"}),(0,i.jsx)("meta",{name:"theme-color",content:"#317EFB"})]}),(0,i.jsx)(n,{...t})]})}t(415)},415:function(){},9008:function(e,n,t){e.exports=t(9201)}},function(e){var __webpack_exec__=function(n){return e(e.s=n)};e.O(0,[774,179],function(){return __webpack_exec__(6840),__webpack_exec__(9974)}),_N_E=e.O()}]);(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[405],{8312:function(e,c,n){(window.__NEXT_P=window.__NEXT_P||[]).push(["/",function(){return n(2627)}])},2627:function(e,c,n){"use strict";n.r(c),n.d(c,{default:function(){return Home}});var _=n(5893),s=n(6612),o=n.n(s);function Home(){return(0,_.jsxs)("div",{className:o().container,children:[(0,_.jsxs)("main",{className:o().main,children:[(0,_.jsxs)("h1",{className:o().title,children:["Welcome to ",(0,_.jsx)("a",{href:"https://www.laconic.com/",children:"Laconic!"})]}),(0,_.jsxs)("div",{className:o().grid,children:[(0,_.jsxs)("p",{className:o().card,children:["CONFIG1 has value: ","this string"]}),(0,_.jsxs)("p",{className:o().card,children:["CONFIG2 has value: ","this different string"]}),(0,_.jsxs)("p",{className:o().card,children:["WEBAPP_DEBUG has value: 
","44ec6317-c911-47ff-86c1-d36c42ae9383"]})]})]}),(0,_.jsx)("footer",{className:o().footer,children:(0,_.jsxs)("a",{href:"https://www.laconic.com/",target:"_blank",rel:"noopener noreferrer",children:["Powered by \xa0",(0,_.jsxs)("svg",{width:"133",height:"24",fill:"none",xmlns:"http://www.w3.org/2000/svg",children:[(0,_.jsx)("path",{d:"M37.761 22.302h9.246v-2.704h-6.155v-17.9h-3.09v20.604ZM59.314 1.697h-5.126l-5.357 20.605h3.194l1.34-5.151h6.618l1.34 5.151h3.348L59.314 1.697Zm-5.306 12.878 2.679-10.663h.103l2.575 10.663h-5.357ZM74.337 9.682h3.606c0-5.873-1.88-8.397-6.259-8.397-4.61 0-6.593 3.194-6.593 10.689 0 7.52 1.983 10.74 6.593 10.74 4.379 0 6.259-2.447 6.285-8.139h-3.606c-.026 4.456-.567 5.563-2.679 5.563-2.42 0-3.013-1.622-2.987-8.164 0-6.516.592-8.14 2.987-8.113 2.112 0 2.653 1.159 2.653 5.82ZM86.689 1.285c4.687.026 6.696 3.245 6.696 10.715 0 7.469-2.009 10.688-6.696 10.714-4.714.026-6.723-3.194-6.723-10.714 0-7.521 2.01-10.74 6.723-10.715ZM83.572 12c0 6.516.618 8.139 3.117 8.139 2.472 0 3.09-1.623 3.09-8.14 0-6.541-.618-8.164-3.09-8.138-2.499.026-3.117 1.648-3.117 8.139ZM99.317 22.276l-3.09.026V1.697h5.434l5.074 16.793h.052V1.697h3.09v20.605h-5.099l-5.409-18.08h-.052v18.054ZM116.615 1.697h-3.091v20.605h3.091V1.697ZM128.652 9.682h3.606c0-5.873-1.881-8.397-6.259-8.397-4.61 0-6.594 3.194-6.594 10.689 0 7.52 1.984 10.74 6.594 10.74 4.378 0 6.259-2.447 6.284-8.139h-3.605c-.026 4.456-.567 5.563-2.679 5.563-2.421 0-3.014-1.622-2.988-8.164 0-6.516.593-8.14 2.988-8.113 2.112 0 2.653 1.159 2.653 5.82Z",fill:"#000000"}),(0,_.jsx)("path",{fillRule:"evenodd",clipRule:"evenodd",d:"M4.05 12.623A15.378 15.378 0 0 0 8.57 1.714C8.573 1.136 8.54.564 8.477 0H0v16.287c0 1.974.752 3.949 2.258 5.454A7.69 7.69 0 0 0 7.714 24L24 24v-8.477a15.636 15.636 0 0 0-1.715-.095c-4.258 0-8.115 1.73-10.908 4.523-2.032 1.981-5.291 1.982-7.299-.026-2.006-2.006-2.007-5.266-.029-7.302Zm18.192-10.86a6.004 6.004 0 0 0-8.485 0 6.003 6.003 0 0 0 0 8.484 6.003 6.003 0 0 0 8.485 0 6.002 6.002 0 0 0 
0-8.485Z",fill:"#000000"})]})]})})]})}},6612:function(e){e.exports={container:"Home_container__d256j",main:"Home_main__VkIEL",footer:"Home_footer__yFiaX",title:"Home_title__hYX6j",description:"Home_description__uXNdx",code:"Home_code__VVrIr",grid:"Home_grid__AVljO",card:"Home_card__E5spL",logo:"Home_logo__IOQAX"}}},function(e){e.O(0,[774,888,179],function(){return e(e.s=8312)}),_N_E=e.O()}]);self.__BUILD_MANIFEST={__rewrites:{afterFiles:[],beforeFiles:[],fallback:[]},"/":["static/css/3571059724d711eb.css","static/chunks/pages/index-08151452ae5af5e0.js"],"/_error":["static/chunks/pages/_error-ee5b5fb91d29d86f.js"],sortedPages:["/","/_app","/_error"]},self.__BUILD_MANIFEST_CB&&self.__BUILD_MANIFEST_CB();self.__SSG_MANIFEST=new Set,self.__SSG_MANIFEST_CB&&self.__SSG_MANIFEST_CB(); \ No newline at end of file From 5e91c2224ecd90680064d9163010f6139d8480e2 Mon Sep 17 00:00:00 2001 From: David Boreham Date: Wed, 8 Nov 2023 01:11:00 -0700 Subject: [PATCH 17/62] kind test stack (#629) --- .../deploy/compose/deploy_docker.py | 2 +- stack_orchestrator/deploy/deploy.py | 10 ++-- stack_orchestrator/deploy/deploy_types.py | 7 --- stack_orchestrator/deploy/deployer_factory.py | 6 +-- stack_orchestrator/deploy/deployment.py | 31 +------------ .../deploy/deployment_context.py | 46 +++++++++++++++++++ .../deploy/deployment_create.py | 18 ++++---- stack_orchestrator/deploy/k8s/cluster_info.py | 25 ++++++++-- stack_orchestrator/deploy/k8s/deploy_k8s.py | 25 ++++++++-- stack_orchestrator/deploy/k8s/helpers.py | 23 ++++++++-- stack_orchestrator/deploy/stack.py | 5 +- 11 files changed, 134 insertions(+), 64 deletions(-) create mode 100644 stack_orchestrator/deploy/deployment_context.py diff --git a/stack_orchestrator/deploy/compose/deploy_docker.py b/stack_orchestrator/deploy/compose/deploy_docker.py index 1e5f5f81..79ab1482 100644 --- a/stack_orchestrator/deploy/compose/deploy_docker.py +++ b/stack_orchestrator/deploy/compose/deploy_docker.py @@ -21,7 +21,7 @@ from 
stack_orchestrator.deploy.deployer import Deployer, DeployerException, Depl class DockerDeployer(Deployer): name: str = "compose" - def __init__(self, compose_files, compose_project_name, compose_env_file) -> None: + def __init__(self, deployment_dir, compose_files, compose_project_name, compose_env_file) -> None: self.docker = DockerClient(compose_files=compose_files, compose_project_name=compose_project_name, compose_env_file=compose_env_file) diff --git a/stack_orchestrator/deploy/deploy.py b/stack_orchestrator/deploy/deploy.py index 57fedebf..1c467067 100644 --- a/stack_orchestrator/deploy/deploy.py +++ b/stack_orchestrator/deploy/deploy.py @@ -28,6 +28,7 @@ from stack_orchestrator.util import include_exclude_check, get_parsed_stack_conf from stack_orchestrator.deploy.deployer import Deployer, DeployerException from stack_orchestrator.deploy.deployer_factory import getDeployer from stack_orchestrator.deploy.deploy_types import ClusterContext, DeployCommandContext +from stack_orchestrator.deploy.deployment_context import DeploymentContext from stack_orchestrator.deploy.deployment_create import create as deployment_create from stack_orchestrator.deploy.deployment_create import init as deployment_init from stack_orchestrator.deploy.deployment_create import setup as deployment_setup @@ -56,14 +57,17 @@ def command(ctx, include, exclude, env_file, cluster, deploy_to): if deploy_to is None: deploy_to = "compose" - ctx.obj = create_deploy_context(global_options2(ctx), stack, include, exclude, cluster, env_file, deploy_to) + ctx.obj = create_deploy_context(global_options2(ctx), None, stack, include, exclude, cluster, env_file, deploy_to) # Subcommand is executed now, by the magic of click -def create_deploy_context(global_context, stack, include, exclude, cluster, env_file, deployer): +def create_deploy_context( + global_context, deployment_context: DeploymentContext, stack, include, exclude, cluster, env_file, deployer): cluster_context = 
_make_cluster_context(global_context, stack, include, exclude, cluster, env_file) + deployment_dir = deployment_context.deployment_dir if deployment_context else None # See: https://gabrieldemarmiesse.github.io/python-on-whales/sub-commands/compose/ - deployer = getDeployer(deployer, compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster, + deployer = getDeployer(deployer, deployment_dir, compose_files=cluster_context.compose_files, + compose_project_name=cluster_context.cluster, compose_env_file=cluster_context.env_file) return DeployCommandContext(stack, cluster_context, deployer) diff --git a/stack_orchestrator/deploy/deploy_types.py b/stack_orchestrator/deploy/deploy_types.py index b0c59380..fd14e90e 100644 --- a/stack_orchestrator/deploy/deploy_types.py +++ b/stack_orchestrator/deploy/deploy_types.py @@ -15,7 +15,6 @@ from typing import List from dataclasses import dataclass -from pathlib import Path from stack_orchestrator.command_types import CommandOptions from stack_orchestrator.deploy.deployer import Deployer @@ -38,12 +37,6 @@ class DeployCommandContext: deployer: Deployer -@dataclass -class DeploymentContext: - deployment_dir: Path - command_context: DeployCommandContext - - @dataclass class VolumeMapping: host_path: str diff --git a/stack_orchestrator/deploy/deployer_factory.py b/stack_orchestrator/deploy/deployer_factory.py index 262fa2dd..5d515418 100644 --- a/stack_orchestrator/deploy/deployer_factory.py +++ b/stack_orchestrator/deploy/deployer_factory.py @@ -26,10 +26,10 @@ def getDeployerConfigGenerator(type: str): print(f"ERROR: deploy-to {type} is not valid") -def getDeployer(type: str, compose_files, compose_project_name, compose_env_file): +def getDeployer(type: str, deployment_dir, compose_files, compose_project_name, compose_env_file): if type == "compose" or type is None: - return DockerDeployer(compose_files, compose_project_name, compose_env_file) + return DockerDeployer(deployment_dir, compose_files, 
compose_project_name, compose_env_file) elif type == "k8s": - return K8sDeployer(compose_files, compose_project_name, compose_env_file) + return K8sDeployer(deployment_dir, compose_files, compose_project_name, compose_env_file) else: print(f"ERROR: deploy-to {type} is not valid") diff --git a/stack_orchestrator/deploy/deployment.py b/stack_orchestrator/deploy/deployment.py index c6656b01..e22d7dcc 100644 --- a/stack_orchestrator/deploy/deployment.py +++ b/stack_orchestrator/deploy/deployment.py @@ -18,34 +18,7 @@ from pathlib import Path import sys from stack_orchestrator.deploy.deploy import up_operation, down_operation, ps_operation, port_operation from stack_orchestrator.deploy.deploy import exec_operation, logs_operation, create_deploy_context -from stack_orchestrator.deploy.stack import Stack -from stack_orchestrator.deploy.spec import Spec - - -class DeploymentContext: - dir: Path - spec: Spec - stack: Stack - - def get_stack_file(self): - return self.dir.joinpath("stack.yml") - - def get_spec_file(self): - return self.dir.joinpath("spec.yml") - - def get_env_file(self): - return self.dir.joinpath("config.env") - - # TODO: implement me - def get_cluster_name(self): - return None - - def init(self, dir): - self.dir = dir - self.stack = Stack() - self.stack.init_from_file(self.get_stack_file()) - self.spec = Spec() - self.spec.init_from_file(self.get_spec_file()) +from stack_orchestrator.deploy.deployment_context import DeploymentContext @click.group() @@ -77,7 +50,7 @@ def make_deploy_context(ctx): stack_file_path = context.get_stack_file() env_file = context.get_env_file() cluster_name = context.get_cluster_name() - return create_deploy_context(ctx.parent.parent.obj, stack_file_path, None, None, cluster_name, env_file, + return create_deploy_context(ctx.parent.parent.obj, context, stack_file_path, None, None, cluster_name, env_file, context.spec.obj["deploy-to"]) diff --git a/stack_orchestrator/deploy/deployment_context.py 
b/stack_orchestrator/deploy/deployment_context.py new file mode 100644 index 00000000..cd731394 --- /dev/null +++ b/stack_orchestrator/deploy/deployment_context.py @@ -0,0 +1,46 @@ + +# Copyright © 2022, 2023 Vulcanize + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +from pathlib import Path + +from stack_orchestrator.deploy.stack import Stack +from stack_orchestrator.deploy.spec import Spec + + +class DeploymentContext: + deployment_dir: Path + spec: Spec + stack: Stack + + def get_stack_file(self): + return self.deployment_dir.joinpath("stack.yml") + + def get_spec_file(self): + return self.deployment_dir.joinpath("spec.yml") + + def get_env_file(self): + return self.deployment_dir.joinpath("config.env") + + # TODO: implement me + def get_cluster_name(self): + return None + + def init(self, dir): + self.deployment_dir = dir + self.spec = Spec() + self.spec.init_from_file(self.get_spec_file()) + self.stack = Stack(self.spec.obj["stack"]) + self.stack.init_from_file(self.get_stack_file()) diff --git a/stack_orchestrator/deploy/deployment_create.py b/stack_orchestrator/deploy/deployment_create.py index 8a2237a8..c00c0dc6 100644 --- a/stack_orchestrator/deploy/deployment_create.py +++ b/stack_orchestrator/deploy/deployment_create.py @@ -24,8 +24,9 @@ import sys from stack_orchestrator.util import (get_stack_file_path, get_parsed_deployment_spec, get_parsed_stack_config, global_options, 
get_yaml, get_pod_list, get_pod_file_path, pod_has_scripts, get_pod_script_paths, get_plugin_code_paths) -from stack_orchestrator.deploy.deploy_types import DeploymentContext, DeployCommandContext, LaconicStackSetupCommand +from stack_orchestrator.deploy.deploy_types import LaconicStackSetupCommand from stack_orchestrator.deploy.deployer_factory import getDeployerConfigGenerator +from stack_orchestrator.deploy.deployment_context import DeploymentContext def _make_default_deployment_dir(): @@ -108,8 +109,8 @@ def _fixup_pod_file(pod, spec, compose_dir): pod["services"][container_name]["ports"] = container_ports -def _commands_plugin_paths(ctx: DeployCommandContext): - plugin_paths = get_plugin_code_paths(ctx.stack) +def _commands_plugin_paths(stack_name: str): + plugin_paths = get_plugin_code_paths(stack_name) ret = [p.joinpath("deploy", "commands.py") for p in plugin_paths] return ret @@ -123,7 +124,7 @@ def call_stack_deploy_init(deploy_command_context): # Link with the python file in the stack # Call a function in it # If no function found, return None - python_file_paths = _commands_plugin_paths(deploy_command_context) + python_file_paths = _commands_plugin_paths(deploy_command_context.stack) ret = None init_done = False @@ -147,7 +148,7 @@ def call_stack_deploy_setup(deploy_command_context, parameters: LaconicStackSetu # Link with the python file in the stack # Call a function in it # If no function found, return None - python_file_paths = _commands_plugin_paths(deploy_command_context) + python_file_paths = _commands_plugin_paths(deploy_command_context.stack) for python_file_path in python_file_paths: if python_file_path.exists(): spec = util.spec_from_file_location("commands", python_file_path) @@ -162,7 +163,7 @@ def call_stack_deploy_create(deployment_context, extra_args): # Link with the python file in the stack # Call a function in it # If no function found, return None - python_file_paths = _commands_plugin_paths(deployment_context.command_context) + 
python_file_paths = _commands_plugin_paths(deployment_context.stack.name) for python_file_path in python_file_paths: if python_file_path.exists(): spec = util.spec_from_file_location("commands", python_file_path) @@ -311,7 +312,7 @@ def _copy_files_to_directory(file_paths: List[Path], directory: Path): def create(ctx, spec_file, deployment_dir, network_dir, initial_peers): # This function fails with a useful error message if the file doens't exist parsed_spec = get_parsed_deployment_spec(spec_file) - stack_name = parsed_spec['stack'] + stack_name = parsed_spec["stack"] stack_file = get_stack_file_path(stack_name) parsed_stack = get_parsed_stack_config(stack_name) if global_options(ctx).debug: @@ -367,7 +368,8 @@ def create(ctx, spec_file, deployment_dir, network_dir, initial_peers): # stack member here. deployment_command_context = ctx.obj deployment_command_context.stack = stack_name - deployment_context = DeploymentContext(Path(deployment_dir), deployment_command_context) + deployment_context = DeploymentContext() + deployment_context.init(Path(deployment_dir)) # Call the deployer to generate any deployer-specific files (e.g. 
for kind) deployer_config_generator = getDeployerConfigGenerator(parsed_spec["deploy-to"]) # TODO: make deployment_dir a Path above diff --git a/stack_orchestrator/deploy/k8s/cluster_info.py b/stack_orchestrator/deploy/k8s/cluster_info.py index 5d785a01..deb0859d 100644 --- a/stack_orchestrator/deploy/k8s/cluster_info.py +++ b/stack_orchestrator/deploy/k8s/cluster_info.py @@ -18,7 +18,7 @@ from typing import Any, List, Set from stack_orchestrator.opts import opts from stack_orchestrator.deploy.k8s.helpers import named_volumes_from_pod_files, volume_mounts_for_service, volumes_for_pod_files -from stack_orchestrator.deploy.k8s.helpers import parsed_pod_files_map_from_file_names +from stack_orchestrator.deploy.k8s.helpers import parsed_pod_files_map_from_file_names, get_node_pv_mount_path class ClusterInfo: @@ -50,11 +50,12 @@ class ClusterInfo: print(f"Volumes: {volumes}") for volume_name in volumes: spec = client.V1PersistentVolumeClaimSpec( - storage_class_name="standard", access_modes=["ReadWriteOnce"], + storage_class_name="manual", resources=client.V1ResourceRequirements( requests={"storage": "2Gi"} - ) + ), + volume_name=volume_name ) pvc = client.V1PersistentVolumeClaim( metadata=client.V1ObjectMeta(name=volume_name, @@ -64,6 +65,24 @@ class ClusterInfo: result.append(pvc) return result + def get_pvs(self): + result = [] + volumes = named_volumes_from_pod_files(self.parsed_pod_yaml_map) + for volume_name in volumes: + spec = client.V1PersistentVolumeSpec( + storage_class_name="manual", + access_modes=["ReadWriteOnce"], + capacity={"storage": "2Gi"}, + host_path=client.V1HostPathVolumeSource(path=get_node_pv_mount_path(volume_name)) + ) + pv = client.V1PersistentVolume( + metadata=client.V1ObjectMeta(name=volume_name, + labels={"volume-label": volume_name}), + spec=spec, + ) + result.append(pv) + return result + # to suit the deployment, and also annotate the container specs to point at said volumes def get_deployment(self): containers = [] diff --git 
a/stack_orchestrator/deploy/k8s/deploy_k8s.py b/stack_orchestrator/deploy/k8s/deploy_k8s.py index a5167185..5181e163 100644 --- a/stack_orchestrator/deploy/k8s/deploy_k8s.py +++ b/stack_orchestrator/deploy/k8s/deploy_k8s.py @@ -30,12 +30,15 @@ class K8sDeployer(Deployer): k8s_namespace: str = "default" kind_cluster_name: str cluster_info : ClusterInfo + deployment_dir: Path - def __init__(self, compose_files, compose_project_name, compose_env_file) -> None: + def __init__(self, deployment_dir, compose_files, compose_project_name, compose_env_file) -> None: if (opts.o.debug): + print(f"Deployment dir: {deployment_dir}") print(f"Compose files: {compose_files}") print(f"Project name: {compose_project_name}") print(f"Env file: {compose_env_file}") + self.deployment_dir = deployment_dir self.kind_cluster_name = compose_project_name self.cluster_info = ClusterInfo() self.cluster_info.int_from_pod_files(compose_files) @@ -47,16 +50,26 @@ class K8sDeployer(Deployer): def up(self, detach, services): # Create the kind cluster - # HACK: pass in the config file path here - create_cluster(self.kind_cluster_name, "./test-deployment-dir/kind-config.yml") + create_cluster(self.kind_cluster_name, self.deployment_dir.joinpath("kind-config.yml")) self.connect_api() # Ensure the referenced containers are copied into kind load_images_into_kind(self.kind_cluster_name, self.cluster_info.image_set) + + # Create the host-path-mounted PVs for this deployment + pvs = self.cluster_info.get_pvs() + for pv in pvs: + if opts.o.debug: + print(f"Sending this pv: {pv}") + pv_resp = self.core_api.create_persistent_volume(body=pv) + if opts.o.debug: + print("PVs created:") + print(f"{pv_resp}") + # Figure out the PVCs for this deployment pvcs = self.cluster_info.get_pvcs() for pvc in pvcs: if opts.o.debug: - print(f"Sending this: {pvc}") + print(f"Sending this pvc: {pvc}") pvc_resp = self.core_api.create_namespaced_persistent_volume_claim(body=pvc, namespace=self.k8s_namespace) if opts.o.debug: 
print("PVCs created:") @@ -65,7 +78,7 @@ class K8sDeployer(Deployer): deployment = self.cluster_info.get_deployment() # Create the k8s objects if opts.o.debug: - print(f"Sending this: {deployment}") + print(f"Sending this deployment: {deployment}") deployment_resp = self.apps_api.create_namespaced_deployment( body=deployment, namespace=self.k8s_namespace ) @@ -122,6 +135,8 @@ class K8sDeployerConfigGenerator(DeployerConfigGenerator): # Check the file isn't already there # Get the config file contents content = generate_kind_config(deployment_dir) + if opts.o.debug: + print(f"kind config is: {content}") config_file = deployment_dir.joinpath(self.config_file_name) # Write the file with open(config_file, "w") as output_file: diff --git a/stack_orchestrator/deploy/k8s/helpers.py b/stack_orchestrator/deploy/k8s/helpers.py index 8536a521..ad48957b 100644 --- a/stack_orchestrator/deploy/k8s/helpers.py +++ b/stack_orchestrator/deploy/k8s/helpers.py @@ -14,6 +14,7 @@ # along with this program. If not, see . 
from kubernetes import client +import os from pathlib import Path import subprocess from typing import Any, Set @@ -73,6 +74,10 @@ def named_volumes_from_pod_files(parsed_pod_files): return named_volumes +def get_node_pv_mount_path(volume_name: str): + return f"/mnt/{volume_name}" + + def volume_mounts_for_service(parsed_pod_files, service): result = [] # Find the service @@ -119,6 +124,14 @@ def _get_host_paths_for_volumes(parsed_pod_files): return result +def _make_absolute_host_path(data_mount_path: Path, deployment_dir: Path) -> Path: + if os.path.isabs(data_mount_path): + return data_mount_path + else: + # Python Path voodo that looks pretty odd: + return Path.cwd().joinpath(deployment_dir.joinpath("compose").joinpath(data_mount_path)).resolve() + + def parsed_pod_files_map_from_file_names(pod_files): parsed_pod_yaml_map : Any = {} for pod_file in pod_files: @@ -130,9 +143,12 @@ def parsed_pod_files_map_from_file_names(pod_files): return parsed_pod_yaml_map -def _generate_kind_mounts(parsed_pod_files): +def _generate_kind_mounts(parsed_pod_files, deployment_dir): volume_definitions = [] volume_host_path_map = _get_host_paths_for_volumes(parsed_pod_files) + # Note these paths are relative to the location of the pod files (at present) + # So we need to fix up to make them correct and absolute because kind assumes + # relative to the cwd. 
for pod in parsed_pod_files: parsed_pod_file = parsed_pod_files[pod] if "services" in parsed_pod_file: @@ -145,7 +161,8 @@ def _generate_kind_mounts(parsed_pod_files): # Looks like: test-data:/data (volume_name, mount_path) = mount_string.split(":") volume_definitions.append( - f" - hostPath: {volume_host_path_map[volume_name]}\n containerPath: /var/local-path-provisioner" + f" - hostPath: {_make_absolute_host_path(volume_host_path_map[volume_name], deployment_dir)}\n" + f" containerPath: {get_node_pv_mount_path(volume_name)}" ) return ( "" if len(volume_definitions) == 0 else ( @@ -201,7 +218,7 @@ def generate_kind_config(deployment_dir: Path): pod_files = [p for p in compose_file_dir.iterdir() if p.is_file()] parsed_pod_files_map = parsed_pod_files_map_from_file_names(pod_files) port_mappings_yml = _generate_kind_port_mappings(parsed_pod_files_map) - mounts_yml = _generate_kind_mounts(parsed_pod_files_map) + mounts_yml = _generate_kind_mounts(parsed_pod_files_map, deployment_dir) return ( "kind: Cluster\n" "apiVersion: kind.x-k8s.io/v1alpha4\n" diff --git a/stack_orchestrator/deploy/stack.py b/stack_orchestrator/deploy/stack.py index e0d33851..1a493534 100644 --- a/stack_orchestrator/deploy/stack.py +++ b/stack_orchestrator/deploy/stack.py @@ -20,10 +20,11 @@ from stack_orchestrator.util import get_yaml class Stack: + name: str obj: typing.Any - def __init__(self) -> None: - pass + def __init__(self, name: str) -> None: + self.name = name def init_from_file(self, file_path: Path): with file_path: From ce587457d753016dd5aa5bf528a6f3217d85e6a0 Mon Sep 17 00:00:00 2001 From: David Boreham Date: Wed, 8 Nov 2023 17:53:46 -0700 Subject: [PATCH 18/62] Add env var support for k8s (#634) --- requirements.txt | 1 + stack_orchestrator/deploy/deploy_types.py | 7 ++++++- stack_orchestrator/deploy/k8s/cluster_info.py | 9 ++++++++- stack_orchestrator/deploy/k8s/deploy_k8s.py | 2 +- stack_orchestrator/deploy/k8s/helpers.py | 14 +++++++++++++- 5 files changed, 29 insertions(+), 4 
deletions(-) diff --git a/requirements.txt b/requirements.txt index bf4845a1..bbf97b4a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,5 @@ python-decouple>=3.8 +python-dotenv==1.0.0 GitPython>=3.1.32 tqdm>=4.65.0 python-on-whales>=0.64.0 diff --git a/stack_orchestrator/deploy/deploy_types.py b/stack_orchestrator/deploy/deploy_types.py index fd14e90e..f97b2649 100644 --- a/stack_orchestrator/deploy/deploy_types.py +++ b/stack_orchestrator/deploy/deploy_types.py @@ -13,7 +13,7 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . -from typing import List +from typing import List, Mapping from dataclasses import dataclass from stack_orchestrator.command_types import CommandOptions from stack_orchestrator.deploy.deployer import Deployer @@ -59,3 +59,8 @@ class LaconicStackSetupCommand: @dataclass class LaconicStackCreateCommand: network_dir: str + + +@dataclass +class DeployEnvVars: + map: Mapping[str, str] diff --git a/stack_orchestrator/deploy/k8s/cluster_info.py b/stack_orchestrator/deploy/k8s/cluster_info.py index deb0859d..9275db2b 100644 --- a/stack_orchestrator/deploy/k8s/cluster_info.py +++ b/stack_orchestrator/deploy/k8s/cluster_info.py @@ -19,6 +19,8 @@ from typing import Any, List, Set from stack_orchestrator.opts import opts from stack_orchestrator.deploy.k8s.helpers import named_volumes_from_pod_files, volume_mounts_for_service, volumes_for_pod_files from stack_orchestrator.deploy.k8s.helpers import parsed_pod_files_map_from_file_names, get_node_pv_mount_path +from stack_orchestrator.deploy.k8s.helpers import env_var_map_from_file, envs_from_environment_variables_map +from stack_orchestrator.deploy.deploy_types import DeployEnvVars class ClusterInfo: @@ -26,11 +28,12 @@ class ClusterInfo: image_set: Set[str] = set() app_name: str = "test-app" deployment_name: str = "test-deployment" + environment_variables: DeployEnvVars def __init__(self) -> None: pass - def 
int_from_pod_files(self, pod_files: List[str]): + def int(self, pod_files: List[str], compose_env_file): self.parsed_pod_yaml_map = parsed_pod_files_map_from_file_names(pod_files) # Find the set of images in the pods for pod_name in self.parsed_pod_yaml_map: @@ -42,6 +45,9 @@ class ClusterInfo: self.image_set.add(image) if opts.o.debug: print(f"image_set: {self.image_set}") + self.environment_variables = DeployEnvVars(env_var_map_from_file(compose_env_file)) + if (opts.o.debug): + print(f"Env vars: {self.environment_variables.map}") def get_pvcs(self): result = [] @@ -97,6 +103,7 @@ class ClusterInfo: container = client.V1Container( name=container_name, image=image, + env=envs_from_environment_variables_map(self.environment_variables.map), ports=[client.V1ContainerPort(container_port=80)], volume_mounts=volume_mounts, resources=client.V1ResourceRequirements( diff --git a/stack_orchestrator/deploy/k8s/deploy_k8s.py b/stack_orchestrator/deploy/k8s/deploy_k8s.py index 5181e163..bc256b6b 100644 --- a/stack_orchestrator/deploy/k8s/deploy_k8s.py +++ b/stack_orchestrator/deploy/k8s/deploy_k8s.py @@ -41,7 +41,7 @@ class K8sDeployer(Deployer): self.deployment_dir = deployment_dir self.kind_cluster_name = compose_project_name self.cluster_info = ClusterInfo() - self.cluster_info.int_from_pod_files(compose_files) + self.cluster_info.int(compose_files, compose_env_file) def connect_api(self): config.load_kube_config(context=f"kind-{self.kind_cluster_name}") diff --git a/stack_orchestrator/deploy/k8s/helpers.py b/stack_orchestrator/deploy/k8s/helpers.py index ad48957b..db1ef075 100644 --- a/stack_orchestrator/deploy/k8s/helpers.py +++ b/stack_orchestrator/deploy/k8s/helpers.py @@ -14,10 +14,11 @@ # along with this program. If not, see . 
from kubernetes import client +from dotenv import dotenv_values import os from pathlib import Path import subprocess -from typing import Any, Set +from typing import Any, Set, Mapping, List from stack_orchestrator.opts import opts from stack_orchestrator.util import get_yaml @@ -194,6 +195,13 @@ def _generate_kind_port_mappings(parsed_pod_files): ) +def envs_from_environment_variables_map(map: Mapping[str, str]) -> List[client.V1EnvVar]: + result = [] + for env_var, env_val in map.items(): + result.append(client.V1EnvVar(env_var, env_val)) + return result + + # This needs to know: # The service ports for the cluster # The bind mounted volumes for the cluster @@ -227,3 +235,7 @@ def generate_kind_config(deployment_dir: Path): f"{port_mappings_yml}\n" f"{mounts_yml}\n" ) + + +def env_var_map_from_file(file: Path) -> Mapping[str, str]: + return dotenv_values(file) From a27cf86748514046d97721f40e68e00e9d142ed5 Mon Sep 17 00:00:00 2001 From: David Boreham Date: Wed, 8 Nov 2023 19:12:48 -0700 Subject: [PATCH 19/62] Add basic k8s test (#635) * Add CI job * Add basic k8s test --- .gitea/workflows/test-k8s-deploy.yml | 55 ++++++++++++++++++++ tests/k8s-deploy/run-deploy-test.sh | 76 ++++++++++++++++++++++++++++ 2 files changed, 131 insertions(+) create mode 100644 .gitea/workflows/test-k8s-deploy.yml create mode 100755 tests/k8s-deploy/run-deploy-test.sh diff --git a/.gitea/workflows/test-k8s-deploy.yml b/.gitea/workflows/test-k8s-deploy.yml new file mode 100644 index 00000000..47583174 --- /dev/null +++ b/.gitea/workflows/test-k8s-deploy.yml @@ -0,0 +1,55 @@ +name: K8s Deploy Test + +on: + pull_request: + branches: '*' + push: + branches: + - main + - ci-test + paths-ignore: + - '.gitea/workflows/triggers/*' + +# Needed until we can incorporate docker startup into the executor container +env: + DOCKER_HOST: unix:///var/run/dind.sock + +jobs: + test: + name: "Run deploy test suite" + runs-on: ubuntu-latest + steps: + - name: "Clone project repository" + uses: 
actions/checkout@v3 + # At present the stock setup-python action fails on Linux/aarch64 + # Conditional steps below workaroud this by using deadsnakes for that case only + - name: "Install Python for ARM on Linux" + if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }} + uses: deadsnakes/action@v3.0.1 + with: + python-version: '3.8' + - name: "Install Python cases other than ARM on Linux" + if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }} + uses: actions/setup-python@v4 + with: + python-version: '3.8' + - name: "Print Python version" + run: python3 --version + - name: "Install shiv" + run: pip install shiv + - name: "Generate build version file" + run: ./scripts/create_build_tag_file.sh + - name: "Build local shiv package" + run: ./scripts/build_shiv_package.sh + - name: Start dockerd # Also needed until we can incorporate into the executor + run: | + dockerd -H $DOCKER_HOST --userland-proxy=false & + sleep 5 + - name: "Install Go" + uses: actions/setup-go@v4 + with: + go-version: '1.21' + - name: "Install Kind" + run: go install sigs.k8s.io/kind@v0.20.0 + - name: "Run k8s deploy tests" + run: ./tests/k8s-deploy/run-deploy-test.sh diff --git a/tests/k8s-deploy/run-deploy-test.sh b/tests/k8s-deploy/run-deploy-test.sh new file mode 100755 index 00000000..91c7890c --- /dev/null +++ b/tests/k8s-deploy/run-deploy-test.sh @@ -0,0 +1,76 @@ +#!/usr/bin/env bash +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi +# Note: eventually this test should be folded into ../deploy/ +# but keeping it separate for now for convenience +TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 ) +# Dump environment variables for debugging +echo "Environment variables:" +env +# Set a non-default repo dir +export CERC_REPO_BASE_DIR=~/stack-orchestrator-test/repo-base-dir +echo "Testing this package: $TEST_TARGET_SO" +echo "Test version command" +reported_version_string=$( $TEST_TARGET_SO version ) +echo "Version reported is: ${reported_version_string}" +echo 
"Cloning repositories into: $CERC_REPO_BASE_DIR" +rm -rf $CERC_REPO_BASE_DIR +mkdir -p $CERC_REPO_BASE_DIR +# Test basic stack-orchestrator deploy +test_deployment_dir=$CERC_REPO_BASE_DIR/test-deployment-dir +test_deployment_spec=$CERC_REPO_BASE_DIR/test-deployment-spec.yml +$TEST_TARGET_SO --stack test deploy --deploy-to k8s init --output $test_deployment_spec --config CERC_TEST_PARAM_1=PASSED +# Check the file now exists +if [ ! -f "$test_deployment_spec" ]; then + echo "deploy init test: spec file not present" + echo "deploy init test: FAILED" + exit 1 +fi +echo "deploy init test: passed" +$TEST_TARGET_SO --stack test deploy create --spec-file $test_deployment_spec --deployment-dir $test_deployment_dir +# Check the deployment dir exists +if [ ! -d "$test_deployment_dir" ]; then + echo "deploy create test: deployment directory not present" + echo "deploy create test: FAILED" + exit 1 +fi +echo "deploy create test: passed" +# Check the file writted by the create command in the stack now exists +if [ ! -f "$test_deployment_dir/create-file" ]; then + echo "deploy create test: create output file not present" + echo "deploy create test: FAILED" + exit 1 +fi +# And has the right content +create_file_content=$(<$test_deployment_dir/create-file) +if [ ! 
"$create_file_content" == "create-command-output-data" ]; then + echo "deploy create test: create output file contents not correct" + echo "deploy create test: FAILED" + exit 1 +fi +echo "deploy create output file test: passed" +# Try to start the deployment +$TEST_TARGET_SO deployment --dir $test_deployment_dir start +# TODO: add a check to see if the container is up +# Sleep because k8s not up yet +sleep 30 +# Check logs command works +log_output_3=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs ) +if [[ "$log_output_3" == *"Filesystem is fresh"* ]]; then + echo "deployment logs test: passed" +else + echo "deployment logs test: FAILED" + exit 1 +fi +# Check the config variable CERC_TEST_PARAM_1 was passed correctly +if [[ "$log_output_3" == *"Test-param-1: PASSED"* ]]; then + echo "deployment config test: passed" +else + echo "deployment config test: FAILED" + exit 1 +fi +# Stop and clean up +$TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes +echo "Test passed" From 8384e95049056e7784d24c289c0bee20b5358db8 Mon Sep 17 00:00:00 2001 From: David Boreham Date: Wed, 8 Nov 2023 19:42:19 -0700 Subject: [PATCH 20/62] Add debug commands to test job (#636) --- .gitea/workflows/test-k8s-deploy.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitea/workflows/test-k8s-deploy.yml b/.gitea/workflows/test-k8s-deploy.yml index 47583174..84cce91a 100644 --- a/.gitea/workflows/test-k8s-deploy.yml +++ b/.gitea/workflows/test-k8s-deploy.yml @@ -51,5 +51,5 @@ jobs: go-version: '1.21' - name: "Install Kind" run: go install sigs.k8s.io/kind@v0.20.0 - - name: "Run k8s deploy tests" - run: ./tests/k8s-deploy/run-deploy-test.sh + - name: "Debug Kind" + run: kind create cluster --retain && docker logs kind-control-plane From 042b413598df5dd691853be6ac12b5c90f336f87 Mon Sep 17 00:00:00 2001 From: Thomas E Lackey Date: Wed, 8 Nov 2023 23:44:48 -0600 Subject: [PATCH 21/62] Support the case where webpack config is already present 
next.config.js (#631) * Support the case where webpack config is already present next.config.js * Update scripts for experimental-compile/experimental-generate --- stack_orchestrator/build/build_webapp.py | 1 + .../cerc-nextjs-base/Dockerfile | 2 +- .../scripts/apply-runtime-env.sh | 4 +- .../cerc-nextjs-base/scripts/build-app.sh | 73 +++++++++++++------ .../cerc-nextjs-base/scripts/find-env.sh | 5 ++ .../scripts/start-serving-app.sh | 25 ++++++- 6 files changed, 84 insertions(+), 26 deletions(-) diff --git a/stack_orchestrator/build/build_webapp.py b/stack_orchestrator/build/build_webapp.py index f4668c5d..7a656ae7 100644 --- a/stack_orchestrator/build/build_webapp.py +++ b/stack_orchestrator/build/build_webapp.py @@ -65,6 +65,7 @@ def command(ctx, base_container, source_repo, force_rebuild, extra_build_args): # Now build the target webapp. We use the same build script, but with a different Dockerfile and work dir. + container_build_env["CERC_WEBAPP_BUILD_RUNNING"] = "true" container_build_env["CERC_CONTAINER_BUILD_WORK_DIR"] = os.path.abspath(source_repo) container_build_env["CERC_CONTAINER_BUILD_DOCKERFILE"] = os.path.join(container_build_dir, base_container.replace("/", "-"), diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile b/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile index 147cec29..435d8c73 100644 --- a/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile @@ -1,6 +1,6 @@ # Originally from: https://github.com/devcontainers/images/blob/main/src/javascript-node/.devcontainer/Dockerfile # [Choice] Node.js version (use -bullseye variants on local arm64/Apple Silicon): 18, 16, 14, 18-bullseye, 16-bullseye, 14-bullseye, 18-buster, 16-buster, 14-buster -ARG VARIANT=18-bullseye +ARG VARIANT=20-bullseye FROM node:${VARIANT} ARG USERNAME=node diff --git 
a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/apply-runtime-env.sh b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/apply-runtime-env.sh index ba1cd17d..793333c3 100755 --- a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/apply-runtime-env.sh +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/apply-runtime-env.sh @@ -25,12 +25,12 @@ if [ -f ".env" ]; then fi for f in $(find "$TRG_DIR" -regex ".*.[tj]sx?$" -type f | grep -v 'node_modules'); do - for e in $(cat "${f}" | tr -s '[:blank:]' '\n' | tr -s '[{},()]' '\n' | egrep -o '^"CERC_RUNTIME_ENV[^\"]+"$'); do + for e in $(cat "${f}" | tr -s '[:blank:]' '\n' | tr -s '[{},();]' '\n' | egrep -o '^"CERC_RUNTIME_ENV_[^\"]+"'); do orig_name=$(echo -n "${e}" | sed 's/"//g') cur_name=$(echo -n "${orig_name}" | sed 's/CERC_RUNTIME_ENV_//g') cur_val=$(echo -n "\$${cur_name}" | envsubst) esc_val=$(sed 's/[&/\]/\\&/g' <<< "$cur_val") - echo "$cur_name=$cur_val" + echo "$f: $cur_name=$cur_val" sed -i "s/$orig_name/$esc_val/g" $f done done diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/build-app.sh b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/build-app.sh index 9277abc6..c2115f6a 100755 --- a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/build-app.sh +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/build-app.sh @@ -10,21 +10,25 @@ WORK_DIR="${1:-/app}" cd "${WORK_DIR}" || exit 1 -cp next.config.js next.config.dist +if [ ! -f "next.config.dist" ]; then + cp next.config.js next.config.dist +fi + +which js-beautify >/dev/null +if [ $? 
-ne 0 ]; then + npm i -g js-beautify +fi -npm i -g js-beautify js-beautify next.config.dist > next.config.js -npm install - -CONFIG_LINES=$(wc -l next.config.js | awk '{ print $1 }') -MOD_EXPORTS_LINE=$(grep -n 'module.exports' next.config.js | cut -d':' -f1) - -head -$(( ${MOD_EXPORTS_LINE} - 1 )) next.config.js > next.config.js.1 - -cat > next.config.js.2 < next.config.js.0 < next.config.js.1 < next.config.js.3 +CONFIG_LINES=$(wc -l next.config.js | awk '{ print $1 }') +ENV_LINE=$(grep -n 'env:' next.config.js | cut -d':' -f1) +WEBPACK_CONF_LINE=$(egrep -n 'webpack:\s+\([^,]+,' next.config.js | cut -d':' -f1) +NEXT_SECTION_ADJUSTMENT=0 -cat > next.config.js.4 < { - config.plugins.push(new webpack.DefinePlugin(envMap)); - return config; - }, +if [ -n "$WEBPACK_CONF_LINE" ]; then + WEBPACK_CONF_VAR=$(egrep -n 'webpack:\s+\([^,]+,' next.config.js | cut -d',' -f1 | cut -d'(' -f2) + head -$(( ${WEBPACK_CONF_LINE} )) next.config.js > next.config.js.2 + cat > next.config.js.3 < next.config.js.2 + cat > next.config.js.3 < { + config.plugins.push(new webpack.DefinePlugin(envMap)); + return config; + }, +EOF + NEXT_SECTION_ADJUSTMENT=2 + NEXT_SECTION_LINE=$ENV_LINE +else + echo "WARNING: Cannot find location to insert environment variable map in next.config.js" 1>&2 + rm -f next.config.js.* + NEXT_SECTION_LINE=0 +fi -tail -$(( ${CONFIG_LINES} - ${MOD_EXPORTS_LINE} + 1 )) next.config.js | grep -v 'process\.env\.' > next.config.js.5 +tail -$(( ${CONFIG_LINES} - ${NEXT_SECTION_LINE} + ${NEXT_SECTION_ADJUSTMENT} )) next.config.js > next.config.js.5 -cat next.config.js.* | js-beautify > next.config.js +cat next.config.js.* | sed 's/^ *//g' | js-beautify | grep -v 'process\.\env\.' | js-beautify > next.config.js rm next.config.js.* "${SCRIPT_DIR}/find-env.sh" "$(pwd)" > .env-list.json -npm run build -rm .env-list.json \ No newline at end of file +if [ ! 
-f "package.dist" ]; then + cp package.json package.dist +fi + +cat package.dist | jq '.scripts.cerc_compile = "next experimental-compile"' | jq '.scripts.cerc_generate = "next experimental-generate"' > package.json + +npm install || exit 1 +npm run cerc_compile || exit 1 + +exit 0 diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/find-env.sh b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/find-env.sh index 0c0e87c9..59cb3d49 100755 --- a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/find-env.sh +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/find-env.sh @@ -20,5 +20,10 @@ for d in $(find . -maxdepth 1 -type d | grep -v '\./\.' | grep '/' | cut -d'/' - done done +NEXT_CONF="next.config.js next.config.dist" +for f in $NEXT_CONF; do + cat "$f" | tr -s '[:blank:]' '\n' | tr -s '[{},()]' '\n' | egrep -o 'process.env.[A-Za-z0-9_]+' >> $TMPF +done + cat $TMPF | sort -u | jq --raw-input . | jq --slurp . rm -f $TMPF diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/start-serving-app.sh b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/start-serving-app.sh index abe72935..61664c68 100755 --- a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/start-serving-app.sh +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/start-serving-app.sh @@ -8,6 +8,27 @@ SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) CERC_WEBAPP_FILES_DIR="${CERC_WEBAPP_FILES_DIR:-/app}" cd "$CERC_WEBAPP_FILES_DIR" -rm -rf .next-r "$SCRIPT_DIR/apply-runtime-env.sh" "`pwd`" .next .next-r -npm start .next-r -p ${CERC_LISTEN_PORT:-3000} +mv .next .next.old +mv .next-r/.next . + +if [ "$CERC_NEXTJS_SKIP_GENERATE" != "true" ]; then + jq -e '.scripts.cerc_generate' package.json >/dev/null + if [ $? 
-eq 0 ]; then + npm run cerc_generate > gen.out 2>&1 & + tail -n0 -f gen.out | sed '/rendered as static HTML/ q' + count=0 + while [ $count -lt 10 ]; do + sleep 1 + ps -ef | grep 'node' | grep 'next' | grep 'generate' >/dev/null + if [ $? -ne 0 ]; then + break + else + count=$((count + 1)) + fi + done + kill $(ps -ef |grep node | grep next | grep generate | awk '{print $2}') 2>/dev/null + fi +fi + +npm start . -p ${CERC_LISTEN_PORT:-3000} From 1072fc98c3d6d1a82f3a8ff464356dc6af705d97 Mon Sep 17 00:00:00 2001 From: iskay Date: Fri, 10 Nov 2023 20:05:22 +0000 Subject: [PATCH 22/62] update fixturenet-optimism --- .../docker-compose-fixturenet-optimism.yml | 117 ++++++------ .../fixturenet-optimism/generate-l2-config.sh | 37 ---- .../optimism-contracts/deploy-contracts.sh | 172 ++++++++++++++++++ .../optimism-contracts/run.sh | 131 ------------- .../optimism-contracts/update-config.js | 36 ---- .../config/fixturenet-optimism/run-geth.sh | 155 ++++++++++++++++ .../fixturenet-optimism/run-op-batcher.sh | 26 +-- .../config/fixturenet-optimism/run-op-geth.sh | 84 +++------ .../config/fixturenet-optimism/run-op-node.sh | 51 ++++-- .../fixturenet-optimism/run-op-proposer.sh | 38 ++-- .../cerc-optimism-contracts/Dockerfile | 2 +- .../cerc-optimism-op-batcher/Dockerfile | 6 +- .../cerc-optimism-op-node/Dockerfile | 4 +- .../cerc-optimism-op-proposer/Dockerfile | 6 +- .../data/stacks/fixturenet-optimism/README.md | 113 ++++++++++-- .../fixturenet-optimism/deploy/commands.py | 37 ++++ .../stacks/fixturenet-optimism/l2-only.md | 79 +++++--- .../data/stacks/fixturenet-optimism/stack.yml | 4 +- 18 files changed, 656 insertions(+), 442 deletions(-) delete mode 100755 stack_orchestrator/data/config/fixturenet-optimism/generate-l2-config.sh create mode 100755 stack_orchestrator/data/config/fixturenet-optimism/optimism-contracts/deploy-contracts.sh delete mode 100755 stack_orchestrator/data/config/fixturenet-optimism/optimism-contracts/run.sh delete mode 100644 
stack_orchestrator/data/config/fixturenet-optimism/optimism-contracts/update-config.js create mode 100755 stack_orchestrator/data/config/fixturenet-optimism/run-geth.sh create mode 100644 stack_orchestrator/data/stacks/fixturenet-optimism/deploy/commands.py diff --git a/stack_orchestrator/data/compose/docker-compose-fixturenet-optimism.yml b/stack_orchestrator/data/compose/docker-compose-fixturenet-optimism.yml index ddf7e290..fe1eac50 100644 --- a/stack_orchestrator/data/compose/docker-compose-fixturenet-optimism.yml +++ b/stack_orchestrator/data/compose/docker-compose-fixturenet-optimism.yml @@ -6,8 +6,8 @@ services: # Deploys the L1 smart contracts (outputs to volume l1_deployment) fixturenet-optimism-contracts: restart: on-failure - hostname: fixturenet-optimism-contracts image: cerc/optimism-contracts:local + hostname: fixturenet-optimism-contracts env_file: - ../config/fixturenet-optimism/l1-params.env environment: @@ -17,27 +17,49 @@ services: CERC_L1_ACCOUNTS_CSV_URL: ${CERC_L1_ACCOUNTS_CSV_URL} CERC_L1_ADDRESS: ${CERC_L1_ADDRESS} CERC_L1_PRIV_KEY: ${CERC_L1_PRIV_KEY} - CERC_L1_ADDRESS_2: ${CERC_L1_ADDRESS_2} - CERC_L1_PRIV_KEY_2: ${CERC_L1_PRIV_KEY_2} - # Waits for L1 endpoint to be up before running the script - command: | - "./wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- ./run.sh" volumes: - ../config/network/wait-for-it.sh:/app/packages/contracts-bedrock/wait-for-it.sh - - ../config/optimism-contracts/hardhat-tasks/verify-contract-deployment.ts:/app/packages/contracts-bedrock/tasks/verify-contract-deployment.ts - - ../config/optimism-contracts/hardhat-tasks/rekey-json.ts:/app/packages/contracts-bedrock/tasks/rekey-json.ts - - ../config/optimism-contracts/hardhat-tasks/send-balance.ts:/app/packages/contracts-bedrock/tasks/send-balance.ts - - ../config/fixturenet-optimism/optimism-contracts/update-config.js:/app/packages/contracts-bedrock/update-config.js - - 
../config/fixturenet-optimism/optimism-contracts/run.sh:/app/packages/contracts-bedrock/run.sh + - ../config/fixturenet-optimism/optimism-contracts/deploy-contracts.sh:/app/packages/contracts-bedrock/deploy-contracts.sh - l2_accounts:/l2-accounts - - l1_deployment:/app/packages/contracts-bedrock + - l1_deployment:/l1-deployment + - l2_config:/l2-config + # Waits for L1 endpoint to be up before running the contract deploy script + command: | + "./wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- ./deploy-contracts.sh" + + # Initializes and runs the L2 execution client (outputs to volume l2_geth_data) + op-geth: + restart: always + image: cerc/optimism-l2geth:local + hostname: op-geth + depends_on: + op-node: + condition: service_started + volumes: + - ../config/fixturenet-optimism/run-op-geth.sh:/run-op-geth.sh + - l2_config:/l2-config:ro + - l2_accounts:/l2-accounts:ro + - l2_geth_data:/datadir + entrypoint: "sh" + command: "/run-op-geth.sh" + ports: + - "8545" + - "8546" + healthcheck: + test: ["CMD", "nc", "-vz", "localhost:8545"] + interval: 30s + timeout: 10s + retries: 100 + start_period: 10s extra_hosts: - "host.docker.internal:host-gateway" - # Generates the config files required for L2 (outputs to volume l2_config) - op-node-l2-config-gen: - restart: on-failure + # Runs the L2 consensus client (Sequencer node) + # Generates the L2 config files if not already present (outputs to volume l2_config) + op-node: + restart: always image: cerc/optimism-op-node:local + hostname: op-node depends_on: fixturenet-optimism-contracts: condition: service_completed_successfully @@ -47,61 +69,19 @@ services: CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} CERC_L1_RPC: ${CERC_L1_RPC} volumes: - - ../config/fixturenet-optimism/generate-l2-config.sh:/app/generate-l2-config.sh - - l1_deployment:/contracts-bedrock:ro - - l2_config:/app - command: ["sh", "/app/generate-l2-config.sh"] - extra_hosts: - - 
"host.docker.internal:host-gateway" - - # Initializes and runs the L2 execution client (outputs to volume l2_geth_data) - op-geth: - restart: always - image: cerc/optimism-l2geth:local - depends_on: - op-node-l2-config-gen: - condition: service_started - volumes: - - ../config/fixturenet-optimism/run-op-geth.sh:/run-op-geth.sh - - l2_config:/op-node:ro + - ../config/fixturenet-optimism/run-op-node.sh:/run-op-node.sh + - l1_deployment:/l1-deployment:ro + - l2_config:/l2-config - l2_accounts:/l2-accounts:ro - - l2_geth_data:/datadir entrypoint: "sh" - command: "/run-op-geth.sh" + command: "/run-op-node.sh" ports: - - "0.0.0.0:8545:8545" - - "0.0.0.0:8546:8546" - healthcheck: - test: ["CMD", "nc", "-vz", "localhost:8545"] - interval: 30s - timeout: 10s - retries: 10 - start_period: 10s - - # Runs the L2 consensus client (Sequencer node) - op-node: - restart: always - image: cerc/optimism-op-node:local - depends_on: - op-geth: - condition: service_healthy - env_file: - - ../config/fixturenet-optimism/l1-params.env - environment: - CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} - CERC_L1_RPC: ${CERC_L1_RPC} - volumes: - - ../config/fixturenet-optimism/run-op-node.sh:/app/run-op-node.sh - - l2_config:/op-node-data:ro - - l2_accounts:/l2-accounts:ro - command: ["sh", "/app/run-op-node.sh"] - ports: - - "0.0.0.0:8547:8547" + - "8547" healthcheck: test: ["CMD", "nc", "-vz", "localhost:8547"] interval: 30s timeout: 10s - retries: 10 + retries: 100 start_period: 10s extra_hosts: - "host.docker.internal:host-gateway" @@ -110,6 +90,7 @@ services: op-batcher: restart: always image: cerc/optimism-op-batcher:local + hostname: op-batcher depends_on: op-node: condition: service_healthy @@ -129,7 +110,7 @@ services: command: | "/wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- /run-op-batcher.sh" ports: - - "127.0.0.1:8548:8548" + - "8548" extra_hosts: - "host.docker.internal:host-gateway" @@ -137,25 +118,29 @@ services: 
op-proposer: restart: always image: cerc/optimism-op-proposer:local + hostname: op-proposer depends_on: op-node: condition: service_healthy + op-geth: + condition: service_healthy env_file: - ../config/fixturenet-optimism/l1-params.env environment: CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} CERC_L1_RPC: ${CERC_L1_RPC} + CERC_L1_CHAIN_ID: ${CERC_L1_CHAIN_ID} volumes: - ../config/network/wait-for-it.sh:/wait-for-it.sh - ../config/fixturenet-optimism/run-op-proposer.sh:/run-op-proposer.sh - - l1_deployment:/contracts-bedrock:ro + - l1_deployment:/l1-deployment:ro - l2_accounts:/l2-accounts:ro entrypoint: ["sh", "-c"] # Waits for L1 endpoint to be up before running the proposer command: | "/wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- /run-op-proposer.sh" ports: - - "127.0.0.1:8560:8560" + - "8560" extra_hosts: - "host.docker.internal:host-gateway" diff --git a/stack_orchestrator/data/config/fixturenet-optimism/generate-l2-config.sh b/stack_orchestrator/data/config/fixturenet-optimism/generate-l2-config.sh deleted file mode 100755 index b10048d2..00000000 --- a/stack_orchestrator/data/config/fixturenet-optimism/generate-l2-config.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/sh -set -e -if [ -n "$CERC_SCRIPT_DEBUG" ]; then - set -x -fi - -CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}" - -# Check existing config if it exists -if [ -f /app/jwt.txt ] && [ -f /app/rollup.json ]; then - echo "Found existing L2 config, cross-checking with L1 deployment config" - - SOURCE_L1_CONF=$(cat /contracts-bedrock/deploy-config/getting-started.json) - EXP_L1_BLOCKHASH=$(echo "$SOURCE_L1_CONF" | jq -r '.l1StartingBlockTag') - EXP_BATCHER=$(echo "$SOURCE_L1_CONF" | jq -r '.batchSenderAddress') - - GEN_L2_CONF=$(cat /app/rollup.json) - GEN_L1_BLOCKHASH=$(echo "$GEN_L2_CONF" | jq -r '.genesis.l1.hash') - GEN_BATCHER=$(echo "$GEN_L2_CONF" | jq -r '.genesis.system_config.batcherAddr') - - if [ "$EXP_L1_BLOCKHASH" = 
"$GEN_L1_BLOCKHASH" ] && [ "$EXP_BATCHER" = "$GEN_BATCHER" ]; then - echo "Config cross-checked, exiting" - exit 0 - fi - - echo "Existing L2 config doesn't match the L1 deployment config, please clear L2 config volume before starting" - exit 1 -fi - -op-node genesis l2 \ - --deploy-config /contracts-bedrock/deploy-config/getting-started.json \ - --deployment-dir /contracts-bedrock/deployments/getting-started/ \ - --outfile.l2 /app/genesis.json \ - --outfile.rollup /app/rollup.json \ - --l1-rpc $CERC_L1_RPC - -openssl rand -hex 32 > /app/jwt.txt diff --git a/stack_orchestrator/data/config/fixturenet-optimism/optimism-contracts/deploy-contracts.sh b/stack_orchestrator/data/config/fixturenet-optimism/optimism-contracts/deploy-contracts.sh new file mode 100755 index 00000000..23a2bc30 --- /dev/null +++ b/stack_orchestrator/data/config/fixturenet-optimism/optimism-contracts/deploy-contracts.sh @@ -0,0 +1,172 @@ +#!/bin/bash +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +CERC_L1_CHAIN_ID="${CERC_L1_CHAIN_ID:-${DEFAULT_CERC_L1_CHAIN_ID}}" +CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}" + +CERC_L1_ACCOUNTS_CSV_URL="${CERC_L1_ACCOUNTS_CSV_URL:-${DEFAULT_CERC_L1_ACCOUNTS_CSV_URL}}" + +export DEPLOYMENT_CONTEXT="$CERC_L1_CHAIN_ID" +# Optional create2 salt for deterministic deployment of contract implementations +export IMPL_SALT=$(openssl rand -hex 32) + +echo "Using L1 RPC endpoint ${CERC_L1_RPC}" + +# Exit if a deployment already exists (on restarts) +if [ -d "/l1-deployment/$DEPLOYMENT_CONTEXT" ]; then + echo "Deployment directory /l1-deployment/$DEPLOYMENT_CONTEXT, checking OptimismPortal deployment" + + OPTIMISM_PORTAL_ADDRESS=$(cat /l1-deployment/$DEPLOYMENT_CONTEXT/OptimismPortal.json | jq -r .address) + contract_code=$(cast code $OPTIMISM_PORTAL_ADDRESS --rpc-url $CERC_L1_RPC) + + if [ -z "${contract_code#0x}" ]; then + echo "Error: A deployment directory was found in the volume, but no contract code was found on-chain at the associated address. 
Please clear L1 deployment volume before restarting." + exit 1 + else + echo "Deployment found, exiting (successfully)." + exit 0 + fi +fi + +wait_for_block() { + local block="$1" # Block to wait for + local timeout="$2" # Max time to wait in seconds + + echo "Waiting for block $block." + i=0 + loops=$(($timeout/10)) + while [ -z "$block_result" ] && [[ "$i" -lt "$loops" ]]; do + sleep 10 + echo "Checking..." + block_result=$(cast block $block --rpc-url $CERC_L1_RPC | grep -E "(timestamp|hash|number)" || true) + i=$(($i + 1)) + done +} + +# We need four accounts and their private keys for the deployment: Admin, Proposer, Batcher, and Sequencer +# If $CERC_L1_ADDRESS and $CERC_L1_PRIV_KEY have been set, we'll assign it to Admin and generate/fund the remaining three accounts from it +# If not, we'll assume the L1 is the stack's own fixturenet-eth and use the pre-funded accounts/keys from $CERC_L1_ACCOUNTS_CSV_URL +if [ -n "$CERC_L1_ADDRESS" ] && [ -n "$CERC_L1_PRIV_KEY" ]; then + wallet1=$(cast wallet new) + wallet2=$(cast wallet new) + wallet3=$(cast wallet new) + # Admin + ADMIN=$CERC_L1_ADDRESS + ADMIN_KEY=$CERC_L1_PRIV_KEY + # Proposer + PROPOSER=$(echo "$wallet1" | awk '/Address:/{print $2}') + PROPOSER_KEY=$(echo "$wallet1" | awk '/Private key:/{print $3}') + # Batcher + BATCHER=$(echo "$wallet2" | awk '/Address:/{print $2}') + BATCHER_KEY=$(echo "$wallet2" | awk '/Private key:/{print $3}') + # Sequencer + SEQ=$(echo "$wallet3" | awk '/Address:/{print $2}') + SEQ_KEY=$(echo "$wallet3" | awk '/Private key:/{print $3}') + + echo "Funding accounts." 
+ wait_for_block 1 300 + cast send --from $ADMIN --rpc-url $CERC_L1_RPC --value 5ether $PROPOSER --private-key $ADMIN_KEY + cast send --from $ADMIN --rpc-url $CERC_L1_RPC --value 10ether $BATCHER --private-key $ADMIN_KEY + cast send --from $ADMIN --rpc-url $CERC_L1_RPC --value 2ether $SEQ --private-key $ADMIN_KEY +else + curl -o accounts.csv $CERC_L1_ACCOUNTS_CSV_URL + # Admin + ADMIN=$(awk -F ',' 'NR == 1 {print $2}' accounts.csv) + ADMIN_KEY=$(awk -F ',' 'NR == 1 {print $3}' accounts.csv) + # Proposer + PROPOSER=$(awk -F ',' 'NR == 2 {print $2}' accounts.csv) + PROPOSER_KEY=$(awk -F ',' 'NR == 2 {print $3}' accounts.csv) + # Batcher + BATCHER=$(awk -F ',' 'NR == 3 {print $2}' accounts.csv) + BATCHER_KEY=$(awk -F ',' 'NR == 3 {print $3}' accounts.csv) + # Sequencer + SEQ=$(awk -F ',' 'NR == 4 {print $2}' accounts.csv) + SEQ_KEY=$(awk -F ',' 'NR == 4 {print $3}' accounts.csv) +fi + +echo "Using accounts:" +echo -e "Admin: $ADMIN\nProposer: $PROPOSER\nBatcher: $BATCHER\nSequencer: $SEQ" + +# These accounts will be needed by other containers, so write them to a shared volume +echo "Writing accounts/private keys to volume l2_accounts." +accounts_json=$(jq -n \ + --arg Admin "$ADMIN" --arg AdminKey "$ADMIN_KEY" \ + --arg Proposer "$PROPOSER" --arg ProposerKey "$PROPOSER_KEY" \ + --arg Batcher "$BATCHER" --arg BatcherKey "$BATCHER_KEY" \ + --arg Seq "$SEQ" --arg SeqKey "$SEQ_KEY" \ + '{Admin: $Admin, AdminKey: $AdminKey, Proposer: $Proposer, ProposerKey: $ProposerKey, Batcher: $Batcher, BatcherKey: $BatcherKey, Seq: $Seq, SeqKey: $SeqKey}') +echo "$accounts_json" > "/l2-accounts/accounts.json" + +# Get a finalized L1 block to set as the starting point for the L2 deployment +# If the chain is a freshly created fixturenet-eth, a finalized block won't be available for many minutes; rather than wait, we can use block 1 +echo "Checking L1 for finalized block..." 
+finalized=$(cast block finalized --rpc-url $CERC_L1_RPC | grep -E "(timestamp|hash|number)" || true) + +if [ -n "$finalized" ]; then + # finalized block was found + start_block=$finalized +else + # assume fresh chain and use block 1 instead + echo "No finalized block. Using block 1 instead." + # wait for 20 or so blocks to be safe + wait_for_block 24 300 + start_block=$(cast block 1 --rpc-url $CERC_L1_RPC | grep -E "(timestamp|hash|number)" || true) +fi + +if [ -z "$start_block" ]; then + echo "Unable to query chain for starting block. Exiting..." + exit 1 +fi + +BLOCKHASH=$(echo $start_block | awk -F ' ' '{print $2}') +HEIGHT=$(echo $start_block | awk -F ' ' '{print $4}') +TIMESTAMP=$(echo $start_block | awk -F ' ' '{print $6}') + +echo "Using block as deployment point:" +echo "Height: $HEIGHT" +echo "Hash: $BLOCKHASH" +echo "Timestamp: $TIMESTAMP" + +# Fill out the deployment template (./deploy-config/getting-started.json) with our values: +echo "Writing deployment config." +deploy_config_file="deploy-config/$DEPLOYMENT_CONTEXT.json" +cp deploy-config/getting-started.json $deploy_config_file +sed -i "s/\"l1ChainID\": .*/\"l1ChainID\": $DEPLOYMENT_CONTEXT,/g" $deploy_config_file +sed -i "s/ADMIN/$ADMIN/g" $deploy_config_file +sed -i "s/PROPOSER/$PROPOSER/g" $deploy_config_file +sed -i "s/BATCHER/$BATCHER/g" $deploy_config_file +sed -i "s/SEQUENCER/$SEQ/g" $deploy_config_file +sed -i "s/BLOCKHASH/$BLOCKHASH/g" $deploy_config_file +sed -i "s/TIMESTAMP/$TIMESTAMP/g" $deploy_config_file + +mkdir -p deployments/$DEPLOYMENT_CONTEXT + +# Deployment requires the create2 deterministic proxy contract be published on L1 at address 0x4e59b44847b379578588920ca78fbf26c0b4956c +# See: https://github.com/Arachnid/deterministic-deployment-proxy +echo "Deploying create2 proxy contract..." 
+echo "Funding deployment signer address" +deployment_signer="0x3fab184622dc19b6109349b94811493bf2a45362" +cast send --from $ADMIN --rpc-url $CERC_L1_RPC --value 0.5ether $deployment_signer --private-key $ADMIN_KEY +echo "Deploying contract..." +raw_bytes="0xf8a58085174876e800830186a08080b853604580600e600039806000f350fe7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf31ba02222222222222222222222222222222222222222222222222222222222222222a02222222222222222222222222222222222222222222222222222222222222222" + +cast publish --rpc-url $CERC_L1_RPC $raw_bytes + +# Create the L2 deployment +echo "Deploying L1 Optimism contracts..." +forge script scripts/Deploy.s.sol:Deploy --private-key $ADMIN_KEY --broadcast --rpc-url $CERC_L1_RPC +forge script scripts/Deploy.s.sol:Deploy --sig 'sync()' --private-key $ADMIN_KEY --broadcast --rpc-url $CERC_L1_RPC + +echo "*************************************" +echo "Done deploying contracts." + +# Copy files needed by other containers to the appropriate shared volumes +echo "Copying deployment artifacts volume l1_deployment and deploy-config to volume l2_config" +cp -a /app/packages/contracts-bedrock/deployments/$DEPLOYMENT_CONTEXT /l1-deployment +cp /app/packages/contracts-bedrock/deploy-config/$DEPLOYMENT_CONTEXT.json /l2-config +openssl rand -hex 32 > /l2-config/l2-jwt.txt + +echo "Deployment successful. 
Exiting" diff --git a/stack_orchestrator/data/config/fixturenet-optimism/optimism-contracts/run.sh b/stack_orchestrator/data/config/fixturenet-optimism/optimism-contracts/run.sh deleted file mode 100755 index d878c03f..00000000 --- a/stack_orchestrator/data/config/fixturenet-optimism/optimism-contracts/run.sh +++ /dev/null @@ -1,131 +0,0 @@ -#!/bin/bash -set -e -if [ -n "$CERC_SCRIPT_DEBUG" ]; then - set -x -fi - -CERC_L1_CHAIN_ID="${CERC_L1_CHAIN_ID:-${DEFAULT_CERC_L1_CHAIN_ID}}" -CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}" - -CERC_L1_ACCOUNTS_CSV_URL="${CERC_L1_ACCOUNTS_CSV_URL:-${DEFAULT_CERC_L1_ACCOUNTS_CSV_URL}}" - -echo "Using L1 RPC endpoint ${CERC_L1_RPC}" - -IMPORT_1="import './verify-contract-deployment'" -IMPORT_2="import './rekey-json'" -IMPORT_3="import './send-balance'" - -# Append mounted tasks to tasks/index.ts file if not present -if ! grep -Fxq "$IMPORT_1" tasks/index.ts; then - echo "$IMPORT_1" >> tasks/index.ts - echo "$IMPORT_2" >> tasks/index.ts - echo "$IMPORT_3" >> tasks/index.ts -fi - -# Update the chainId in the hardhat config -sed -i "/getting-started/ {n; s/.*chainId.*/ chainId: $CERC_L1_CHAIN_ID,/}" hardhat.config.ts - -# Exit if a deployment already exists (on restarts) -# Note: fixturenet-eth-geth currently starts fresh on a restart -if [ -d "deployments/getting-started" ]; then - echo "Deployment directory deployments/getting-started found, checking SystemDictator deployment" - - # Read JSON file into variable - SYSTEM_DICTATOR_DETAILS=$(cat deployments/getting-started/SystemDictator.json) - - # Parse JSON into variables - SYSTEM_DICTATOR_ADDRESS=$(echo "$SYSTEM_DICTATOR_DETAILS" | jq -r '.address') - SYSTEM_DICTATOR_TXHASH=$(echo "$SYSTEM_DICTATOR_DETAILS" | jq -r '.transactionHash') - - if yarn hardhat verify-contract-deployment --contract "${SYSTEM_DICTATOR_ADDRESS}" --transaction-hash "${SYSTEM_DICTATOR_TXHASH}"; then - echo "Deployment verfication successful, exiting" - exit 0 - else - echo "Deployment verfication 
failed, please clear L1 deployment volume before starting" - exit 1 - fi -fi - -# Generate the L2 account addresses -yarn hardhat rekey-json --output /l2-accounts/keys.json - -# Read JSON file into variable -KEYS_JSON=$(cat /l2-accounts/keys.json) - -# Parse JSON into variables -ADMIN_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Admin.address') -ADMIN_PRIV_KEY=$(echo "$KEYS_JSON" | jq -r '.Admin.privateKey') -PROPOSER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Proposer.address') -BATCHER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Batcher.address') -SEQUENCER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Sequencer.address') - -# Get the private keys of L1 accounts -if [ -n "$CERC_L1_ACCOUNTS_CSV_URL" ] && \ - l1_accounts_response=$(curl -L --write-out '%{http_code}' --silent --output /dev/null "$CERC_L1_ACCOUNTS_CSV_URL") && \ - [ "$l1_accounts_response" -eq 200 ]; -then - echo "Fetching L1 account credentials using provided URL" - mkdir -p /geth-accounts - wget -O /geth-accounts/accounts.csv "$CERC_L1_ACCOUNTS_CSV_URL" - - CERC_L1_ADDRESS=$(head -n 1 /geth-accounts/accounts.csv | cut -d ',' -f 2) - CERC_L1_PRIV_KEY=$(head -n 1 /geth-accounts/accounts.csv | cut -d ',' -f 3) - CERC_L1_ADDRESS_2=$(awk -F, 'NR==2{print $(NF-1)}' /geth-accounts/accounts.csv) - CERC_L1_PRIV_KEY_2=$(awk -F, 'NR==2{print $NF}' /geth-accounts/accounts.csv) -else - echo "Couldn't fetch L1 account credentials, using them from env" -fi - -# Send balances to the above L2 addresses -yarn hardhat send-balance --to "${ADMIN_ADDRESS}" --amount 2 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started -yarn hardhat send-balance --to "${PROPOSER_ADDRESS}" --amount 5 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started -yarn hardhat send-balance --to "${BATCHER_ADDRESS}" --amount 1000 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started - -echo "Balances sent to L2 accounts" - -# Select a finalized L1 block as the starting point for roll ups -until FINALIZED_BLOCK=$(cast block finalized --rpc-url 
"$CERC_L1_RPC"); do - echo "Waiting for a finalized L1 block to exist, retrying after 10s" - sleep 10 -done - -L1_BLOCKNUMBER=$(echo "$FINALIZED_BLOCK" | awk '/number/{print $2}') -L1_BLOCKHASH=$(echo "$FINALIZED_BLOCK" | awk '/hash/{print $2}') -L1_BLOCKTIMESTAMP=$(echo "$FINALIZED_BLOCK" | awk '/timestamp/{print $2}') - -echo "Selected L1 block ${L1_BLOCKNUMBER} as the starting block for roll ups" - -# Update the deployment config -sed -i 's/"l2OutputOracleStartingTimestamp": TIMESTAMP/"l2OutputOracleStartingTimestamp": '"$L1_BLOCKTIMESTAMP"'/g' deploy-config/getting-started.json -jq --arg chainid "$CERC_L1_CHAIN_ID" '.l1ChainID = ($chainid | tonumber)' deploy-config/getting-started.json > tmp.json && mv tmp.json deploy-config/getting-started.json - -node update-config.js deploy-config/getting-started.json "$ADMIN_ADDRESS" "$PROPOSER_ADDRESS" "$BATCHER_ADDRESS" "$SEQUENCER_ADDRESS" "$L1_BLOCKHASH" - -echo "Updated the deployment config" - -# Create a .env file -echo "L1_RPC=$CERC_L1_RPC" > .env -echo "PRIVATE_KEY_DEPLOYER=$ADMIN_PRIV_KEY" >> .env - -echo "Deploying the L1 smart contracts, this will take a while..." 
- -# Deploy the L1 smart contracts -yarn hardhat deploy --network getting-started --tags l1 - -echo "Deployed the L1 smart contracts" - -# Read Proxy contract's JSON and get the address -PROXY_JSON=$(cat deployments/getting-started/Proxy__OVM_L1StandardBridge.json) -PROXY_ADDRESS=$(echo "$PROXY_JSON" | jq -r '.address') - -# Send balance to the above Proxy contract in L1 for reflecting balance in L2 -# First account -yarn hardhat send-balance --to "${PROXY_ADDRESS}" --amount 1 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started -# Second account -yarn hardhat send-balance --to "${PROXY_ADDRESS}" --amount 1 --private-key "${CERC_L1_PRIV_KEY_2}" --network getting-started - -echo "Balance sent to Proxy L2 contract" -echo "Use following accounts for transactions in L2:" -echo "${CERC_L1_ADDRESS}" -echo "${CERC_L1_ADDRESS_2}" -echo "Done" diff --git a/stack_orchestrator/data/config/fixturenet-optimism/optimism-contracts/update-config.js b/stack_orchestrator/data/config/fixturenet-optimism/optimism-contracts/update-config.js deleted file mode 100644 index 8a6c09d4..00000000 --- a/stack_orchestrator/data/config/fixturenet-optimism/optimism-contracts/update-config.js +++ /dev/null @@ -1,36 +0,0 @@ -const fs = require('fs') - -// Get the command-line argument -const configFile = process.argv[2] -const adminAddress = process.argv[3] -const proposerAddress = process.argv[4] -const batcherAddress = process.argv[5] -const sequencerAddress = process.argv[6] -const blockHash = process.argv[7] - -// Read the JSON file -const configData = fs.readFileSync(configFile) -const configObj = JSON.parse(configData) - -// Update the finalSystemOwner property with the ADMIN_ADDRESS value -configObj.finalSystemOwner = - configObj.portalGuardian = - configObj.controller = - configObj.l2OutputOracleChallenger = - configObj.proxyAdminOwner = - configObj.baseFeeVaultRecipient = - configObj.l1FeeVaultRecipient = - configObj.sequencerFeeVaultRecipient = - configObj.governanceTokenOwner = 
- adminAddress - -configObj.l2OutputOracleProposer = proposerAddress - -configObj.batchSenderAddress = batcherAddress - -configObj.p2pSequencerAddress = sequencerAddress - -configObj.l1StartingBlockTag = blockHash - -// Write the updated JSON object back to the file -fs.writeFileSync(configFile, JSON.stringify(configObj, null, 2)) diff --git a/stack_orchestrator/data/config/fixturenet-optimism/run-geth.sh b/stack_orchestrator/data/config/fixturenet-optimism/run-geth.sh new file mode 100755 index 00000000..b24fe867 --- /dev/null +++ b/stack_orchestrator/data/config/fixturenet-optimism/run-geth.sh @@ -0,0 +1,155 @@ +#!/bin/bash +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +# To facilitate deploying the Optimism contracts, a few additional arguments have been added to the geth start command +# Otherwise this script is unchanged from the image's default startup script + +ETHERBASE=`cat /opt/testnet/build/el/accounts.csv | head -1 | cut -d',' -f2` +NETWORK_ID=`cat /opt/testnet/el/el-config.yaml | grep 'chain_id' | awk '{ print $2 }'` +NETRESTRICT=`ip addr | grep inet | grep -v '127.0' | awk '{print $2}'` +CERC_ETH_DATADIR="${CERC_ETH_DATADIR:-$HOME/ethdata}" +CERC_PLUGINS_DIR="${CERC_PLUGINS_DIR:-/usr/local/lib/plugeth}" + +cd /opt/testnet/build/el +python3 -m http.server 9898 & +cd $HOME + +START_CMD="geth" +if [ "true" == "$CERC_REMOTE_DEBUG" ] && [ -x "/usr/local/bin/dlv" ]; then + START_CMD="/usr/local/bin/dlv --listen=:40000 --headless=true --api-version=2 --accept-multiclient exec /usr/local/bin/geth --continue --" +fi + +# See https://linuxconfig.org/how-to-propagate-a-signal-to-child-processes-from-a-bash-script +cleanup() { + echo "Signal received, cleaning up..." 
+ + # Kill the child process first (CERC_REMOTE_DEBUG=true uses dlv which starts geth as a child process) + pkill -P ${geth_pid} + sleep 2 + kill $(jobs -p) + + wait + echo "Done" +} +trap 'cleanup' SIGINT SIGTERM + +if [ "true" == "$RUN_BOOTNODE" ]; then + $START_CMD \ + --datadir="${CERC_ETH_DATADIR}" \ + --nodekeyhex="${BOOTNODE_KEY}" \ + --nodiscover \ + --ipcdisable \ + --networkid=${NETWORK_ID} \ + --netrestrict="${NETRESTRICT}" \ + & + + geth_pid=$! +else + cd /opt/testnet/accounts + ./import_keys.sh + + echo -n "$JWT" > /opt/testnet/build/el/jwtsecret + + if [ "$CERC_RUN_STATEDIFF" == "detect" ] && [ -n "$CERC_STATEDIFF_DB_HOST" ]; then + dig_result=$(dig $CERC_STATEDIFF_DB_HOST +short) + dig_status_code=$? + if [[ $dig_status_code = 0 && -n $dig_result ]]; then + echo "Statediff DB at $CERC_STATEDIFF_DB_HOST" + CERC_RUN_STATEDIFF="true" + else + echo "No statediff DB available." + CERC_RUN_STATEDIFF="false" + fi + fi + + STATEDIFF_OPTS="" + if [ "$CERC_RUN_STATEDIFF" == "true" ]; then + ready=0 + echo "Waiting for statediff DB..." + while [ $ready -eq 0 ]; do + sleep 1 + export PGPASSWORD="$CERC_STATEDIFF_DB_PASSWORD" + result=$(psql -h "$CERC_STATEDIFF_DB_HOST" \ + -p "$CERC_STATEDIFF_DB_PORT" \ + -U "$CERC_STATEDIFF_DB_USER" \ + -d "$CERC_STATEDIFF_DB_NAME" \ + -t -c 'select max(version_id) from goose_db_version;' 2>/dev/null | awk '{ print $1 }') + if [ -n "$result" ]; then + echo "DB ready..." 
+ if [ $result -ge $CERC_STATEDIFF_DB_GOOSE_MIN_VER ]; then + ready=1 + else + echo "DB not at required version (want $CERC_STATEDIFF_DB_GOOSE_MIN_VER, have $result)" + fi + fi + done + STATEDIFF_OPTS="--statediff \ + --statediff.db.host=$CERC_STATEDIFF_DB_HOST \ + --statediff.db.name=$CERC_STATEDIFF_DB_NAME \ + --statediff.db.nodeid=$CERC_STATEDIFF_DB_NODE_ID \ + --statediff.db.password=$CERC_STATEDIFF_DB_PASSWORD \ + --statediff.db.port=$CERC_STATEDIFF_DB_PORT \ + --statediff.db.user=$CERC_STATEDIFF_DB_USER \ + --statediff.db.logstatements=${CERC_STATEDIFF_DB_LOG_STATEMENTS:-false} \ + --statediff.db.copyfrom=${CERC_STATEDIFF_DB_COPY_FROM:-true} \ + --statediff.waitforsync=true \ + --statediff.workers=${CERC_STATEDIFF_WORKERS:-1} \ + --statediff.writing=true" + + if [ -d "${CERC_PLUGINS_DIR}" ]; then + # With plugeth, we separate the statediff options by prefixing with ' -- ' + STATEDIFF_OPTS="--pluginsdir "${CERC_PLUGINS_DIR}" -- ${STATEDIFF_OPTS}" + fi + fi + + # unlock account[0] + echo $ACCOUNT_PASSWORD > "$CERC_ETH_DATADIR/password" + + $START_CMD \ + --datadir="${CERC_ETH_DATADIR}" \ + --bootnodes="${ENODE}" \ + --allow-insecure-unlock \ + --password="${CERC_ETH_DATADIR}/password" \ + --unlock="$ETHERBASE" \ + --rpc.allow-unprotected-txs \ + --http \ + --http.addr="0.0.0.0" \ + --http.vhosts="*" \ + --http.api="${CERC_GETH_HTTP_APIS:-eth,web3,net,admin,personal,debug,statediff}" \ + --http.corsdomain="*" \ + --authrpc.addr="0.0.0.0" \ + --authrpc.vhosts="*" \ + --authrpc.jwtsecret="/opt/testnet/build/el/jwtsecret" \ + --ws \ + --ws.addr="0.0.0.0" \ + --ws.origins="*" \ + --ws.api="${CERC_GETH_WS_APIS:-eth,web3,net,admin,personal,debug,statediff}" \ + --http.corsdomain="*" \ + --networkid="${NETWORK_ID}" \ + --netrestrict="${NETRESTRICT}" \ + --gcmode archive \ + --txlookuplimit=0 \ + --cache.preimages \ + --syncmode=full \ + --mine \ + --miner.threads=1 \ + --metrics \ + --metrics.addr="0.0.0.0" \ + --verbosity=${CERC_GETH_VERBOSITY:-3} \ + 
--log.vmodule="${CERC_GETH_VMODULE:-statediff/*=5}" \ + --miner.etherbase="${ETHERBASE}" \ + ${STATEDIFF_OPTS} \ + & + + geth_pid=$! +fi + +wait $geth_pid + +if [ "true" == "$CERC_KEEP_RUNNING_AFTER_GETH_EXIT" ]; then + while [ 1 -eq 1 ]; do + sleep 60 + done +fi diff --git a/stack_orchestrator/data/config/fixturenet-optimism/run-op-batcher.sh b/stack_orchestrator/data/config/fixturenet-optimism/run-op-batcher.sh index 18955545..29a65d5d 100755 --- a/stack_orchestrator/data/config/fixturenet-optimism/run-op-batcher.sh +++ b/stack_orchestrator/data/config/fixturenet-optimism/run-op-batcher.sh @@ -6,22 +6,14 @@ fi CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}" -# Get Batcher key from keys.json -BATCHER_KEY=$(jq -r '.Batcher.privateKey' /l2-accounts/keys.json | tr -d '"') +# Start op-batcher +L2_RPC="http://op-geth:8545" +ROLLUP_RPC="http://op-node:8547" +BATCHER_KEY=$(cat /l2-accounts/accounts.json | jq -r .BatcherKey) -cleanup() { - echo "Signal received, cleaning up..." - kill ${batcher_pid} - - wait - echo "Done" -} -trap 'cleanup' INT TERM - -# Run op-batcher op-batcher \ - --l2-eth-rpc=http://op-geth:8545 \ - --rollup-rpc=http://op-node:8547 \ + --l2-eth-rpc=$L2_RPC \ + --rollup-rpc=$ROLLUP_RPC \ --poll-interval=1s \ --sub-safety-margin=6 \ --num-confirmations=1 \ @@ -32,8 +24,4 @@ op-batcher \ --rpc.enable-admin \ --max-channel-duration=1 \ --l1-eth-rpc=$CERC_L1_RPC \ - --private-key=$BATCHER_KEY \ - & - -batcher_pid=$! 
-wait $batcher_pid + --private-key="${BATCHER_KEY#0x}" diff --git a/stack_orchestrator/data/config/fixturenet-optimism/run-op-geth.sh b/stack_orchestrator/data/config/fixturenet-optimism/run-op-geth.sh index 8b521f85..9b06cedc 100755 --- a/stack_orchestrator/data/config/fixturenet-optimism/run-op-geth.sh +++ b/stack_orchestrator/data/config/fixturenet-optimism/run-op-geth.sh @@ -4,61 +4,36 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then set -x fi -# TODO: Add in container build or use other tool -echo "Installing jq" -apk update && apk add jq +l2_genesis_file="/l2-config/genesis.json" -# Get Sequencer key from keys.json -SEQUENCER_KEY=$(jq -r '.Sequencer.privateKey' /l2-accounts/keys.json | tr -d '"') +# Check for genesis file; if necessary, wait on op-node to generate +timeout=300 # 5 minutes +start_time=$(date +%s) +elapsed_time=0 +echo "Checking for L2 genesis file at location $l2_genesis_file" +while [ ! -f "$l2_genesis_file" ] && [ $elapsed_time -lt $timeout ]; do + echo "Waiting for L2 genesis file to be generated..." + sleep 10 + current_time=$(date +%s) + elapsed_time=$((current_time - start_time)) +done -# Initialize op-geth if datadir/geth not found -if [ -f /op-node/jwt.txt ] && [ -d datadir/geth ]; then - echo "Found existing datadir, checking block signer key" - - BLOCK_SIGNER_KEY=$(cat datadir/block-signer-key) - - if [ "$SEQUENCER_KEY" = "$BLOCK_SIGNER_KEY" ]; then - echo "Sequencer and block signer keys match, skipping initialization" - else - echo "Sequencer and block signer keys don't match, please clear L2 geth data volume before starting" - exit 1 - fi -else - echo "Initializing op-geth" - - mkdir -p datadir - echo "pwd" > datadir/password - echo $SEQUENCER_KEY > datadir/block-signer-key - - geth account import --datadir=datadir --password=datadir/password datadir/block-signer-key - - while [ ! -f "/op-node/jwt.txt" ] - do - echo "Config files not created. Checking after 5 seconds." 
- sleep 5 - done - - echo "Config files created by op-node, proceeding with the initialization..." - - geth init --datadir=datadir /op-node/genesis.json - echo "Node Initialized" +if [ ! -f "$l2_genesis_file" ]; then + echo "L2 genesis file not found after timeout of $timeout seconds. Exiting..." + exit 1 fi -SEQUENCER_ADDRESS=$(jq -r '.Sequencer.address' /l2-accounts/keys.json | tr -d '"') -echo "SEQUENCER_ADDRESS: ${SEQUENCER_ADDRESS}" +# Initialize geth from our generated L2 genesis file (if not already initialized) +data_dir="/datadir" +if [ ! -d "$datadir/geth" ]; then + geth init --datadir=$data_dir $l2_genesis_file +fi -cleanup() { - echo "Signal received, cleaning up..." - kill ${geth_pid} +# Start op-geth +jwt_file="/l2-config/l2-jwt.txt" - wait - echo "Done" -} -trap 'cleanup' INT TERM - -# Run op-geth geth \ - --datadir ./datadir \ + --datadir=$data_dir \ --http \ --http.corsdomain="*" \ --http.vhosts="*" \ @@ -77,14 +52,5 @@ geth \ --authrpc.vhosts="*" \ --authrpc.addr=0.0.0.0 \ --authrpc.port=8551 \ - --authrpc.jwtsecret=/op-node/jwt.txt \ - --rollup.disabletxpoolgossip=true \ - --password=./datadir/password \ - --allow-insecure-unlock \ - --mine \ - --miner.etherbase=$SEQUENCER_ADDRESS \ - --unlock=$SEQUENCER_ADDRESS \ - & - -geth_pid=$! 
-wait $geth_pid + --authrpc.jwtsecret=$jwt_file \ + --rollup.disabletxpoolgossip=true diff --git a/stack_orchestrator/data/config/fixturenet-optimism/run-op-node.sh b/stack_orchestrator/data/config/fixturenet-optimism/run-op-node.sh index 516cf0a5..60a96855 100755 --- a/stack_orchestrator/data/config/fixturenet-optimism/run-op-node.sh +++ b/stack_orchestrator/data/config/fixturenet-optimism/run-op-node.sh @@ -4,23 +4,42 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then set -x fi +CERC_L1_CHAIN_ID="${CERC_L1_CHAIN_ID:-${DEFAULT_CERC_L1_CHAIN_ID}}" CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}" +DEPLOYMENT_CONTEXT="$CERC_L1_CHAIN_ID" -# Get Sequencer key from keys.json -SEQUENCER_KEY=$(jq -r '.Sequencer.privateKey' /l2-accounts/keys.json | tr -d '"') +deploy_config_file="/l2-config/$DEPLOYMENT_CONTEXT.json" +deployment_dir="/l1-deployment/$DEPLOYMENT_CONTEXT" +genesis_outfile="/l2-config/genesis.json" +rollup_outfile="/l2-config/rollup.json" + +# Generate L2 genesis (if not already done) +if [ ! -f "$genesis_outfile" ] || [ ! -f "$rollup_outfile" ]; then + op-node genesis l2 \ + --deploy-config $deploy_config_file \ + --deployment-dir $deployment_dir \ + --outfile.l2 $genesis_outfile \ + --outfile.rollup $rollup_outfile \ + --l1-rpc $CERC_L1_RPC +fi + +# Start op-node +SEQ_KEY=$(cat /l2-accounts/accounts.json | jq -r .SeqKey) +jwt_file=/l2-config/l2-jwt.txt +L2_AUTH="http://op-geth:8551" +RPC_KIND=any # this can optionally be set to a preset for common node providers like Infura, Alchemy, etc. 
-# Run op-node op-node \ - --l2=http://op-geth:8551 \ - --l2.jwt-secret=/op-node-data/jwt.txt \ - --sequencer.enabled \ - --sequencer.l1-confs=3 \ - --verifier.l1-confs=3 \ - --rollup.config=/op-node-data/rollup.json \ - --rpc.addr=0.0.0.0 \ - --rpc.port=8547 \ - --p2p.disable \ - --rpc.enable-admin \ - --p2p.sequencer.key=$SEQUENCER_KEY \ - --l1=$CERC_L1_RPC \ - --l1.rpckind=any + --l2=$L2_AUTH \ + --l2.jwt-secret=$jwt_file \ + --sequencer.enabled \ + --sequencer.l1-confs=5 \ + --verifier.l1-confs=4 \ + --rollup.config=$rollup_outfile \ + --rpc.addr=0.0.0.0 \ + --rpc.port=8547 \ + --p2p.disable \ + --rpc.enable-admin \ + --p2p.sequencer.key="${SEQ_KEY#0x}" \ + --l1=$CERC_L1_RPC \ + --l1.rpckind=$RPC_KIND diff --git a/stack_orchestrator/data/config/fixturenet-optimism/run-op-proposer.sh b/stack_orchestrator/data/config/fixturenet-optimism/run-op-proposer.sh index 09746760..092705ca 100755 --- a/stack_orchestrator/data/config/fixturenet-optimism/run-op-proposer.sh +++ b/stack_orchestrator/data/config/fixturenet-optimism/run-op-proposer.sh @@ -5,32 +5,18 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then fi CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}" +CERC_L1_CHAIN_ID="${CERC_L1_CHAIN_ID:-${DEFAULT_CERC_L1_CHAIN_ID}}" +DEPLOYMENT_CONTEXT="$CERC_L1_CHAIN_ID" -# Read the L2OutputOracle contract address from the deployment -L2OO_DEPLOYMENT=$(cat /contracts-bedrock/deployments/getting-started/L2OutputOracle.json) -L2OO_ADDR=$(echo "$L2OO_DEPLOYMENT" | jq -r '.address') +# Start op-proposer +ROLLUP_RPC="http://op-node:8547" +PROPOSER_KEY=$(cat /l2-accounts/accounts.json | jq -r .ProposerKey) +L2OO_ADDR=$(cat /l1-deployment/$DEPLOYMENT_CONTEXT/L2OutputOracleProxy.json | jq -r .address) -# Get Proposer key from keys.json -PROPOSER_KEY=$(jq -r '.Proposer.privateKey' /l2-accounts/keys.json | tr -d '"') - -cleanup() { - echo "Signal received, cleaning up..." 
- kill ${proposer_pid} - - wait - echo "Done" -} -trap 'cleanup' INT TERM - -# Run op-proposer op-proposer \ - --poll-interval 12s \ - --rpc.port 8560 \ - --rollup-rpc http://op-node:8547 \ - --l2oo-address $L2OO_ADDR \ - --private-key $PROPOSER_KEY \ - --l1-eth-rpc $CERC_L1_RPC \ - & - -proposer_pid=$! -wait $proposer_pid + --poll-interval=12s \ + --rpc.port=8560 \ + --rollup-rpc=$ROLLUP_RPC \ + --l2oo-address="${L2OO_ADDR#0x}" \ + --private-key="${PROPOSER_KEY#0x}" \ + --l1-eth-rpc=$CERC_L1_RPC diff --git a/stack_orchestrator/data/container-build/cerc-optimism-contracts/Dockerfile b/stack_orchestrator/data/container-build/cerc-optimism-contracts/Dockerfile index ed9c4b22..2499df0a 100644 --- a/stack_orchestrator/data/container-build/cerc-optimism-contracts/Dockerfile +++ b/stack_orchestrator/data/container-build/cerc-optimism-contracts/Dockerfile @@ -17,6 +17,6 @@ WORKDIR /app COPY . . RUN echo "Building optimism" && \ - yarn && yarn build + pnpm install && pnpm build WORKDIR /app/packages/contracts-bedrock diff --git a/stack_orchestrator/data/container-build/cerc-optimism-op-batcher/Dockerfile b/stack_orchestrator/data/container-build/cerc-optimism-op-batcher/Dockerfile index 23d6b629..f52e75b9 100644 --- a/stack_orchestrator/data/container-build/cerc-optimism-op-batcher/Dockerfile +++ b/stack_orchestrator/data/container-build/cerc-optimism-op-batcher/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.19.0-alpine3.15 as builder +FROM golang:1.21.0-alpine3.18 as builder ARG VERSION=v0.0.0 @@ -9,7 +9,7 @@ COPY ./op-batcher /app/op-batcher COPY ./op-bindings /app/op-bindings COPY ./op-node /app/op-node COPY ./op-service /app/op-service -COPY ./op-signer /app/op-signer +#COPY ./op-signer /app/op-signer COPY ./go.mod /app/go.mod COPY ./go.sum /app/go.sum @@ -23,7 +23,7 @@ ARG TARGETOS TARGETARCH RUN make op-batcher VERSION="$VERSION" GOOS=$TARGETOS GOARCH=$TARGETARCH -FROM alpine:3.15 +FROM alpine:3.18 RUN apk add --no-cache jq bash diff --git 
a/stack_orchestrator/data/container-build/cerc-optimism-op-node/Dockerfile b/stack_orchestrator/data/container-build/cerc-optimism-op-node/Dockerfile index 17d273b6..ad63bb2c 100644 --- a/stack_orchestrator/data/container-build/cerc-optimism-op-node/Dockerfile +++ b/stack_orchestrator/data/container-build/cerc-optimism-op-node/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.19.0-alpine3.15 as builder +FROM golang:1.21.0-alpine3.18 as builder ARG VERSION=v0.0.0 @@ -21,7 +21,7 @@ ARG TARGETOS TARGETARCH RUN make op-node VERSION="$VERSION" GOOS=$TARGETOS GOARCH=$TARGETARCH -FROM alpine:3.15 +FROM alpine:3.18 RUN apk add --no-cache openssl jq diff --git a/stack_orchestrator/data/container-build/cerc-optimism-op-proposer/Dockerfile b/stack_orchestrator/data/container-build/cerc-optimism-op-proposer/Dockerfile index e91aa4bb..9032a7ff 100644 --- a/stack_orchestrator/data/container-build/cerc-optimism-op-proposer/Dockerfile +++ b/stack_orchestrator/data/container-build/cerc-optimism-op-proposer/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.19.0-alpine3.15 as builder +FROM golang:1.21.0-alpine3.18 as builder ARG VERSION=v0.0.0 @@ -9,7 +9,7 @@ COPY ./op-proposer /app/op-proposer COPY ./op-bindings /app/op-bindings COPY ./op-node /app/op-node COPY ./op-service /app/op-service -COPY ./op-signer /app/op-signer +#COPY ./op-signer /app/op-signer COPY ./go.mod /app/go.mod COPY ./go.sum /app/go.sum COPY ./.git /app/.git @@ -22,7 +22,7 @@ ARG TARGETOS TARGETARCH RUN make op-proposer VERSION="$VERSION" GOOS=$TARGETOS GOARCH=$TARGETARCH -FROM alpine:3.15 +FROM alpine:3.18 RUN apk add --no-cache jq bash diff --git a/stack_orchestrator/data/stacks/fixturenet-optimism/README.md b/stack_orchestrator/data/stacks/fixturenet-optimism/README.md index 4d933f83..dd681aa5 100644 --- a/stack_orchestrator/data/stacks/fixturenet-optimism/README.md +++ b/stack_orchestrator/data/stacks/fixturenet-optimism/README.md @@ -12,6 +12,7 @@ Clone required repositories: laconic-so --stack fixturenet-optimism 
setup-repositories # If this throws an error as a result of being already checked out to a branch/tag in a repo, remove the repositories mentioned below and re-run the command +# The repositories are located in $HOME/cerc by default ``` Build the container images: @@ -39,20 +40,53 @@ This should create the required docker images in the local image registry: * `cerc/optimism-op-batcher` * `cerc/optimism-op-proposer` -## Deploy -Deploy the stack: +## Create a deployment +First, create a spec file for the deployment, which will map the stack's ports and volumes to the host: ```bash -laconic-so --stack fixturenet-optimism deploy up +laconic-so --stack fixturenet-optimism deploy init --map-ports-to-host any-fixed-random --output fixturenet-optimism-spec.yml ``` -The `fixturenet-optimism-contracts` service takes a while to complete running as it: -1. waits for the 'Merge' to happen on L1 -2. waits for a finalized block to exist on L1 (so that it can be taken as a starting block for roll ups) -3. deploys the L1 contracts -It may restart a few times after running into errors. +### Ports +It is usually necessary to expose certain container ports on one or more the host's addresses to allow incoming connections. +Any ports defined in the Docker compose file are exposed by default with random port assignments, bound to "any" interface (IP address 0.0.0.0), but the port mappings can be customized by editing the "spec" file generated by `laconic-so deploy init`. +In addition, a stack-wide port mapping "recipe" can be applied at the time the +`laconic-so deploy init` command is run, by supplying the desired recipe with the `--map-ports-to-host` option. 
The following recipes are supported:
+| Recipe | Host Port Mapping |
+|--------|-------------------|
+| any-variable-random | Bind to 0.0.0.0 using a random port assigned at start time (default) |
+| localhost-same | Bind to 127.0.0.1 using the same port number as exposed by the containers |
+| any-same | Bind to 0.0.0.0 using the same port number as exposed by the containers |
+| localhost-fixed-random | Bind to 127.0.0.1 using a random port number selected at the time the command is run (not checked for already in use)|
+| any-fixed-random | Bind to 0.0.0.0 using a random port number selected at the time the command is run (not checked for already in use) |
+
+For example, you may wish to use `any-fixed-random` to generate the initial mappings and then edit the spec file to set the `fixturenet-eth-geth-1` RPC to port 8545 and the `op-geth` RPC to port 9545 on the host.
+
+Or, you may wish to use `any-same` for the initial mappings -- in which case you'll have to edit the spec file to ensure the various geth instances aren't all trying to publish to host ports 8545/8546 at once.
+
+### Data volumes
+Container data volumes are bind-mounted to specified paths in the host filesystem.
+The default setup (generated by `laconic-so deploy init`) places the volumes in the `./data` subdirectory of the deployment directory. The default mappings can be customized by editing the "spec" file generated by `laconic-so deploy init`.
+
+---
+Once you've made any needed changes to the spec file, create a deployment from it:
+```bash
+laconic-so --stack fixturenet-optimism deploy create --spec-file fixturenet-optimism-spec.yml --deployment-dir fixturenet-optimism-deployment
+```
+
+## Start the stack
+Start the deployment:
+```bash
+laconic-so deployment --dir fixturenet-optimism-deployment start
+```
+1. The `fixturenet-eth` L1 chain will start up first and begin producing blocks.
+2. 
The `fixturenet-optimism-contracts` service will configure and deploy the Optimism contracts to L1, exiting when complete. This may take several minutes; you can follow the progress by following the container's logs (see below). +3. The `op-node` and `op-geth` services will initialize themselves (if not already initialized) and start +4. The remaining services, `op-batcher` and `op-proposer` will start + +### Logs To list and monitor the running containers: ```bash @@ -65,22 +99,69 @@ docker ps docker logs -f ``` -## Clean up +## Example: bridge some ETH from L1 to L2 -Stop all services running in the background: +Send some ETH from the desired account to the `L1StandardBridgeProxy` contract on L1 to test bridging to L2. +We can use the testing account `0xe6CE22afe802CAf5fF7d3845cec8c736ecc8d61F` which is pre-funded and unlocked, and the `cerc/foundry:local` container to make use of the `cast` cli. + +1. Note the docker network the stack is running on: ```bash -laconic-so --stack fixturenet-optimism deploy down 30 +docker network ls +# The network name will be something like laconic-[some_hash]_default +``` +2. Set some variables: +```bash +L1_RPC=http://fixturenet-eth-geth-1:8545 +L2_RPC=http://op-geth:8545 +NETWORK= +DEPLOYMENT_CONTEXT= +ACCOUNT=0xe6CE22afe802CAf5fF7d3845cec8c736ecc8d61F ``` -Clear volumes created by this stack: +If you need to check the L1 chain-id, you can use: +```bash +docker run --rm --network $NETWORK cerc/foundry:local "cast chain-id --rpc-url $L1_RPC" +``` + +3. Check the account starting balance on L2 (it should be 0): +```bash +docker run --rm --network $NETWORK cerc/foundry:local "cast balance $ACCOUNT --rpc-url $L2_RPC" +# 0 +``` + +4. 
Read the bridge contract address from the L1 deployment records in the `op-node` container: +```bash +# get the container id for op-node +NODE_CONTAINER=$(docker ps --filter "name=op-node" -q) +BRIDGE=$(docker exec $NODE_CONTAINER cat /l1-deployment/$DEPLOYMENT_CONTEXT/L1StandardBridgeProxy.json | jq -r .address) +``` + +5. Use cast to send some ETH to the bridge contract: +```bash +docker run --rm --network $NETWORK cerc/foundry:local "cast send --from $ACCOUNT --value 1ether $BRIDGE --rpc-url $L1_RPC" +``` + +6. Allow a couple minutes for the bridge to complete + +7. Check the L2 balance again (it should show the bridged funds): +```bash +docker run --rm --network $NETWORK cerc/foundry:local "cast balance $ACCOUNT --rpc-url $L2_RPC" +# 1000000000000000000 +``` + +## Clean up + +To stop all services running in the background, while preserving chain data: ```bash -# List all relevant volumes -docker volume ls -q --filter "name=.*l1_deployment|.*l2_accounts|.*l2_config|.*l2_geth_data" +laconic-so deployment --dir fixturenet-optimism-deployment stop +``` -# Remove all the listed volumes -docker volume rm $(docker volume ls -q --filter "name=.*l1_deployment|.*l2_accounts|.*l2_config|.*l2_geth_data") +To stop all services and also delete chain data: + +```bash +laconic-so deployment --dir fixturenet-optimism-deployment stop --delete-volumes ``` ## Troubleshooting diff --git a/stack_orchestrator/data/stacks/fixturenet-optimism/deploy/commands.py b/stack_orchestrator/data/stacks/fixturenet-optimism/deploy/commands.py new file mode 100644 index 00000000..43aa6ca7 --- /dev/null +++ b/stack_orchestrator/data/stacks/fixturenet-optimism/deploy/commands.py @@ -0,0 +1,37 @@ +# Copyright © 2023 Vulcanize + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+ +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . +from app.deploy.deploy_types import DeployCommandContext, LaconicStackSetupCommand, DeploymentContext +from ruamel.yaml import YAML + +def create(context: DeploymentContext, extra_args): + # Slightly modify the base fixturenet-eth compose file to replace the startup script for fixturenet-eth-geth-1 + # We need to start geth with the flag to allow non eip-155 compliant transactions in order to publish the + # deterministic-deployment-proxy contract, which itself is a prereq for Optimism contract deployment + fixturenet_eth_compose_file = context.deployment_dir.joinpath('compose', 'docker-compose-fixturenet-eth.yml') + + with open(fixturenet_eth_compose_file, 'r') as yaml_file: + yaml=YAML() + yaml_data = yaml.load(yaml_file) + + new_script = '../config/fixturenet-optimism/run-geth.sh:/opt/testnet/run.sh' + + if new_script not in yaml_data['services']['fixturenet-eth-geth-1']['volumes']: + yaml_data['services']['fixturenet-eth-geth-1']['volumes'].append(new_script) + + with open(fixturenet_eth_compose_file, 'w') as yaml_file: + yaml=YAML() + yaml.dump(yaml_data, yaml_file) + + return None diff --git a/stack_orchestrator/data/stacks/fixturenet-optimism/l2-only.md b/stack_orchestrator/data/stacks/fixturenet-optimism/l2-only.md index 4e9daf43..4299ca8d 100644 --- a/stack_orchestrator/data/stacks/fixturenet-optimism/l2-only.md +++ b/stack_orchestrator/data/stacks/fixturenet-optimism/l2-only.md @@ -1,4 +1,4 @@ -# fixturenet-optimism +# fixturenet-optimism (L2-only) Instructions to setup and deploy L2 fixturenet using [Optimism](https://stack.optimism.io) @@ -28,9 +28,43 @@ This should create the required 
docker images in the local image registry: * `cerc/optimism-op-batcher` * `cerc/optimism-op-proposer` -## Deploy +## Create a deployment -Create and update an env file to be used in the next step ([defaults](../../config/fixturenet-optimism/l1-params.env)): +First, create a spec file for the deployment, which will map the stack's ports and volumes to the host: +```bash +laconic-so --stack fixturenet-optimism deploy init --map-ports-to-host any-fixed-random --output fixturenet-optimism-spec.yml +``` +### Ports +It is usually necessary to expose certain container ports on one or more the host's addresses to allow incoming connections. +Any ports defined in the Docker compose file are exposed by default with random port assignments, bound to "any" interface (IP address 0.0.0.0), but the port mappings can be customized by editing the "spec" file generated by `laconic-so deploy init`. + +In addition, a stack-wide port mapping "recipe" can be applied at the time the +`laconic-so deploy init` command is run, by supplying the desired recipe with the `--map-ports-to-host` option. The following recipes are supported: +| Recipe | Host Port Mapping | +|--------|-------------------| +| any-variable-random | Bind to 0.0.0.0 using a random port assigned at start time (default) | +| localhost-same | Bind to 127.0.0.1 using the same port number as exposed by the containers | +| any-same | Bind to 0.0.0.0 using the same port number as exposed by the containers | +| localhost-fixed-random | Bind to 127.0.0.1 using a random port number selected at the time the command is run (not checked for already in use)| +| any-fixed-random | Bind to 0.0.0.0 using a random port number selected at the time the command is run (not checked for already in use) | + +For example, you may wish to use `any-fixed-random` to generate the initial mappings and then edit the spec file to set the `op-geth` RPC to an easy to remember port like 8545 or 9545 on the host. 
+ +### Data volumes +Container data volumes are bind-mounted to specified paths in the host filesystem. +The default setup (generated by `laconic-so deploy init`) places the volumes in the `./data` subdirectory of the deployment directory. The default mappings can be customized by editing the "spec" file generated by `laconic-so deploy init`. + +--- +Once you've made any needed changes to the spec file, create a deployment from it: +```bash +laconic-so --stack fixturenet-optimism deploy create --spec-file fixturenet-optimism-spec.yml --deployment-dir fixturenet-optimism-deployment +``` + +Finally, open the `stack.yml` file inside your deployment directory and, under the `pods:` section, remove (or comment out) the entry for `fixturenet-eth`. This will prevent the deployment from trying to spin up a new L1 chain when starting the stack. + +## Set chain env variables + +Inside the deployment directory, open the file `config.env` and add the following variables to point the stack at your L1 rpc and provide account credentials ([defaults](../../config/fixturenet-optimism/l1-params.env)): ```bash # External L1 endpoint @@ -45,30 +79,29 @@ Create and update an env file to be used in the next step ([defaults](../../conf CERC_L1_ACCOUNTS_CSV_URL= # OR - # Specify the required account credentials + # Specify the required account credentials for the Admin account + # Other generated accounts will be funded from this account, so it should contain ~20 Eth CERC_L1_ADDRESS= CERC_L1_PRIV_KEY= - CERC_L1_ADDRESS_2= - CERC_L1_PRIV_KEY_2= ``` -* NOTE: If L1 is running on the host machine, use `host.docker.internal` as the hostname to access the host port - -Deploy the stack: +* NOTE: If L1 is running on the host machine, use `host.docker.internal` as the hostname to access the host port, or use the `ip a` command to find the IP address of the `docker0` interface (this will usually be something like `172.17.0.1` or `172.18.0.1`) +## Start the stack +Start the deployment: ```bash 
-laconic-so --stack fixturenet-optimism deploy --include fixturenet-optimism --env-file up +laconic-so deployment --dir fixturenet-optimism-deployment start ``` +1. The stack will check for a response from the L1 endpoint specified in your env file. +2. The `fixturenet-optimism-contracts` service will configure and deploy the Optimism contracts to L1, exiting when complete. This may take several minutes; you can follow the progress by following the container's logs (see below). +3. The `op-node` and `op-geth` services will initialize themselves (if not already initialized) and start +4. The remaining services, `op-batcher` and `op-proposer` will start -The `fixturenet-optimism-contracts` service may take a while (`~15 mins`) to complete running as it: -1. waits for the 'Merge' to happen on L1 -2. waits for a finalized block to exist on L1 (so that it can be taken as a starting block for roll ups) -3. deploys the L1 contracts - -To list down and monitor the running containers: +### Logs +To list and monitor the running containers: ```bash -laconic-so --stack fixturenet-optimism deploy --include fixturenet-optimism ps +laconic-so --stack fixturenet-optimism deploy ps # With status docker ps @@ -79,20 +112,16 @@ docker logs -f ## Clean up -Stop all services running in the background: +To stop all L2 services running in the background, while preserving chain data: ```bash -laconic-so --stack fixturenet-optimism deploy --include fixturenet-optimism down 30 +laconic-so deployment --dir fixturenet-optimism-deployment stop ``` -Clear volumes created by this stack: +To stop all L2 services and also delete chain data: ```bash -# List all relevant volumes -docker volume ls -q --filter "name=.*l1_deployment|.*l2_accounts|.*l2_config|.*l2_geth_data" - -# Remove all the listed volumes -docker volume rm $(docker volume ls -q --filter "name=.*l1_deployment|.*l2_accounts|.*l2_config|.*l2_geth_data") +laconic-so deployment --dir fixturenet-optimism-deployment stop --delete-volumes 
``` ## Troubleshooting diff --git a/stack_orchestrator/data/stacks/fixturenet-optimism/stack.yml b/stack_orchestrator/data/stacks/fixturenet-optimism/stack.yml index 75c7620b..bca34b16 100644 --- a/stack_orchestrator/data/stacks/fixturenet-optimism/stack.yml +++ b/stack_orchestrator/data/stacks/fixturenet-optimism/stack.yml @@ -5,8 +5,8 @@ repos: - git.vdb.to/cerc-io/go-ethereum@v1.11.6-statediff-v5 - git.vdb.to/cerc-io/lighthouse - github.com/dboreham/foundry - - github.com/ethereum-optimism/optimism@v1.0.4 - - github.com/ethereum-optimism/op-geth@v1.101105.2 + - github.com/ethereum-optimism/optimism@op-node/v1.3.0 + - github.com/ethereum-optimism/op-geth@v1.101304.0 containers: - cerc/go-ethereum - cerc/lighthouse From 3db443b2bbcc26342e9b7fa63518efef8754b036 Mon Sep 17 00:00:00 2001 From: iskay Date: Fri, 10 Nov 2023 20:42:02 +0000 Subject: [PATCH 23/62] fix commands import --- .../data/stacks/fixturenet-optimism/deploy/commands.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack_orchestrator/data/stacks/fixturenet-optimism/deploy/commands.py b/stack_orchestrator/data/stacks/fixturenet-optimism/deploy/commands.py index 43aa6ca7..76668135 100644 --- a/stack_orchestrator/data/stacks/fixturenet-optimism/deploy/commands.py +++ b/stack_orchestrator/data/stacks/fixturenet-optimism/deploy/commands.py @@ -12,7 +12,7 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . -from app.deploy.deploy_types import DeployCommandContext, LaconicStackSetupCommand, DeploymentContext +from stack_orchestrator.deploy.deployment_context import DeploymentContext from ruamel.yaml import YAML def create(context: DeploymentContext, extra_args): From 414b8870363f183cc719ac75726f845929137c13 Mon Sep 17 00:00:00 2001 From: Thomas E Lackey Date: Fri, 10 Nov 2023 17:44:25 -0600 Subject: [PATCH 24/62] Allow setting build tool (npm/yarn) and next.js version. 
(#639) * Allow setting build tool (npm/yarn) and next.js version. --- .../cerc-nextjs-base/Dockerfile | 19 +++---------- .../cerc-nextjs-base/Dockerfile.webapp | 4 +++ .../scripts/apply-runtime-env.sh | 9 +++++++ .../cerc-nextjs-base/scripts/build-app.sh | 27 +++++++++++++++---- .../scripts/start-serving-app.sh | 11 +++++++- tests/webapp-test/run-webapp-test.sh | 8 +++--- 6 files changed, 52 insertions(+), 26 deletions(-) diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile b/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile index 435d8c73..69e38932 100644 --- a/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile @@ -28,28 +28,15 @@ RUN \ # [Optional] Uncomment this section to install additional OS packages. RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ - && apt-get -y install --no-install-recommends jq gettext-base - -# [Optional] Uncomment if you want to install an additional version of node using nvm -# ARG EXTRA_NODE_VERSION=10 -# RUN su node -c "source /usr/local/share/nvm/nvm.sh && nvm install ${EXTRA_NODE_VERSION}" - -# We do this to get a yq binary from the published container, for the correct architecture we're building here -# COPY --from=docker.io/mikefarah/yq:latest /usr/bin/yq /usr/local/bin/yq - -COPY /scripts /scripts + && apt-get -y install --no-install-recommends jq gettext-base moreutils # [Optional] Uncomment if you want to install more global node modules # RUN su node -c "npm install -g " -# RUN mkdir -p /config -# COPY ./config.yml /config - -# Install simple web server for now (use nginx perhaps later) -# RUN yarn global add http-server - # Expose port for http EXPOSE 3000 +COPY /scripts /scripts + # Default command sleeps forever so docker doesn't kill it CMD ["/scripts/start-serving-app.sh"] diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile.webapp 
b/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile.webapp index f4b5d4d8..23f42385 100644 --- a/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile.webapp +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile.webapp @@ -1,4 +1,8 @@ FROM cerc/nextjs-base:local + +ARG CERC_NEXT_VERSION=latest +ARG CERC_BUILD_TOOL + WORKDIR /app COPY . . RUN rm -rf node_modules build .next* diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/apply-runtime-env.sh b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/apply-runtime-env.sh index 793333c3..1e30b634 100755 --- a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/apply-runtime-env.sh +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/apply-runtime-env.sh @@ -8,6 +8,15 @@ WORK_DIR="${1:-./}" SRC_DIR="${2:-.next}" TRG_DIR="${3:-.next-r}" +CERC_BUILD_TOOL="${CERC_BUILD_TOOL}" +if [ -z "$CERC_BUILD_TOOL" ]; then + if [ -f "yarn.lock" ]; then + CERC_BUILD_TOOL=npm + else + CERC_BUILD_TOOL=yarn + fi +fi + cd "${WORK_DIR}" || exit 1 rm -rf "$TRG_DIR" diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/build-app.sh b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/build-app.sh index c2115f6a..83977268 100755 --- a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/build-app.sh +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/build-app.sh @@ -4,8 +4,17 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then set -x fi -SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +CERC_NEXT_VERSION="${CERC_NEXT_VERSION:-^14.0.2}" +CERC_BUILD_TOOL="${CERC_BUILD_TOOL}" +if [ -z "$CERC_BUILD_TOOL" ]; then + if [ -f "yarn.lock" ] && [ ! 
-f "package-lock.json" ]; then + CERC_BUILD_TOOL=yarn + else + CERC_BUILD_TOOL=npm + fi +fi +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) WORK_DIR="${1:-/app}" cd "${WORK_DIR}" || exit 1 @@ -20,6 +29,7 @@ if [ $? -ne 0 ]; then fi js-beautify next.config.dist > next.config.js +echo "" >> next.config.js WEBPACK_REQ_LINE=$(grep -n "require([\'\"]webpack[\'\"])" next.config.js | cut -d':' -f1) if [ -z "$WEBPACK_REQ_LINE" ]; then @@ -58,7 +68,7 @@ if [ -n "$WEBPACK_CONF_LINE" ]; then cat > next.config.js.3 < next.config.js.2 cat > next.config.js.3 <&2 @@ -88,7 +98,14 @@ fi cat package.dist | jq '.scripts.cerc_compile = "next experimental-compile"' | jq '.scripts.cerc_generate = "next experimental-generate"' > package.json -npm install || exit 1 -npm run cerc_compile || exit 1 +CUR_NEXT_VERSION="`jq -r '.dependencies.next' package.json`" + +if [ "$CERC_NEXT_VERSION" != "keep" ] && [ "$CUR_NEXT_VERSION" != "$CERC_NEXT_VERSION" ]; then + echo "Changing 'next' version from $CUR_NEXT_VERSION to $CERC_NEXT_VERSION (set with --build-arg CERC_NEXT_VERSION)" + cat package.json | jq ".dependencies.next = \"$CERC_NEXT_VERSION\"" | sponge package.json +fi + +$CERC_BUILD_TOOL install || exit 1 +$CERC_BUILD_TOOL run cerc_compile || exit 1 exit 0 diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/start-serving-app.sh b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/start-serving-app.sh index 61664c68..04ab0ad3 100755 --- a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/start-serving-app.sh +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/start-serving-app.sh @@ -5,6 +5,15 @@ fi SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +CERC_BUILD_TOOL="${CERC_BUILD_TOOL}" +if [ -z "$CERC_BUILD_TOOL" ]; then + if [ -f "yarn.lock" ] && [ ! 
-f "package-lock.json" ]; then + CERC_BUILD_TOOL=yarn + else + CERC_BUILD_TOOL=npm + fi +fi + CERC_WEBAPP_FILES_DIR="${CERC_WEBAPP_FILES_DIR:-/app}" cd "$CERC_WEBAPP_FILES_DIR" @@ -31,4 +40,4 @@ if [ "$CERC_NEXTJS_SKIP_GENERATE" != "true" ]; then fi fi -npm start . -p ${CERC_LISTEN_PORT:-3000} +$CERC_BUILD_TOOL start . -p ${CERC_LISTEN_PORT:-3000} diff --git a/tests/webapp-test/run-webapp-test.sh b/tests/webapp-test/run-webapp-test.sh index 71b4da16..d32e0eba 100755 --- a/tests/webapp-test/run-webapp-test.sh +++ b/tests/webapp-test/run-webapp-test.sh @@ -24,7 +24,7 @@ git clone https://git.vdb.to/cerc-io/test-progressive-web-app.git $CERC_REPO_BAS # Test webapp command execution $TEST_TARGET_SO build-webapp --source-repo $CERC_REPO_BASE_DIR/test-progressive-web-app -UUID=`uuidgen` +CHECK="SPECIAL_01234567890_TEST_STRING" set +e @@ -34,7 +34,7 @@ wget -O test.before -m http://localhost:3000 docker remove -f $CONTAINER_ID -CONTAINER_ID=$(docker run -p 3000:3000 -e CERC_WEBAPP_DEBUG=$UUID -d cerc/test-progressive-web-app:local) +CONTAINER_ID=$(docker run -p 3000:3000 -e CERC_WEBAPP_DEBUG=$CHECK -d cerc/test-progressive-web-app:local) sleep 3 wget -O test.after -m http://localhost:3000 @@ -43,7 +43,7 @@ docker remove -f $CONTAINER_ID echo "###########################################################################" echo "" -grep "$UUID" test.before > /dev/null +grep "$CHECK" test.before > /dev/null if [ $? -ne 1 ]; then echo "BEFORE: FAILED" exit 1 @@ -51,7 +51,7 @@ else echo "BEFORE: PASSED" fi -grep "$UUID" test.after > /dev/null +grep "$CHECK" test.after > /dev/null if [ $? 
-ne 0 ]; then echo "AFTER: FAILED" exit 1 From 95e881ba193194ab3abdcf57b410e3a4975574f2 Mon Sep 17 00:00:00 2001 From: prathamesh0 <42446521+prathamesh0@users.noreply.github.com> Date: Mon, 13 Nov 2023 10:58:55 +0530 Subject: [PATCH 25/62] Add a sushiswap-v3 watcher stack (#638) * Add a sushiswap-v3 watcher stack * Add services for watcher db and server * Add service for watcher job-runner * Use 0.0.0.0 for watcher server config --------- Co-authored-by: Nabarun --- .../docker-compose-watcher-sushiswap-v3.yml | 77 +++++++++++++++ .../watcher-sushiswap-v3/start-job-runner.sh | 20 ++++ .../watcher-sushiswap-v3/start-server.sh | 23 +++++ .../watcher-config-template.toml | 98 +++++++++++++++++++ .../cerc-watcher-sushiswap-v3/Dockerfile | 10 ++ .../cerc-watcher-sushiswap-v3/build.sh | 9 ++ .../data/stacks/sushiswap-v3/README.md | 62 ++++++++++++ .../data/stacks/sushiswap-v3/stack.yml | 9 ++ 8 files changed, 308 insertions(+) create mode 100644 stack_orchestrator/data/compose/docker-compose-watcher-sushiswap-v3.yml create mode 100755 stack_orchestrator/data/config/watcher-sushiswap-v3/start-job-runner.sh create mode 100755 stack_orchestrator/data/config/watcher-sushiswap-v3/start-server.sh create mode 100644 stack_orchestrator/data/config/watcher-sushiswap-v3/watcher-config-template.toml create mode 100644 stack_orchestrator/data/container-build/cerc-watcher-sushiswap-v3/Dockerfile create mode 100755 stack_orchestrator/data/container-build/cerc-watcher-sushiswap-v3/build.sh create mode 100644 stack_orchestrator/data/stacks/sushiswap-v3/README.md create mode 100644 stack_orchestrator/data/stacks/sushiswap-v3/stack.yml diff --git a/stack_orchestrator/data/compose/docker-compose-watcher-sushiswap-v3.yml b/stack_orchestrator/data/compose/docker-compose-watcher-sushiswap-v3.yml new file mode 100644 index 00000000..db367420 --- /dev/null +++ b/stack_orchestrator/data/compose/docker-compose-watcher-sushiswap-v3.yml @@ -0,0 +1,77 @@ +version: '3.2' + +services: + 
sushiswap-v3-watcher-db: + restart: unless-stopped + image: postgres:14-alpine + environment: + - POSTGRES_USER=vdbm + - POSTGRES_MULTIPLE_DATABASES=sushiswap-v3-watcher,sushiswap-v3-watcher-job-queue + - POSTGRES_EXTENSION=sushiswap-v3-watcher-job-queue:pgcrypto + - POSTGRES_PASSWORD=password + volumes: + - ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh + - sushiswap_v3_watcher_db_data:/var/lib/postgresql/data + ports: + - "127.0.0.1:15432:5432" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "5432"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 10s + + sushiswap-v3-watcher-job-runner: + restart: unless-stopped + depends_on: + sushiswap-v3-watcher-db: + condition: service_healthy + image: cerc/watcher-sushiswap-v3:local + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT} + command: ["bash", "./start-job-runner.sh"] + volumes: + - ../config/watcher-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml + - ../config/watcher-sushiswap-v3/start-job-runner.sh:/app/start-job-runner.sh + ports: + - "127.0.0.1:9000:9000" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "9000"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" + + sushiswap-v3-watcher-server: + restart: unless-stopped + depends_on: + sushiswap-v3-watcher-db: + condition: service_healthy + sushiswap-v3-watcher-job-runner: + condition: service_healthy + image: cerc/watcher-sushiswap-v3:local + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT} + SUSHISWAP_START_BLOCK: ${SUSHISWAP_START_BLOCK:- 2867560} + command: ["bash", "./start-server.sh"] + volumes: + - ../config/watcher-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml + - 
../config/watcher-sushiswap-v3/start-server.sh:/app/start-server.sh + ports: + - "127.0.0.1:3008:3008" + - "127.0.0.1:9001:9001" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "3008"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" + +volumes: + sushiswap_v3_watcher_db_data: diff --git a/stack_orchestrator/data/config/watcher-sushiswap-v3/start-job-runner.sh b/stack_orchestrator/data/config/watcher-sushiswap-v3/start-job-runner.sh new file mode 100755 index 00000000..819b1096 --- /dev/null +++ b/stack_orchestrator/data/config/watcher-sushiswap-v3/start-job-runner.sh @@ -0,0 +1,20 @@ +#!/bin/sh + +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi +set -u + +echo "Using ETH RPC endpoint ${CERC_ETH_RPC_ENDPOINT}" + +# Read in the config template TOML file and modify it +WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml) +WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \ + sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINT|${CERC_ETH_RPC_ENDPOINT}| ") + +# Write the modified content to a new file +echo "$WATCHER_CONFIG" > environments/local.toml + +echo "Running job-runner..." 
+DEBUG=vulcanize:* exec node --enable-source-maps dist/job-runner.js diff --git a/stack_orchestrator/data/config/watcher-sushiswap-v3/start-server.sh b/stack_orchestrator/data/config/watcher-sushiswap-v3/start-server.sh new file mode 100755 index 00000000..1b14f2e3 --- /dev/null +++ b/stack_orchestrator/data/config/watcher-sushiswap-v3/start-server.sh @@ -0,0 +1,23 @@ +#!/bin/sh + +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi +set -u + +echo "Using ETH RPC endpoint ${CERC_ETH_RPC_ENDPOINT}" + +# Read in the config template TOML file and modify it +WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml) +WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \ + sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINT|${CERC_ETH_RPC_ENDPOINT}| ") + +# Write the modified content to a new file +echo "$WATCHER_CONFIG" > environments/local.toml + +echo "Initializing watcher..." +yarn fill --start-block $SUSHISWAP_START_BLOCK --end-block $((SUSHISWAP_START_BLOCK + 1)) + +echo "Running server..." +DEBUG=vulcanize:* exec node --enable-source-maps dist/server.js diff --git a/stack_orchestrator/data/config/watcher-sushiswap-v3/watcher-config-template.toml b/stack_orchestrator/data/config/watcher-sushiswap-v3/watcher-config-template.toml new file mode 100644 index 00000000..7c582b80 --- /dev/null +++ b/stack_orchestrator/data/config/watcher-sushiswap-v3/watcher-config-template.toml @@ -0,0 +1,98 @@ +[server] + host = "0.0.0.0" + port = 3008 + kind = "active" + + # Checkpointing state. + checkpointing = true + + # Checkpoint interval in number of blocks. + checkpointInterval = 2000 + + # Enable state creation + # CAUTION: Disable only if state creation is not desired or can be filled subsequently + enableState = false + + subgraphPath = "./subgraph-build" + + # Interval to restart wasm instance periodically + wasmRestartBlocksInterval = 20 + + # Interval in number of blocks at which to clear entities cache. 
+ clearEntitiesCacheInterval = 1000 + + # Max block range for which to return events in eventsInRange GQL query. + # Use -1 for skipping check on block range. + maxEventsBlockRange = 1000 + + # Flag to specify whether RPC endpoint supports block hash as block tag parameter + rpcSupportsBlockHashParam = false + + # GQL cache settings + [server.gqlCache] + enabled = true + + # Max in-memory cache size (in bytes) (default 8 MB) + # maxCacheSize + + # GQL cache-control max-age settings (in seconds) + maxAge = 15 + timeTravelMaxAge = 86400 # 1 day + +[metrics] + host = "127.0.0.1" + port = 9000 + [metrics.gql] + port = 9001 + +[database] + type = "postgres" + host = "sushiswap-v3-watcher-db" + port = 5432 + database = "sushiswap-v3-watcher" + username = "vdbm" + password = "password" + synchronize = true + logging = false + +[upstream] + [upstream.ethServer] + rpcProviderEndpoint = "REPLACE_WITH_CERC_ETH_RPC_ENDPOINT" + + # Boolean flag to specify if rpc-eth-client should be used for RPC endpoint instead of ipld-eth-client (ipld-eth-server GQL client) + rpcClient = true + + # Boolean flag to specify if rpcProviderEndpoint is an FEVM RPC endpoint + isFEVM = true + + # Boolean flag to filter event logs by contracts + filterLogsByAddresses = true + # Boolean flag to filter event logs by topics + filterLogsByTopics = true + + [upstream.cache] + name = "requests" + enabled = false + deleteOnStart = false + +[jobQueue] + dbConnectionString = "postgres://vdbm:password@sushiswap-v3-watcher-db/sushiswap-v3-watcher-job-queue" + maxCompletionLagInSecs = 300 + jobDelayInMilliSecs = 100 + eventsInBatch = 50 + subgraphEventsOrder = true + blockDelayInMilliSecs = 2000 + prefetchBlocksInMem = false + prefetchBlockCount = 10 + + # Boolean to switch between modes of processing events when starting the server. + # Setting to true will fetch filtered events and required blocks in a range of blocks and then process them. 
+ # Setting to false will fetch blocks consecutively with its events and then process them (Behaviour is followed in realtime processing near head). + useBlockRanges = true + + # Block range in which logs are fetched during historical blocks processing + historicalLogsBlockRange = 2000 + + # Max block range of historical processing after which it waits for completion of events processing + # If set to -1 historical processing does not wait for events processing and completes till latest canonical block + historicalMaxFetchAhead = 10000 diff --git a/stack_orchestrator/data/container-build/cerc-watcher-sushiswap-v3/Dockerfile b/stack_orchestrator/data/container-build/cerc-watcher-sushiswap-v3/Dockerfile new file mode 100644 index 00000000..ac6241c4 --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-watcher-sushiswap-v3/Dockerfile @@ -0,0 +1,10 @@ +FROM node:18.17.1-alpine3.18 + +RUN apk --update --no-cache add git python3 alpine-sdk bash curl jq + +WORKDIR /app + +COPY . . + +RUN echo "Installing dependencies and building sushiswap-v3-watcher-ts" && \ + yarn && yarn build diff --git a/stack_orchestrator/data/container-build/cerc-watcher-sushiswap-v3/build.sh b/stack_orchestrator/data/container-build/cerc-watcher-sushiswap-v3/build.sh new file mode 100755 index 00000000..4eb79eee --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-watcher-sushiswap-v3/build.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# Build cerc/watcher-sushiswap-v3 + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +# See: https://stackoverflow.com/a/246128/1701505 +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +docker build -t cerc/watcher-sushiswap-v3:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/sushiswap-v3-watcher-ts diff --git a/stack_orchestrator/data/stacks/sushiswap-v3/README.md b/stack_orchestrator/data/stacks/sushiswap-v3/README.md new file mode 100644 index 00000000..7116a6d9 --- /dev/null +++ 
b/stack_orchestrator/data/stacks/sushiswap-v3/README.md @@ -0,0 +1,62 @@ +# SushiSwap v3 Watcher + +## Setup + +Clone required repositories: + +```bash +laconic-so --stack sushiswap-v3 setup-repositories --git-ssh --pull +``` + +Build the container images: + +```bash +laconic-so --stack sushiswap-v3 build-containers +``` + +## Deploy + +### Configuration + +Create and update an env file to be used in the next step: + + ```bash + # External Filecoin (ETH RPC) endpoint to point the watcher + CERC_ETH_RPC_ENDPOINT= + ``` + +### Deploy the stack + +```bash +laconic-so --stack sushiswap-v3 deploy --cluster sushiswap_v3 --env-file up +``` + +* To list down and monitor the running containers: + + ```bash + laconic-so --stack sushiswap-v3 deploy --cluster sushiswap_v3 ps + + # With status + docker ps -a + + # Check logs for a container + docker logs -f + ``` + +## Clean up + +Stop all the services running in background: + +```bash +laconic-so --stack sushiswap-v3 deploy --cluster sushiswap_v3 down +``` + +Clear volumes created by this stack: + +```bash +# List all relevant volumes +docker volume ls -q --filter "name=sushiswap_v3" + +# Remove all the listed volumes +docker volume rm $(docker volume ls -q --filter "name=sushiswap_v3") +``` diff --git a/stack_orchestrator/data/stacks/sushiswap-v3/stack.yml b/stack_orchestrator/data/stacks/sushiswap-v3/stack.yml new file mode 100644 index 00000000..a851ce51 --- /dev/null +++ b/stack_orchestrator/data/stacks/sushiswap-v3/stack.yml @@ -0,0 +1,9 @@ +version: "1.0" +name: sushiswap-v3 +description: "SushiSwap v3 watcher stack" +repos: + - github.com/cerc-io/sushiswap-v3-watcher-ts@v0.1.0 +containers: + - cerc/watcher-sushiswap-v3 +pods: + - watcher-sushiswap-v3 From a04730e7acc6d2055887285bef16853f7b449aae Mon Sep 17 00:00:00 2001 From: Nabarun Gogoi Date: Mon, 13 Nov 2023 11:13:55 +0530 Subject: [PATCH 26/62] Add a merkl-sushiswap-v3 watcher stack (#641) * Add a merkl-sushiswap-v3 watcher stack * Remove unrequired image from list 
--- ...ker-compose-watcher-merkl-sushiswap-v3.yml | 77 +++++++++++++++ .../start-job-runner.sh | 20 ++++ .../start-server.sh | 23 +++++ .../watcher-config-template.toml | 98 +++++++++++++++++++ .../Dockerfile | 10 ++ .../cerc-watcher-merkl-sushiswap-v3/build.sh | 11 +++ .../data/container-image-list.txt | 1 + stack_orchestrator/data/pod-list.txt | 1 + stack_orchestrator/data/repository-list.txt | 1 + .../data/stacks/graph-node/README.md | 2 +- .../data/stacks/merkl-sushiswap-v3/README.md | 81 +++++++++++++++ .../data/stacks/merkl-sushiswap-v3/stack.yml | 9 ++ 12 files changed, 333 insertions(+), 1 deletion(-) create mode 100644 stack_orchestrator/data/compose/docker-compose-watcher-merkl-sushiswap-v3.yml create mode 100755 stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/start-job-runner.sh create mode 100755 stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/start-server.sh create mode 100644 stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/watcher-config-template.toml create mode 100644 stack_orchestrator/data/container-build/cerc-watcher-merkl-sushiswap-v3/Dockerfile create mode 100755 stack_orchestrator/data/container-build/cerc-watcher-merkl-sushiswap-v3/build.sh create mode 100644 stack_orchestrator/data/stacks/merkl-sushiswap-v3/README.md create mode 100644 stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml diff --git a/stack_orchestrator/data/compose/docker-compose-watcher-merkl-sushiswap-v3.yml b/stack_orchestrator/data/compose/docker-compose-watcher-merkl-sushiswap-v3.yml new file mode 100644 index 00000000..62e848e8 --- /dev/null +++ b/stack_orchestrator/data/compose/docker-compose-watcher-merkl-sushiswap-v3.yml @@ -0,0 +1,77 @@ +version: '3.2' + +services: + merkl-sushiswap-v3-watcher-db: + restart: unless-stopped + image: postgres:14-alpine + environment: + - POSTGRES_USER=vdbm + - POSTGRES_MULTIPLE_DATABASES=merkl-sushiswap-v3-watcher,merkl-sushiswap-v3-watcher-job-queue + - 
POSTGRES_EXTENSION=merkl-sushiswap-v3-watcher-job-queue:pgcrypto + - POSTGRES_PASSWORD=password + volumes: + - ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh + - merkl_sushiswap_v3_watcher_db_data:/var/lib/postgresql/data + ports: + - "127.0.0.1:15432:5432" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "5432"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 10s + + merkl-sushiswap-v3-watcher-job-runner: + restart: unless-stopped + depends_on: + merkl-sushiswap-v3-watcher-db: + condition: service_healthy + image: cerc/watcher-merkl-sushiswap-v3:local + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT} + command: ["bash", "./start-job-runner.sh"] + volumes: + - ../config/watcher-merkl-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml + - ../config/watcher-merkl-sushiswap-v3/start-job-runner.sh:/app/start-job-runner.sh + ports: + - "127.0.0.1:9000:9000" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "9000"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" + + merkl-sushiswap-v3-watcher-server: + restart: unless-stopped + depends_on: + merkl-sushiswap-v3-watcher-db: + condition: service_healthy + merkl-sushiswap-v3-watcher-job-runner: + condition: service_healthy + image: cerc/watcher-merkl-sushiswap-v3:local + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT} + SUSHISWAP_START_BLOCK: ${SUSHISWAP_START_BLOCK:- 2867560} + command: ["bash", "./start-server.sh"] + volumes: + - ../config/watcher-merkl-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml + - ../config/watcher-merkl-sushiswap-v3/start-server.sh:/app/start-server.sh + ports: + - "127.0.0.1:3007:3008" + - "127.0.0.1:9001:9001" + healthcheck: + test: ["CMD", 
"nc", "-v", "localhost", "3008"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" + +volumes: + merkl_sushiswap_v3_watcher_db_data: diff --git a/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/start-job-runner.sh b/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/start-job-runner.sh new file mode 100755 index 00000000..819b1096 --- /dev/null +++ b/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/start-job-runner.sh @@ -0,0 +1,20 @@ +#!/bin/sh + +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi +set -u + +echo "Using ETH RPC endpoint ${CERC_ETH_RPC_ENDPOINT}" + +# Read in the config template TOML file and modify it +WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml) +WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \ + sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINT|${CERC_ETH_RPC_ENDPOINT}| ") + +# Write the modified content to a new file +echo "$WATCHER_CONFIG" > environments/local.toml + +echo "Running job-runner..." +DEBUG=vulcanize:* exec node --enable-source-maps dist/job-runner.js diff --git a/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/start-server.sh b/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/start-server.sh new file mode 100755 index 00000000..1b14f2e3 --- /dev/null +++ b/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/start-server.sh @@ -0,0 +1,23 @@ +#!/bin/sh + +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi +set -u + +echo "Using ETH RPC endpoint ${CERC_ETH_RPC_ENDPOINT}" + +# Read in the config template TOML file and modify it +WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml) +WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \ + sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINT|${CERC_ETH_RPC_ENDPOINT}| ") + +# Write the modified content to a new file +echo "$WATCHER_CONFIG" > environments/local.toml + +echo "Initializing watcher..." 
+yarn fill --start-block $SUSHISWAP_START_BLOCK --end-block $((SUSHISWAP_START_BLOCK + 1)) + +echo "Running server..." +DEBUG=vulcanize:* exec node --enable-source-maps dist/server.js diff --git a/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/watcher-config-template.toml b/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/watcher-config-template.toml new file mode 100644 index 00000000..053e5544 --- /dev/null +++ b/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/watcher-config-template.toml @@ -0,0 +1,98 @@ +[server] + host = "0.0.0.0" + port = 3008 + kind = "active" + + # Checkpointing state. + checkpointing = true + + # Checkpoint interval in number of blocks. + checkpointInterval = 2000 + + # Enable state creation + # CAUTION: Disable only if state creation is not desired or can be filled subsequently + enableState = false + + subgraphPath = "./subgraph-build" + + # Interval to restart wasm instance periodically + wasmRestartBlocksInterval = 20 + + # Interval in number of blocks at which to clear entities cache. + clearEntitiesCacheInterval = 1000 + + # Max block range for which to return events in eventsInRange GQL query. + # Use -1 for skipping check on block range. 
+ maxEventsBlockRange = 1000 + + # Flag to specify whether RPC endpoint supports block hash as block tag parameter + rpcSupportsBlockHashParam = false + + # GQL cache settings + [server.gqlCache] + enabled = true + + # Max in-memory cache size (in bytes) (default 8 MB) + # maxCacheSize + + # GQL cache-control max-age settings (in seconds) + maxAge = 15 + timeTravelMaxAge = 86400 # 1 day + +[metrics] + host = "127.0.0.1" + port = 9000 + [metrics.gql] + port = 9001 + +[database] + type = "postgres" + host = "merkl-sushiswap-v3-watcher-db" + port = 5432 + database = "merkl-sushiswap-v3-watcher" + username = "vdbm" + password = "password" + synchronize = true + logging = false + +[upstream] + [upstream.ethServer] + rpcProviderEndpoint = "REPLACE_WITH_CERC_ETH_RPC_ENDPOINT" + + # Boolean flag to specify if rpc-eth-client should be used for RPC endpoint instead of ipld-eth-client (ipld-eth-server GQL client) + rpcClient = true + + # Boolean flag to specify if rpcProviderEndpoint is an FEVM RPC endpoint + isFEVM = true + + # Boolean flag to filter event logs by contracts + filterLogsByAddresses = true + # Boolean flag to filter event logs by topics + filterLogsByTopics = false + + [upstream.cache] + name = "requests" + enabled = false + deleteOnStart = false + +[jobQueue] + dbConnectionString = "postgres://vdbm:password@merkl-sushiswap-v3-watcher-db/merkl-sushiswap-v3-watcher-job-queue" + maxCompletionLagInSecs = 300 + jobDelayInMilliSecs = 100 + eventsInBatch = 50 + subgraphEventsOrder = true + blockDelayInMilliSecs = 2000 + prefetchBlocksInMem = false + prefetchBlockCount = 10 + + # Boolean to switch between modes of processing events when starting the server. + # Setting to true will fetch filtered events and required blocks in a range of blocks and then process them. + # Setting to false will fetch blocks consecutively with its events and then process them (Behaviour is followed in realtime processing near head). 
+ useBlockRanges = true + + # Block range in which logs are fetched during historical blocks processing + historicalLogsBlockRange = 2000 + + # Max block range of historical processing after which it waits for completion of events processing + # If set to -1 historical processing does not wait for events processing and completes till latest canonical block + historicalMaxFetchAhead = 10000 diff --git a/stack_orchestrator/data/container-build/cerc-watcher-merkl-sushiswap-v3/Dockerfile b/stack_orchestrator/data/container-build/cerc-watcher-merkl-sushiswap-v3/Dockerfile new file mode 100644 index 00000000..e09738ac --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-watcher-merkl-sushiswap-v3/Dockerfile @@ -0,0 +1,10 @@ +FROM node:18.17.1-alpine3.18 + +RUN apk --update --no-cache add git python3 alpine-sdk bash curl jq + +WORKDIR /app + +COPY . . + +RUN echo "Installing dependencies and building merkl-sushiswap-v3-watcher-ts" && \ + yarn && yarn build diff --git a/stack_orchestrator/data/container-build/cerc-watcher-merkl-sushiswap-v3/build.sh b/stack_orchestrator/data/container-build/cerc-watcher-merkl-sushiswap-v3/build.sh new file mode 100755 index 00000000..b53ee621 --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-watcher-merkl-sushiswap-v3/build.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash +# Build cerc/watcher-merkl-sushiswap-v3 + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +# See: https://stackoverflow.com/a/246128/1701505 +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +docker build -t cerc/my-new-stack:local -f ${CERC_REPO_BASE_DIR}/my-new-stack/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/my-new-stack + +docker build -t cerc/watcher-merkl-sushiswap-v3:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/merkl-sushiswap-v3-watcher-ts diff --git a/stack_orchestrator/data/container-image-list.txt b/stack_orchestrator/data/container-image-list.txt index 
256f0a6f..36df0201 100644 --- a/stack_orchestrator/data/container-image-list.txt +++ b/stack_orchestrator/data/container-image-list.txt @@ -57,3 +57,4 @@ cerc/nitro-contracts cerc/mobymask-snap cerc/ponder cerc/nitro-rpc-client +cerc/watcher-merkl-sushiswap-v3 \ No newline at end of file diff --git a/stack_orchestrator/data/pod-list.txt b/stack_orchestrator/data/pod-list.txt index 4ba1bac0..7b91600d 100644 --- a/stack_orchestrator/data/pod-list.txt +++ b/stack_orchestrator/data/pod-list.txt @@ -43,3 +43,4 @@ nitro-contracts mobymask-snap ponder ipld-eth-server-payments +merkl-sushiswap-v3 diff --git a/stack_orchestrator/data/repository-list.txt b/stack_orchestrator/data/repository-list.txt index ceaa910c..f6696464 100644 --- a/stack_orchestrator/data/repository-list.txt +++ b/stack_orchestrator/data/repository-list.txt @@ -47,3 +47,4 @@ github.com/cerc-io/go-nitro github.com/cerc-io/ts-nitro github.com/cerc-io/mobymask-snap github.com/cerc-io/ponder +github.com/cerc-io/merkl-sushiswap-v3-watcher-ts diff --git a/stack_orchestrator/data/stacks/graph-node/README.md b/stack_orchestrator/data/stacks/graph-node/README.md index 0527efc0..df3ae1eb 100644 --- a/stack_orchestrator/data/stacks/graph-node/README.md +++ b/stack_orchestrator/data/stacks/graph-node/README.md @@ -59,7 +59,7 @@ ports: Create deployment: ```bash -laconic-so deploy create --spec-file graph-node-spec.yml --deployment-dir graph-node-deployment +laconic-so --stack graph-node deploy create --spec-file graph-node-spec.yml --deployment-dir graph-node-deployment ``` ## Start the stack diff --git a/stack_orchestrator/data/stacks/merkl-sushiswap-v3/README.md b/stack_orchestrator/data/stacks/merkl-sushiswap-v3/README.md new file mode 100644 index 00000000..4284c2ad --- /dev/null +++ b/stack_orchestrator/data/stacks/merkl-sushiswap-v3/README.md @@ -0,0 +1,81 @@ +# Merkl SushiSwap v3 Watcher + +## Setup + +Clone required repositories: + +```bash +laconic-so --stack merkl-sushiswap-v3 setup-repositories --git-ssh 
--pull +``` + +Build the container images: + +```bash +laconic-so --stack merkl-sushiswap-v3 build-containers +``` + +## Deploy + +### Configuration + +Create and update an env file to be used in the next step: + + ```bash + # External Filecoin (ETH RPC) endpoint to point the watcher + CERC_ETH_RPC_ENDPOINT= + ``` + +### Deploy the stack + +```bash +laconic-so --stack merkl-sushiswap-v3 deploy --cluster merkl_sushiswap_v3 --env-file up +``` + +* To list down and monitor the running containers: + + ```bash + laconic-so --stack merkl-sushiswap-v3 deploy --cluster merkl_sushiswap_v3 ps + + # With status + docker ps -a + + # Check logs for a container + docker logs -f + ``` + +* Open the GQL playground at http://localhost:3007/graphql + + ```graphql + { + _meta { + block { + number + timestamp + } + hasIndexingErrors + } + + factories { + id + poolCount + } + } + ``` + +## Clean up + +Stop all the services running in background: + +```bash +laconic-so --stack merkl-sushiswap-v3 deploy --cluster merkl_sushiswap_v3 down +``` + +Clear volumes created by this stack: + +```bash +# List all relevant volumes +docker volume ls -q --filter "name=merkl_sushiswap_v3" + +# Remove all the listed volumes +docker volume rm $(docker volume ls -q --filter "name=merkl_sushiswap_v3") +``` diff --git a/stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml b/stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml new file mode 100644 index 00000000..e3b5abf1 --- /dev/null +++ b/stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml @@ -0,0 +1,9 @@ +version: "1.0" +name: merkl-sushiswap-v3 +description: "SushiSwap v3 watcher stack" +repos: + - github.com/cerc-io/merkl-sushiswap-v3-watcher-ts@v0.1.0 +containers: + - cerc/watcher-merkl-sushiswap-v3 +pods: + - watcher-merkl-sushiswap-v3 From 0aca087558d1da8a9a14ccd6a49f407956119b2e Mon Sep 17 00:00:00 2001 From: Nabarun Gogoi Date: Mon, 13 Nov 2023 17:36:37 +0530 Subject: [PATCH 27/62] Upgrade release versions for merkl and 
sushiswap watchers (#642) * Upgrade merkl-sushiswap-v3-watcher-ts release * Increase blockDelayInMilliSecs for merkl-sushiswap-v3 watcher * Upgrade sushiswap-v3-watcher-ts release * Add sushiswap-v3 watcher to stack list * Avoid mapping ports that are not required to be exposed --- .../compose/docker-compose-watcher-merkl-sushiswap-v3.yml | 6 +++--- .../data/compose/docker-compose-watcher-sushiswap-v3.yml | 6 +++--- .../watcher-merkl-sushiswap-v3/watcher-config-template.toml | 3 ++- .../watcher-sushiswap-v3/watcher-config-template.toml | 3 ++- stack_orchestrator/data/container-image-list.txt | 3 ++- stack_orchestrator/data/pod-list.txt | 1 + stack_orchestrator/data/repository-list.txt | 1 + stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml | 2 +- stack_orchestrator/data/stacks/sushiswap-v3/stack.yml | 2 +- 9 files changed, 16 insertions(+), 11 deletions(-) diff --git a/stack_orchestrator/data/compose/docker-compose-watcher-merkl-sushiswap-v3.yml b/stack_orchestrator/data/compose/docker-compose-watcher-merkl-sushiswap-v3.yml index 62e848e8..d08c6214 100644 --- a/stack_orchestrator/data/compose/docker-compose-watcher-merkl-sushiswap-v3.yml +++ b/stack_orchestrator/data/compose/docker-compose-watcher-merkl-sushiswap-v3.yml @@ -13,7 +13,7 @@ services: - ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh - merkl_sushiswap_v3_watcher_db_data:/var/lib/postgresql/data ports: - - "127.0.0.1:15432:5432" + - "5432" healthcheck: test: ["CMD", "nc", "-v", "localhost", "5432"] interval: 20s @@ -35,7 +35,7 @@ services: - ../config/watcher-merkl-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml - ../config/watcher-merkl-sushiswap-v3/start-job-runner.sh:/app/start-job-runner.sh ports: - - "127.0.0.1:9000:9000" + - "9000" healthcheck: test: ["CMD", "nc", "-v", "localhost", "9000"] interval: 20s @@ -63,7 +63,7 @@ services: - 
../config/watcher-merkl-sushiswap-v3/start-server.sh:/app/start-server.sh ports: - "127.0.0.1:3007:3008" - - "127.0.0.1:9001:9001" + - "9001" healthcheck: test: ["CMD", "nc", "-v", "localhost", "3008"] interval: 20s diff --git a/stack_orchestrator/data/compose/docker-compose-watcher-sushiswap-v3.yml b/stack_orchestrator/data/compose/docker-compose-watcher-sushiswap-v3.yml index db367420..219688db 100644 --- a/stack_orchestrator/data/compose/docker-compose-watcher-sushiswap-v3.yml +++ b/stack_orchestrator/data/compose/docker-compose-watcher-sushiswap-v3.yml @@ -13,7 +13,7 @@ services: - ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh - sushiswap_v3_watcher_db_data:/var/lib/postgresql/data ports: - - "127.0.0.1:15432:5432" + - "5432" healthcheck: test: ["CMD", "nc", "-v", "localhost", "5432"] interval: 20s @@ -35,7 +35,7 @@ services: - ../config/watcher-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml - ../config/watcher-sushiswap-v3/start-job-runner.sh:/app/start-job-runner.sh ports: - - "127.0.0.1:9000:9000" + - "9000" healthcheck: test: ["CMD", "nc", "-v", "localhost", "9000"] interval: 20s @@ -63,7 +63,7 @@ services: - ../config/watcher-sushiswap-v3/start-server.sh:/app/start-server.sh ports: - "127.0.0.1:3008:3008" - - "127.0.0.1:9001:9001" + - "9001" healthcheck: test: ["CMD", "nc", "-v", "localhost", "3008"] interval: 20s diff --git a/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/watcher-config-template.toml b/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/watcher-config-template.toml index 053e5544..894a4660 100644 --- a/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/watcher-config-template.toml +++ b/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/watcher-config-template.toml @@ -81,7 +81,8 @@ jobDelayInMilliSecs = 100 eventsInBatch = 50 subgraphEventsOrder = true - blockDelayInMilliSecs = 2000 + # Filecoin 
block time: https://docs.filecoin.io/basics/the-blockchain/blocks-and-tipsets#blocktime + blockDelayInMilliSecs = 30000 prefetchBlocksInMem = false prefetchBlockCount = 10 diff --git a/stack_orchestrator/data/config/watcher-sushiswap-v3/watcher-config-template.toml b/stack_orchestrator/data/config/watcher-sushiswap-v3/watcher-config-template.toml index 7c582b80..07880a8d 100644 --- a/stack_orchestrator/data/config/watcher-sushiswap-v3/watcher-config-template.toml +++ b/stack_orchestrator/data/config/watcher-sushiswap-v3/watcher-config-template.toml @@ -81,7 +81,8 @@ jobDelayInMilliSecs = 100 eventsInBatch = 50 subgraphEventsOrder = true - blockDelayInMilliSecs = 2000 + # Filecoin block time: https://docs.filecoin.io/basics/the-blockchain/blocks-and-tipsets#blocktime + blockDelayInMilliSecs = 30000 prefetchBlocksInMem = false prefetchBlockCount = 10 diff --git a/stack_orchestrator/data/container-image-list.txt b/stack_orchestrator/data/container-image-list.txt index 36df0201..41dd8b21 100644 --- a/stack_orchestrator/data/container-image-list.txt +++ b/stack_orchestrator/data/container-image-list.txt @@ -57,4 +57,5 @@ cerc/nitro-contracts cerc/mobymask-snap cerc/ponder cerc/nitro-rpc-client -cerc/watcher-merkl-sushiswap-v3 \ No newline at end of file +cerc/watcher-merkl-sushiswap-v3 +cerc/watcher-sushiswap-v3 diff --git a/stack_orchestrator/data/pod-list.txt b/stack_orchestrator/data/pod-list.txt index 7b91600d..9ad000c7 100644 --- a/stack_orchestrator/data/pod-list.txt +++ b/stack_orchestrator/data/pod-list.txt @@ -44,3 +44,4 @@ mobymask-snap ponder ipld-eth-server-payments merkl-sushiswap-v3 +sushiswap-v3 diff --git a/stack_orchestrator/data/repository-list.txt b/stack_orchestrator/data/repository-list.txt index f6696464..192a831e 100644 --- a/stack_orchestrator/data/repository-list.txt +++ b/stack_orchestrator/data/repository-list.txt @@ -48,3 +48,4 @@ github.com/cerc-io/ts-nitro github.com/cerc-io/mobymask-snap github.com/cerc-io/ponder 
github.com/cerc-io/merkl-sushiswap-v3-watcher-ts +github.com/cerc-io/sushiswap-v3-watcher-ts diff --git a/stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml b/stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml index e3b5abf1..8b010a24 100644 --- a/stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml +++ b/stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml @@ -2,7 +2,7 @@ version: "1.0" name: merkl-sushiswap-v3 description: "SushiSwap v3 watcher stack" repos: - - github.com/cerc-io/merkl-sushiswap-v3-watcher-ts@v0.1.0 + - github.com/cerc-io/merkl-sushiswap-v3-watcher-ts@v0.1.1 containers: - cerc/watcher-merkl-sushiswap-v3 pods: diff --git a/stack_orchestrator/data/stacks/sushiswap-v3/stack.yml b/stack_orchestrator/data/stacks/sushiswap-v3/stack.yml index a851ce51..66065b5d 100644 --- a/stack_orchestrator/data/stacks/sushiswap-v3/stack.yml +++ b/stack_orchestrator/data/stacks/sushiswap-v3/stack.yml @@ -2,7 +2,7 @@ version: "1.0" name: sushiswap-v3 description: "SushiSwap v3 watcher stack" repos: - - github.com/cerc-io/sushiswap-v3-watcher-ts@v0.1.0 + - github.com/cerc-io/sushiswap-v3-watcher-ts@v0.1.1 containers: - cerc/watcher-sushiswap-v3 pods: From f1f618c57a5658d93935602b6a9ef7f9bb1b48f8 Mon Sep 17 00:00:00 2001 From: Thomas E Lackey Date: Mon, 13 Nov 2023 11:56:04 -0600 Subject: [PATCH 28/62] Don't change the next.js version by default. 
(#640) --- .../data/container-build/cerc-nextjs-base/Dockerfile.webapp | 2 +- .../container-build/cerc-nextjs-base/scripts/build-app.sh | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile.webapp b/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile.webapp index 23f42385..51664deb 100644 --- a/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile.webapp +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile.webapp @@ -1,6 +1,6 @@ FROM cerc/nextjs-base:local -ARG CERC_NEXT_VERSION=latest +ARG CERC_NEXT_VERSION=keep ARG CERC_BUILD_TOOL WORKDIR /app diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/build-app.sh b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/build-app.sh index 83977268..e62bc0d0 100755 --- a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/build-app.sh +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/build-app.sh @@ -4,7 +4,7 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then set -x fi -CERC_NEXT_VERSION="${CERC_NEXT_VERSION:-^14.0.2}" +CERC_NEXT_VERSION="${CERC_NEXT_VERSION:-keep}" CERC_BUILD_TOOL="${CERC_BUILD_TOOL}" if [ -z "$CERC_BUILD_TOOL" ]; then if [ -f "yarn.lock" ] && [ ! 
-f "package-lock.json" ]; then @@ -101,8 +101,10 @@ cat package.dist | jq '.scripts.cerc_compile = "next experimental-compile"' | jq CUR_NEXT_VERSION="`jq -r '.dependencies.next' package.json`" if [ "$CERC_NEXT_VERSION" != "keep" ] && [ "$CUR_NEXT_VERSION" != "$CERC_NEXT_VERSION" ]; then - echo "Changing 'next' version from $CUR_NEXT_VERSION to $CERC_NEXT_VERSION (set with --build-arg CERC_NEXT_VERSION)" + echo "Changing 'next' version specifier from '$CUR_NEXT_VERSION' to '$CERC_NEXT_VERSION' (set with --build-arg CERC_NEXT_VERSION)" cat package.json | jq ".dependencies.next = \"$CERC_NEXT_VERSION\"" | sponge package.json +else + echo "'next' version specifier '$CUR_NEXT_VERSION' (override with --build-arg CERC_NEXT_VERSION)" fi $CERC_BUILD_TOOL install || exit 1 From f088cbb3b0c64f864303b9f390007e976c0880cf Mon Sep 17 00:00:00 2001 From: iskay Date: Tue, 14 Nov 2023 14:38:49 +0000 Subject: [PATCH 29/62] fix linter errors --- .../stacks/fixturenet-optimism/deploy/commands.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/stack_orchestrator/data/stacks/fixturenet-optimism/deploy/commands.py b/stack_orchestrator/data/stacks/fixturenet-optimism/deploy/commands.py index 76668135..fa757cf5 100644 --- a/stack_orchestrator/data/stacks/fixturenet-optimism/deploy/commands.py +++ b/stack_orchestrator/data/stacks/fixturenet-optimism/deploy/commands.py @@ -12,26 +12,28 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . 
+ from stack_orchestrator.deploy.deployment_context import DeploymentContext from ruamel.yaml import YAML + def create(context: DeploymentContext, extra_args): # Slightly modify the base fixturenet-eth compose file to replace the startup script for fixturenet-eth-geth-1 # We need to start geth with the flag to allow non eip-155 compliant transactions in order to publish the # deterministic-deployment-proxy contract, which itself is a prereq for Optimism contract deployment fixturenet_eth_compose_file = context.deployment_dir.joinpath('compose', 'docker-compose-fixturenet-eth.yml') - + with open(fixturenet_eth_compose_file, 'r') as yaml_file: - yaml=YAML() + yaml = YAML() yaml_data = yaml.load(yaml_file) - + new_script = '../config/fixturenet-optimism/run-geth.sh:/opt/testnet/run.sh' if new_script not in yaml_data['services']['fixturenet-eth-geth-1']['volumes']: yaml_data['services']['fixturenet-eth-geth-1']['volumes'].append(new_script) - + with open(fixturenet_eth_compose_file, 'w') as yaml_file: - yaml=YAML() + yaml = YAML() yaml.dump(yaml_data, yaml_file) return None From 9687d8446823cd63acff3b11bb72fd3b07ed2556 Mon Sep 17 00:00:00 2001 From: Thomas E Lackey Date: Tue, 14 Nov 2023 16:07:26 -0600 Subject: [PATCH 30/62] 646: Add error message for webapp startup hang (#647) This fixes three issues: 1. #644 (build output) 2. #646 (error on startup) 3. automatic env quote handling (related to 2) For the build output we now have: ``` ################################################################# Built host container for /home/telackey/tmp/iglootools-home with tag: cerc/iglootools-home:local To test locally run: docker run -p 3000:3000 cerc/iglootools-home:local ``` For the startup error, it was hung waiting for the "success" message from the next generate output (itself a workaround for a nextjs bug fixed by this PR we submitted: https://github.com/vercel/next.js/pull/58276). 
I added a timeout which will cause it to wait up to a maximum _n_ seconds before issuing: ``` ERROR: 'npm run cerc_generate' exceeded CERC_MAX_GENERATE_TIME. ``` On the quoting itself, I plan on adding a new run-webapp command, but I realized I had a decent spot to do effect the quote replacement on-the-fly after all when I am already escaping the values for insertion/replacement into JS. The "dequoting" can be disabled with `CERC_RETAIN_ENV_QUOTES=true`. --- .../cerc-nextjs-base/Dockerfile | 2 +- .../container-build/cerc-nextjs-base/build.sh | 16 +++++++++ .../scripts/apply-runtime-env.sh | 3 ++ .../scripts/start-serving-app.sh | 35 +++++++++++++++---- 4 files changed, 49 insertions(+), 7 deletions(-) diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile b/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile index 69e38932..c2416b67 100644 --- a/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile @@ -39,4 +39,4 @@ EXPOSE 3000 COPY /scripts /scripts # Default command sleeps forever so docker doesn't kill it -CMD ["/scripts/start-serving-app.sh"] +ENTRYPOINT ["/scripts/start-serving-app.sh"] diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/build.sh b/stack_orchestrator/data/container-build/cerc-nextjs-base/build.sh index 3cf5f7f4..342dd3cd 100755 --- a/stack_orchestrator/data/container-build/cerc-nextjs-base/build.sh +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/build.sh @@ -11,3 +11,19 @@ CERC_CONTAINER_BUILD_DOCKERFILE=${CERC_CONTAINER_BUILD_DOCKERFILE:-$SCRIPT_DIR/D CERC_CONTAINER_BUILD_TAG=${CERC_CONTAINER_BUILD_TAG:-cerc/nextjs-base:local} docker build -t $CERC_CONTAINER_BUILD_TAG ${build_command_args} -f $CERC_CONTAINER_BUILD_DOCKERFILE $CERC_CONTAINER_BUILD_WORK_DIR + +if [ $? 
-eq 0 ] && [ "$CERC_CONTAINER_BUILD_TAG" != "cerc/nextjs-base:local" ]; then + cat < /dev/null && pwd ) +CERC_MAX_GENERATE_TIME=${CERC_MAX_GENERATE_TIME:-60} +tpid="" + +ctrl_c() { + kill $tpid $(ps -ef | grep node | grep next | awk '{print $2}') 2>/dev/null +} + +trap ctrl_c INT CERC_BUILD_TOOL="${CERC_BUILD_TOOL}" if [ -z "$CERC_BUILD_TOOL" ]; then @@ -25,18 +34,32 @@ if [ "$CERC_NEXTJS_SKIP_GENERATE" != "true" ]; then jq -e '.scripts.cerc_generate' package.json >/dev/null if [ $? -eq 0 ]; then npm run cerc_generate > gen.out 2>&1 & - tail -n0 -f gen.out | sed '/rendered as static HTML/ q' + tail -f gen.out & + tpid=$! + count=0 - while [ $count -lt 10 ]; do + generate_done="false" + while [ $count -lt $CERC_MAX_GENERATE_TIME ]; do sleep 1 - ps -ef | grep 'node' | grep 'next' | grep 'generate' >/dev/null - if [ $? -ne 0 ]; then - break + grep 'rendered as static HTML' gen.out > /dev/null + if [ $? -eq 0 ]; then + generate_done="true" + ps -ef | grep 'node' | grep 'next' | grep 'generate' >/dev/null + if [ $? -ne 0 ]; then + break + fi else count=$((count + 1)) fi done - kill $(ps -ef |grep node | grep next | grep generate | awk '{print $2}') 2>/dev/null + + if [ $generate_done != "true" ]; then + echo "ERROR: 'npm run cerc_generate' not successful within CERC_MAX_GENERATE_TIME" 1>&2 + exit 1 + fi + + kill $tpid $(ps -ef | grep node | grep next | grep generate | awk '{print $2}') 2>/dev/null + tpid="" fi fi From 4ae4d3b61d8a7713e51d9d8116efdfd2e93cc427 Mon Sep 17 00:00:00 2001 From: Thomas E Lackey Date: Tue, 14 Nov 2023 17:30:01 -0600 Subject: [PATCH 31/62] Print docker container logs in webapp test. 
(#649) --- .../cerc-nextjs-base/scripts/start-serving-app.sh | 9 ++------- tests/webapp-test/run-webapp-test.sh | 12 ++++++++---- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/start-serving-app.sh b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/start-serving-app.sh index aec65f29..bf35bcdb 100755 --- a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/start-serving-app.sh +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/start-serving-app.sh @@ -39,17 +39,12 @@ if [ "$CERC_NEXTJS_SKIP_GENERATE" != "true" ]; then count=0 generate_done="false" - while [ $count -lt $CERC_MAX_GENERATE_TIME ]; do + while [ $count -lt $CERC_MAX_GENERATE_TIME ] && [ "$generate_done" == "false" ]; do sleep 1 + count=$((count + 1)) grep 'rendered as static HTML' gen.out > /dev/null if [ $? -eq 0 ]; then generate_done="true" - ps -ef | grep 'node' | grep 'next' | grep 'generate' >/dev/null - if [ $? 
-ne 0 ]; then - break - fi - else - count=$((count + 1)) fi done diff --git a/tests/webapp-test/run-webapp-test.sh b/tests/webapp-test/run-webapp-test.sh index d32e0eba..5db382f8 100755 --- a/tests/webapp-test/run-webapp-test.sh +++ b/tests/webapp-test/run-webapp-test.sh @@ -1,8 +1,10 @@ #!/usr/bin/env bash set -e + if [ -n "$CERC_SCRIPT_DEBUG" ]; then set -x fi + # Dump environment variables for debugging echo "Environment variables:" env @@ -28,16 +30,18 @@ CHECK="SPECIAL_01234567890_TEST_STRING" set +e -CONTAINER_ID=$(docker run -p 3000:3000 -d cerc/test-progressive-web-app:local) +CONTAINER_ID=$(docker run -p 3000:3000 -d -e CERC_SCRIPT_DEBUG=$CERC_SCRIPT_DEBUG cerc/test-progressive-web-app:local) sleep 3 -wget -O test.before -m http://localhost:3000 +wget -t 7 -O test.before -m http://localhost:3000 +docker logs $CONTAINER_ID docker remove -f $CONTAINER_ID -CONTAINER_ID=$(docker run -p 3000:3000 -e CERC_WEBAPP_DEBUG=$CHECK -d cerc/test-progressive-web-app:local) +CONTAINER_ID=$(docker run -p 3000:3000 -e CERC_WEBAPP_DEBUG=$CHECK -e CERC_SCRIPT_DEBUG=$CERC_SCRIPT_DEBUG -d cerc/test-progressive-web-app:local) sleep 3 -wget -O test.after -m http://localhost:3000 +wget -t 7 -O test.after -m http://localhost:3000 +docker logs $CONTAINER_ID docker remove -f $CONTAINER_ID echo "###########################################################################" From 638fa016498a17a0ea24f3b4401459f464d208b5 Mon Sep 17 00:00:00 2001 From: David Boreham Date: Tue, 14 Nov 2023 20:59:48 -0700 Subject: [PATCH 32/62] Support external stack file (#650) --- stack_orchestrator/build/build_containers.py | 32 +++++++++++++++---- stack_orchestrator/build/build_webapp.py | 4 +-- stack_orchestrator/constants.py | 16 ++++++++++ .../repos/setup_repositories.py | 20 ++++++++---- stack_orchestrator/util.py | 11 +++++++ 5 files changed, 68 insertions(+), 15 deletions(-) create mode 100644 stack_orchestrator/constants.py diff --git a/stack_orchestrator/build/build_containers.py 
b/stack_orchestrator/build/build_containers.py index 5b2748cc..e987c504 100644 --- a/stack_orchestrator/build/build_containers.py +++ b/stack_orchestrator/build/build_containers.py @@ -27,7 +27,7 @@ import subprocess import click import importlib.resources from pathlib import Path -from stack_orchestrator.util import include_exclude_check, get_parsed_stack_config +from stack_orchestrator.util import include_exclude_check, get_parsed_stack_config, stack_is_external from stack_orchestrator.base import get_npm_registry_url # TODO: find a place for this @@ -58,7 +58,8 @@ def make_container_build_env(dev_root_path: str, return container_build_env -def process_container(container, +def process_container(stack: str, + container, container_build_dir: str, container_build_env: dict, dev_root_path: str, @@ -69,12 +70,29 @@ def process_container(container, ): if not quiet: print(f"Building: {container}") - build_dir = os.path.join(container_build_dir, container.replace("/", "-")) - build_script_filename = os.path.join(build_dir, "build.sh") + + default_container_tag = f"{container}:local" + container_build_env.update({"CERC_DEFAULT_CONTAINER_IMAGE_TAG": default_container_tag}) + + # Check if this is in an external stack + if stack_is_external(stack): + container_parent_dir = Path(stack).joinpath("container-build") + temp_build_dir = container_parent_dir.joinpath(container.replace("/", "-")) + temp_build_script_filename = temp_build_dir.joinpath("build.sh") + # Now check if the container exists in the external stack. 
+ if not temp_build_script_filename.exists(): + # If not, revert to building an internal container + container_parent_dir = container_build_dir + else: + container_parent_dir = container_build_dir + + build_dir = container_parent_dir.joinpath(container.replace("/", "-")) + build_script_filename = build_dir.joinpath("build.sh") + if verbose: print(f"Build script filename: {build_script_filename}") if os.path.exists(build_script_filename): - build_command = build_script_filename + build_command = build_script_filename.as_posix() else: if verbose: print(f"No script file found: {build_script_filename}, using default build script") @@ -84,7 +102,7 @@ def process_container(container, repo_full_path = os.path.join(dev_root_path, repo_dir) repo_dir_or_build_dir = repo_full_path if os.path.exists(repo_full_path) else build_dir build_command = os.path.join(container_build_dir, - "default-build.sh") + f" {container}:local {repo_dir_or_build_dir}" + "default-build.sh") + f" {default_container_tag} {repo_dir_or_build_dir}" if not dry_run: if verbose: print(f"Executing: {build_command} with environment: {container_build_env}") @@ -158,7 +176,7 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args): for container in containers_in_scope: if include_exclude_check(container, include, exclude): - process_container(container, container_build_dir, container_build_env, + process_container(stack, container, container_build_dir, container_build_env, dev_root_path, quiet, verbose, dry_run, continue_on_error) else: if verbose: diff --git a/stack_orchestrator/build/build_webapp.py b/stack_orchestrator/build/build_webapp.py index 7a656ae7..ace334c4 100644 --- a/stack_orchestrator/build/build_webapp.py +++ b/stack_orchestrator/build/build_webapp.py @@ -60,7 +60,7 @@ def command(ctx, base_container, source_repo, force_rebuild, extra_build_args): container_build_env = build_containers.make_container_build_env(dev_root_path, container_build_dir, debug, force_rebuild, 
extra_build_args) - build_containers.process_container(base_container, container_build_dir, container_build_env, dev_root_path, quiet, + build_containers.process_container(None, base_container, container_build_dir, container_build_env, dev_root_path, quiet, verbose, dry_run, continue_on_error) @@ -73,5 +73,5 @@ def command(ctx, base_container, source_repo, force_rebuild, extra_build_args): webapp_name = os.path.abspath(source_repo).split(os.path.sep)[-1] container_build_env["CERC_CONTAINER_BUILD_TAG"] = f"cerc/{webapp_name}:local" - build_containers.process_container(base_container, container_build_dir, container_build_env, dev_root_path, quiet, + build_containers.process_container(None, base_container, container_build_dir, container_build_env, dev_root_path, quiet, verbose, dry_run, continue_on_error) diff --git a/stack_orchestrator/constants.py b/stack_orchestrator/constants.py new file mode 100644 index 00000000..1e24794c --- /dev/null +++ b/stack_orchestrator/constants.py @@ -0,0 +1,16 @@ +# Copyright © 2023 Vulcanize + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +stack_file_name = "stack.yml" diff --git a/stack_orchestrator/repos/setup_repositories.py b/stack_orchestrator/repos/setup_repositories.py index feca7897..3612aed0 100644 --- a/stack_orchestrator/repos/setup_repositories.py +++ b/stack_orchestrator/repos/setup_repositories.py @@ -25,7 +25,8 @@ import click import importlib.resources from pathlib import Path import yaml -from stack_orchestrator.util import include_exclude_check +from stack_orchestrator.constants import stack_file_name +from stack_orchestrator.util import include_exclude_check, stack_is_external, error_exit class GitProgress(git.RemoteProgress): @@ -238,13 +239,20 @@ def command(ctx, include, exclude, git_ssh, check_only, pull, branches, branches repos_in_scope = [] if stack: - # In order to be compatible with Python 3.8 we need to use this hack to get the path: - # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure - stack_file_path = Path(__file__).absolute().parent.parent.joinpath("data", "stacks", stack, "stack.yml") + if stack_is_external(stack): + stack_file_path = Path(stack).joinpath(stack_file_name) + else: + # In order to be compatible with Python 3.8 we need to use this hack to get the path: + # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure + stack_file_path = Path(__file__).absolute().parent.parent.joinpath("data", "stacks", stack, stack_file_name) + if not stack_file_path.exists(): + error_exit(f"stack {stack} does not exist") with stack_file_path: stack_config = yaml.safe_load(open(stack_file_path, "r")) - # TODO: syntax check the input here - repos_in_scope = stack_config['repos'] + if "repos" not in stack_config: + error_exit(f"stack {stack} does not define any repositories") + else: + repos_in_scope = stack_config["repos"] else: repos_in_scope = all_repos diff --git a/stack_orchestrator/util.py b/stack_orchestrator/util.py index d3b733a2..97d48963 100644 --- a/stack_orchestrator/util.py +++ 
b/stack_orchestrator/util.py @@ -150,6 +150,12 @@ def get_parsed_deployment_spec(spec_file): sys.exit(1) +def stack_is_external(stack: str): + # Bit of a hack: if the supplied stack string represents + # a path that exists then we assume it must be external + return Path(stack).exists() if stack is not None else False + + def get_yaml(): # See: https://stackoverflow.com/a/45701840/1701505 yaml = ruamel.yaml.YAML() @@ -167,3 +173,8 @@ def global_options(ctx): # TODO: hack def global_options2(ctx): return ctx.parent.obj + + +def error_exit(s): + print(f"ERROR: {s}") + sys.exit(1) From 2059d67dcabaf34f27fbd45ca6d325b28754a397 Mon Sep 17 00:00:00 2001 From: Thomas E Lackey Date: Wed, 15 Nov 2023 11:54:27 -0600 Subject: [PATCH 33/62] Add run-webapp command. (#651) --- .../container-build/cerc-nextjs-base/build.sh | 2 +- .../deploy/compose/deploy_docker.py | 5 +- stack_orchestrator/deploy/deployer.py | 2 +- stack_orchestrator/deploy/k8s/deploy_k8s.py | 2 +- stack_orchestrator/deploy/run_webapp.py | 59 +++++++++++++++++++ stack_orchestrator/main.py | 2 + 6 files changed, 67 insertions(+), 5 deletions(-) create mode 100644 stack_orchestrator/deploy/run_webapp.py diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/build.sh b/stack_orchestrator/data/container-build/cerc-nextjs-base/build.sh index 342dd3cd..cca8d64b 100755 --- a/stack_orchestrator/data/container-build/cerc-nextjs-base/build.sh +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/build.sh @@ -23,7 +23,7 @@ Built host container for $CERC_CONTAINER_BUILD_WORK_DIR with tag: To test locally run: - docker run -p 3000:3000 --env-file /path/to/environment.env $CERC_CONTAINER_BUILD_TAG + laconic-so run-webapp --image $CERC_CONTAINER_BUILD_TAG --env-file /path/to/environment.env EOF fi diff --git a/stack_orchestrator/deploy/compose/deploy_docker.py b/stack_orchestrator/deploy/compose/deploy_docker.py index 79ab1482..b37f3cf0 100644 --- a/stack_orchestrator/deploy/compose/deploy_docker.py +++ 
b/stack_orchestrator/deploy/compose/deploy_docker.py @@ -61,9 +61,10 @@ class DockerDeployer(Deployer): except DockerException as e: raise DeployerException(e) - def run(self, image, command, user, volumes, entrypoint=None): + def run(self, image: str, command=None, user=None, volumes=None, entrypoint=None, env={}, detach=False): try: - return self.docker.run(image=image, command=command, user=user, volumes=volumes, entrypoint=entrypoint) + return self.docker.run(image=image, command=command, user=user, volumes=volumes, + entrypoint=entrypoint, envs=env, detach=detach, publish_all=True) except DockerException as e: raise DeployerException(e) diff --git a/stack_orchestrator/deploy/deployer.py b/stack_orchestrator/deploy/deployer.py index 68b0088a..79379c3d 100644 --- a/stack_orchestrator/deploy/deployer.py +++ b/stack_orchestrator/deploy/deployer.py @@ -44,7 +44,7 @@ class Deployer(ABC): pass @abstractmethod - def run(self, image, command, user, volumes, entrypoint): + def run(self, image: str, command=None, user=None, volumes=None, entrypoint=None, env={}, detach=False): pass diff --git a/stack_orchestrator/deploy/k8s/deploy_k8s.py b/stack_orchestrator/deploy/k8s/deploy_k8s.py index bc256b6b..627d6e0b 100644 --- a/stack_orchestrator/deploy/k8s/deploy_k8s.py +++ b/stack_orchestrator/deploy/k8s/deploy_k8s.py @@ -120,7 +120,7 @@ class K8sDeployer(Deployer): log_data = self.core_api.read_namespaced_pod_log(k8s_pod_name, namespace="default", container="test") return log_stream_from_string(log_data) - def run(self, image, command, user, volumes, entrypoint=None): + def run(self, image: str, command=None, user=None, volumes=None, entrypoint=None, env={}, detach=False): # We need to figure out how to do this -- check why we're being called first pass diff --git a/stack_orchestrator/deploy/run_webapp.py b/stack_orchestrator/deploy/run_webapp.py new file mode 100644 index 00000000..8b1073b1 --- /dev/null +++ b/stack_orchestrator/deploy/run_webapp.py @@ -0,0 +1,59 @@ +# 
Copyright © 2022, 2023 Vulcanize + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Builds webapp containers + +# env vars: +# CERC_REPO_BASE_DIR defaults to ~/cerc + +# TODO: display the available list of containers; allow re-build of either all or specific containers + +import hashlib +import click + +from dotenv import dotenv_values +from stack_orchestrator.deploy.deployer_factory import getDeployer + + +@click.command() +@click.option("--image", help="image to deploy", required=True) +@click.option("--deploy-to", default="compose", help="deployment type ([Docker] 'compose' or 'k8s')") +@click.option("--env-file", help="environment file for webapp") +@click.pass_context +def command(ctx, image, deploy_to, env_file): + '''build the specified webapp container''' + + env = {} + if env_file: + env = dotenv_values(env_file) + + unique_cluster_descriptor = f"{image},{env}" + hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest() + cluster = f"laconic-webapp-{hash}" + + deployer = getDeployer(deploy_to, + deployment_dir=None, + compose_files=None, + compose_project_name=cluster, + compose_env_file=None) + + container = deployer.run(image, command=[], user=None, volumes=[], entrypoint=None, env=env, detach=True) + + # Make configurable? + webappPort = "3000/tcp" + # TODO: This assumes a Docker container object... 
+ if webappPort in container.network_settings.ports: + mapping = container.network_settings.ports[webappPort][0] + print(f"""Image: {image}\nID: {container.id}\nURL: http://localhost:{mapping['HostPort']}""") diff --git a/stack_orchestrator/main.py b/stack_orchestrator/main.py index 0b0585e0..8ee8ae61 100644 --- a/stack_orchestrator/main.py +++ b/stack_orchestrator/main.py @@ -20,6 +20,7 @@ from stack_orchestrator.repos import setup_repositories from stack_orchestrator.build import build_containers from stack_orchestrator.build import build_npms from stack_orchestrator.build import build_webapp +from stack_orchestrator.deploy import run_webapp from stack_orchestrator.deploy import deploy from stack_orchestrator import version from stack_orchestrator.deploy import deployment @@ -50,6 +51,7 @@ cli.add_command(setup_repositories.command, "setup-repositories") cli.add_command(build_containers.command, "build-containers") cli.add_command(build_npms.command, "build-npms") cli.add_command(build_webapp.command, "build-webapp") +cli.add_command(run_webapp.command, "run-webapp") cli.add_command(deploy.command, "deploy") # deploy is an alias for deploy-system cli.add_command(deploy.command, "deploy-system") cli.add_command(deployment.command, "deployment") From d37f80553d1a7048f30d5312ed4057e79c9b0b69 Mon Sep 17 00:00:00 2001 From: Thomas E Lackey Date: Wed, 15 Nov 2023 12:28:07 -0600 Subject: [PATCH 34/62] Add webapp doc (#652) --- docs/webapp.md | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 docs/webapp.md diff --git a/docs/webapp.md b/docs/webapp.md new file mode 100644 index 00000000..19122d46 --- /dev/null +++ b/docs/webapp.md @@ -0,0 +1,42 @@ +### Building and Running Webapps + +It is possible to build and run webapps using the `build-webapp` and `run-webapp` subcommand. 
+ +To make it easier to build once, and deploy to with varying configuration, compilation and static +page generation are separated in the `build-webapp` and `run-webapp` steps, and the use of the +environment variables via `process.env` is detected at compile-time and placeholder substituted +which will be filled in at runtime. + +This offers much more flexibilty in configuration and deployment than standard build methods. + +## Build + +``` +$ cd ~/cerc +$ git clone git@git.vdb.to:cerc-io/test-progressive-web-app.git +$ laconic-so build-webapp --source-repo ~/cerc/test-progressive-web-app +... +Successfully tagged cerc/test-progressive-web-app:local + + +################################################################# + +Built host container for ~/cerc/test-progressive-web-app with tag: + + cerc/test-progressive-web-app:local + +To test locally run: + + laconic-so run-webapp --image cerc/test-progressive-web-app:local --env-file /path/to/environment.env + +``` + +## Run + +``` +$ laconic-so run-webapp --image cerc/test-progressive-web-app:local --env-file ~/tmp/env.igloo + +Image: cerc/test-progressive-web-app:local +ID: 4c6e893bf436b3e91a2b92ce37e30e499685131705700bd92a90d2eb14eefd05 +URL: http://localhost:32768 +``` From a13f841f34eff6e4866c9e36a2c5a86215768b05 Mon Sep 17 00:00:00 2001 From: Thomas E Lackey Date: Wed, 15 Nov 2023 12:37:30 -0600 Subject: [PATCH 35/62] Update webapp.md --- docs/webapp.md | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/docs/webapp.md b/docs/webapp.md index 19122d46..ed321cb4 100644 --- a/docs/webapp.md +++ b/docs/webapp.md @@ -1,25 +1,21 @@ ### Building and Running Webapps -It is possible to build and run webapps using the `build-webapp` and `run-webapp` subcommand. +It is possible to build and run Next.js webapps using the `build-webapp` and `run-webapp` subcommands. 
-To make it easier to build once, and deploy to with varying configuration, compilation and static
-page generation are separated in the `build-webapp` and `run-webapp` steps, and the use of the
-environment variables via `process.env` is detected at compile-time and placeholder substituted
-which will be filled in at runtime.
+To make it easier to build once and deploy into different environments and with different configuration,
+compilation and static page generation are separated in the `build-webapp` and `run-webapp` steps.
 
-This offers much more flexibilty in configuration and deployment than standard build methods.
+This offers much more flexibility than standard Next.js build methods, since any environment variables accessed
+via `process.env`, whether for pages or for API, will have values drawn from their runtime deployment environment,
+not their build environment.
 
-## Build
+## Building
 
 ```
 $ cd ~/cerc
 $ git clone git@git.vdb.to:cerc-io/test-progressive-web-app.git
 $ laconic-so build-webapp --source-repo ~/cerc/test-progressive-web-app
 ...
-Successfully tagged cerc/test-progressive-web-app:local
-
-
-#################################################################
 
 Built host container for ~/cerc/test-progressive-web-app with tag:
 
@@ -31,7 +27,7 @@ To test locally run:
 
 ```
 
-## Run
+## Running
 
 ```
 $ laconic-so run-webapp --image cerc/test-progressive-web-app:local --env-file ~/tmp/env.igloo
 

From 99005657148dc2cd1e3a1a23cd0dcc403c49ccd2 Mon Sep 17 00:00:00 2001
From: Thomas E Lackey
Date: Wed, 15 Nov 2023 12:48:58 -0600
Subject: [PATCH 36/62] Update webapp.md

---
 docs/webapp.md | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/docs/webapp.md b/docs/webapp.md
index ed321cb4..ec1ee252 100644
--- a/docs/webapp.md
+++ b/docs/webapp.md
@@ -11,6 +11,11 @@ not their build environment.
 
 ## Building
 
+Building usually requires no additional configuration. 
By default, the Next.js version specified in `package.json`
+is used, and either `yarn` or `npm` will be used automatically depending on which lock files are present. These
+can be overridden with the build arguments `CERC_NEXT_VERSION` and `CERC_BUILD_TOOL` respectively. For example: `--extra-build-args "--build-arg CERC_NEXT_VERSION=13.4.12"`
+
+**Example**:
 ```
 $ cd ~/cerc
 $ git clone git@git.vdb.to:cerc-io/test-progressive-web-app.git
@@ -29,10 +34,21 @@ To test locally run:
 
 ## Running
 
+With `run-webapp` a new container will be launched with runtime configuration provided by `--env-file` (if specified) and published on an available port. Multiple instances can be launched with different configuration.
+
+**Example**:
 ```
-$ laconic-so run-webapp --image cerc/test-progressive-web-app:local --env-file ~/tmp/env.igloo
+# Production env
+$ laconic-so run-webapp --image cerc/test-progressive-web-app:local --env-file /path/to/environment/staging.env
 
 Image: cerc/test-progressive-web-app:local
 ID: 4c6e893bf436b3e91a2b92ce37e30e499685131705700bd92a90d2eb14eefd05
 URL: http://localhost:32768
+
+# Dev env
+$ laconic-so run-webapp --image cerc/test-progressive-web-app:local --env-file /path/to/environment/dev.env
+
+Image: cerc/test-progressive-web-app:local
+ID: 9ab96494f563aafb6c057d88df58f9eca81b90f8721a4e068493a289a976051c
+URL: http://localhost:32769
 ```

From 1e9d24a8ce6c6beca572fc5d93258499296db043 Mon Sep 17 00:00:00 2001
From: Thomas E Lackey
Date: Wed, 15 Nov 2023 12:52:34 -0600
Subject: [PATCH 37/62] Update webapp.md

---
 docs/webapp.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/webapp.md b/docs/webapp.md
index ec1ee252..3b1d0609 100644
--- a/docs/webapp.md
+++ b/docs/webapp.md
@@ -39,7 +39,7 @@ With `run-webapp` a new container will be launched with runtime configuration pr
 **Example**:
 ```
 # Production env
-$ laconic-so run-webapp --image cerc/test-progressive-web-app:local --env-file /path/to/environment/staging.env
+$ laconic-so 
run-webapp --image cerc/test-progressive-web-app:local --env-file /path/to/environment/production.env Image: cerc/test-progressive-web-app:local ID: 4c6e893bf436b3e91a2b92ce37e30e499685131705700bd92a90d2eb14eefd05 From 70529c43e779f2ed81d750d61c95192bf9d7e20e Mon Sep 17 00:00:00 2001 From: Nabarun Gogoi Date: Thu, 16 Nov 2023 16:27:41 +0530 Subject: [PATCH 38/62] Upgrade merkl and sushiswap watcher versions (#654) --- stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml | 2 +- stack_orchestrator/data/stacks/sushiswap-v3/stack.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml b/stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml index 8b010a24..8f5cb7ee 100644 --- a/stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml +++ b/stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml @@ -2,7 +2,7 @@ version: "1.0" name: merkl-sushiswap-v3 description: "SushiSwap v3 watcher stack" repos: - - github.com/cerc-io/merkl-sushiswap-v3-watcher-ts@v0.1.1 + - github.com/cerc-io/merkl-sushiswap-v3-watcher-ts@v0.1.2 containers: - cerc/watcher-merkl-sushiswap-v3 pods: diff --git a/stack_orchestrator/data/stacks/sushiswap-v3/stack.yml b/stack_orchestrator/data/stacks/sushiswap-v3/stack.yml index 66065b5d..05350996 100644 --- a/stack_orchestrator/data/stacks/sushiswap-v3/stack.yml +++ b/stack_orchestrator/data/stacks/sushiswap-v3/stack.yml @@ -2,7 +2,7 @@ version: "1.0" name: sushiswap-v3 description: "SushiSwap v3 watcher stack" repos: - - github.com/cerc-io/sushiswap-v3-watcher-ts@v0.1.1 + - github.com/cerc-io/sushiswap-v3-watcher-ts@v0.1.2 containers: - cerc/watcher-sushiswap-v3 pods: From 5c80887215481c17ee41a68a3559f66f4fa2e925 Mon Sep 17 00:00:00 2001 From: Thomas E Lackey Date: Thu, 16 Nov 2023 12:58:03 -0600 Subject: [PATCH 39/62] Fix missing tty parameter. 
(#653) --- stack_orchestrator/deploy/deploy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack_orchestrator/deploy/deploy.py b/stack_orchestrator/deploy/deploy.py index 1c467067..f931a0d0 100644 --- a/stack_orchestrator/deploy/deploy.py +++ b/stack_orchestrator/deploy/deploy.py @@ -155,7 +155,7 @@ def exec_operation(ctx, extra_args): if global_context.verbose: print(f"Running compose exec {service_name} {command_to_exec}") try: - ctx.obj.deployer.execute(service_name, command_to_exec, envs=container_exec_env) + ctx.obj.deployer.execute(service_name, command_to_exec, envs=container_exec_env, tty=True) except DeployerException: print("container command returned error exit status") From c9c6a0eee3816c74e01b8cda1748daf0bd7654a5 Mon Sep 17 00:00:00 2001 From: David Boreham Date: Mon, 20 Nov 2023 09:12:57 -0700 Subject: [PATCH 40/62] Changes for remote k8s (#655) --- stack_orchestrator/constants.py | 6 ++ .../deploy/compose/deploy_docker.py | 7 ++- stack_orchestrator/deploy/deploy.py | 6 +- stack_orchestrator/deploy/deployer_factory.py | 13 ++-- stack_orchestrator/deploy/deployment.py | 7 ++- .../deploy/deployment_create.py | 62 ++++++++++++------- stack_orchestrator/deploy/k8s/deploy_k8s.py | 54 ++++++++++------ tests/k8s-deploy/run-deploy-test.sh | 2 +- 8 files changed, 101 insertions(+), 56 deletions(-) diff --git a/stack_orchestrator/constants.py b/stack_orchestrator/constants.py index 1e24794c..f15e8870 100644 --- a/stack_orchestrator/constants.py +++ b/stack_orchestrator/constants.py @@ -14,3 +14,9 @@ # along with this program. If not, see . 
stack_file_name = "stack.yml" +compose_deploy_type = "compose" +k8s_kind_deploy_type = "k8s-kind" +k8s_deploy_type = "k8s" +kube_config_key = "kube-config" +kind_config_filename = "kind-config.yml" +kube_config_filename = "kubeconfig.yml" diff --git a/stack_orchestrator/deploy/compose/deploy_docker.py b/stack_orchestrator/deploy/compose/deploy_docker.py index b37f3cf0..fc249ebc 100644 --- a/stack_orchestrator/deploy/compose/deploy_docker.py +++ b/stack_orchestrator/deploy/compose/deploy_docker.py @@ -20,10 +20,12 @@ from stack_orchestrator.deploy.deployer import Deployer, DeployerException, Depl class DockerDeployer(Deployer): name: str = "compose" + type: str - def __init__(self, deployment_dir, compose_files, compose_project_name, compose_env_file) -> None: + def __init__(self, type, deployment_dir, compose_files, compose_project_name, compose_env_file) -> None: self.docker = DockerClient(compose_files=compose_files, compose_project_name=compose_project_name, compose_env_file=compose_env_file) + self.type = type def up(self, detach, services): try: @@ -70,9 +72,8 @@ class DockerDeployer(Deployer): class DockerDeployerConfigGenerator(DeployerConfigGenerator): - config_file_name: str = "kind-config.yml" - def __init__(self) -> None: + def __init__(self, type: str) -> None: super().__init__() # Nothing needed at present for the docker deployer diff --git a/stack_orchestrator/deploy/deploy.py b/stack_orchestrator/deploy/deploy.py index f931a0d0..c01a7e08 100644 --- a/stack_orchestrator/deploy/deploy.py +++ b/stack_orchestrator/deploy/deploy.py @@ -39,7 +39,7 @@ from stack_orchestrator.deploy.deployment_create import setup as deployment_setu @click.option("--exclude", help="don\'t start these components") @click.option("--env-file", help="env file to be used") @click.option("--cluster", help="specify a non-default cluster name") -@click.option("--deploy-to", help="cluster system to deploy to (compose or k8s)") +@click.option("--deploy-to", help="cluster system to 
deploy to (compose or k8s or k8s-kind)") @click.pass_context def command(ctx, include, exclude, env_file, cluster, deploy_to): '''deploy a stack''' @@ -62,11 +62,11 @@ def command(ctx, include, exclude, env_file, cluster, deploy_to): def create_deploy_context( - global_context, deployment_context: DeploymentContext, stack, include, exclude, cluster, env_file, deployer): + global_context, deployment_context: DeploymentContext, stack, include, exclude, cluster, env_file, deploy_to): cluster_context = _make_cluster_context(global_context, stack, include, exclude, cluster, env_file) deployment_dir = deployment_context.deployment_dir if deployment_context else None # See: https://gabrieldemarmiesse.github.io/python-on-whales/sub-commands/compose/ - deployer = getDeployer(deployer, deployment_dir, compose_files=cluster_context.compose_files, + deployer = getDeployer(deploy_to, deployment_dir, compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster, compose_env_file=cluster_context.env_file) return DeployCommandContext(stack, cluster_context, deployer) diff --git a/stack_orchestrator/deploy/deployer_factory.py b/stack_orchestrator/deploy/deployer_factory.py index 5d515418..de2808c5 100644 --- a/stack_orchestrator/deploy/deployer_factory.py +++ b/stack_orchestrator/deploy/deployer_factory.py @@ -13,23 +13,24 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . 
+from stack_orchestrator import constants from stack_orchestrator.deploy.k8s.deploy_k8s import K8sDeployer, K8sDeployerConfigGenerator from stack_orchestrator.deploy.compose.deploy_docker import DockerDeployer, DockerDeployerConfigGenerator def getDeployerConfigGenerator(type: str): if type == "compose" or type is None: - return DockerDeployerConfigGenerator() - elif type == "k8s": - return K8sDeployerConfigGenerator() + return DockerDeployerConfigGenerator(type) + elif type == constants.k8s_deploy_type or type == constants.k8s_kind_deploy_type: + return K8sDeployerConfigGenerator(type) else: print(f"ERROR: deploy-to {type} is not valid") def getDeployer(type: str, deployment_dir, compose_files, compose_project_name, compose_env_file): if type == "compose" or type is None: - return DockerDeployer(deployment_dir, compose_files, compose_project_name, compose_env_file) - elif type == "k8s": - return K8sDeployer(deployment_dir, compose_files, compose_project_name, compose_env_file) + return DockerDeployer(type, deployment_dir, compose_files, compose_project_name, compose_env_file) + elif type == type == constants.k8s_deploy_type or type == constants.k8s_kind_deploy_type: + return K8sDeployer(type, deployment_dir, compose_files, compose_project_name, compose_env_file) else: print(f"ERROR: deploy-to {type} is not valid") diff --git a/stack_orchestrator/deploy/deployment.py b/stack_orchestrator/deploy/deployment.py index e22d7dcc..586a2b2a 100644 --- a/stack_orchestrator/deploy/deployment.py +++ b/stack_orchestrator/deploy/deployment.py @@ -16,6 +16,7 @@ import click from pathlib import Path import sys +from stack_orchestrator import constants from stack_orchestrator.deploy.deploy import up_operation, down_operation, ps_operation, port_operation from stack_orchestrator.deploy.deploy import exec_operation, logs_operation, create_deploy_context from stack_orchestrator.deploy.deployment_context import DeploymentContext @@ -50,8 +51,12 @@ def make_deploy_context(ctx): 
stack_file_path = context.get_stack_file() env_file = context.get_env_file() cluster_name = context.get_cluster_name() + if "deploy-to" in context.spec.obj: + deployment_type = context.spec.obj["deploy-to"] + else: + deployment_type = constants.compose_deploy_type return create_deploy_context(ctx.parent.parent.obj, context, stack_file_path, None, None, cluster_name, env_file, - context.spec.obj["deploy-to"]) + deployment_type) @command.command() diff --git a/stack_orchestrator/deploy/deployment_create.py b/stack_orchestrator/deploy/deployment_create.py index c00c0dc6..c9ba3a10 100644 --- a/stack_orchestrator/deploy/deployment_create.py +++ b/stack_orchestrator/deploy/deployment_create.py @@ -21,16 +21,17 @@ from typing import List import random from shutil import copy, copyfile, copytree import sys +from stack_orchestrator import constants from stack_orchestrator.util import (get_stack_file_path, get_parsed_deployment_spec, get_parsed_stack_config, global_options, get_yaml, get_pod_list, get_pod_file_path, pod_has_scripts, - get_pod_script_paths, get_plugin_code_paths) + get_pod_script_paths, get_plugin_code_paths, error_exit) from stack_orchestrator.deploy.deploy_types import LaconicStackSetupCommand from stack_orchestrator.deploy.deployer_factory import getDeployerConfigGenerator from stack_orchestrator.deploy.deployment_context import DeploymentContext def _make_default_deployment_dir(): - return "deployment-001" + return Path("deployment-001") def _get_ports(stack): @@ -248,17 +249,21 @@ def _parse_config_variables(variable_values: str): @click.command() @click.option("--config", help="Provide config variables for the deployment") +@click.option("--kube-config", help="Provide a config file for a k8s deployment") @click.option("--output", required=True, help="Write yaml spec file here") @click.option("--map-ports-to-host", required=False, help="Map ports to the host as one of: any-variable-random (default), " "localhost-same, any-same, localhost-fixed-random, 
any-fixed-random") @click.pass_context -def init(ctx, config, output, map_ports_to_host): +def init(ctx, config, kube_config, output, map_ports_to_host): yaml = get_yaml() stack = global_options(ctx).stack debug = global_options(ctx).debug + deployer_type = ctx.obj.deployer.type default_spec_file_content = call_stack_deploy_init(ctx.obj) - spec_file_content = {"stack": stack, "deploy-to": ctx.obj.deployer.name} + spec_file_content = {"stack": stack, "deploy-to": deployer_type} + if deployer_type == "k8s": + spec_file_content.update({constants.kube_config_key: kube_config}) if default_spec_file_content: spec_file_content.update(default_spec_file_content) config_variables = _parse_config_variables(config) @@ -296,6 +301,12 @@ def _write_config_file(spec_file: Path, config_env_file: Path): output_file.write(f"{variable_name}={variable_value}\n") +def _write_kube_config_file(external_path: Path, internal_path: Path): + if not external_path.exists(): + error_exit(f"Kube config file {external_path} does not exist") + copyfile(external_path, internal_path) + + def _copy_files_to_directory(file_paths: List[Path], directory: Path): for path in file_paths: # Using copy to preserve the execute bit @@ -310,29 +321,34 @@ def _copy_files_to_directory(file_paths: List[Path], directory: Path): @click.option("--initial-peers", help="Initial set of persistent peers") @click.pass_context def create(ctx, spec_file, deployment_dir, network_dir, initial_peers): - # This function fails with a useful error message if the file doens't exist parsed_spec = get_parsed_deployment_spec(spec_file) stack_name = parsed_spec["stack"] + deployment_type = parsed_spec["deploy-to"] stack_file = get_stack_file_path(stack_name) parsed_stack = get_parsed_stack_config(stack_name) if global_options(ctx).debug: print(f"parsed spec: {parsed_spec}") if deployment_dir is None: - deployment_dir = _make_default_deployment_dir() - if os.path.exists(deployment_dir): - print(f"Error: {deployment_dir} already 
exists") - sys.exit(1) - os.mkdir(deployment_dir) + deployment_dir_path = _make_default_deployment_dir() + else: + deployment_dir_path = Path(deployment_dir) + if deployment_dir_path.exists(): + error_exit(f"{deployment_dir_path} already exists") + os.mkdir(deployment_dir_path) # Copy spec file and the stack file into the deployment dir - copyfile(spec_file, os.path.join(deployment_dir, "spec.yml")) - copyfile(stack_file, os.path.join(deployment_dir, os.path.basename(stack_file))) + copyfile(spec_file, deployment_dir_path.joinpath("spec.yml")) + copyfile(stack_file, deployment_dir_path.joinpath(os.path.basename(stack_file))) # Copy any config varibles from the spec file into an env file suitable for compose - _write_config_file(spec_file, os.path.join(deployment_dir, "config.env")) + _write_config_file(spec_file, deployment_dir_path.joinpath("config.env")) + # Copy any k8s config file into the deployment dir + if deployment_type == "k8s": + _write_kube_config_file(Path(parsed_spec[constants.kube_config_key]), + deployment_dir_path.joinpath(constants.kube_config_filename)) # Copy the pod files into the deployment dir, fixing up content pods = get_pod_list(parsed_stack) - destination_compose_dir = os.path.join(deployment_dir, "compose") + destination_compose_dir = deployment_dir_path.joinpath("compose") os.mkdir(destination_compose_dir) - destination_pods_dir = os.path.join(deployment_dir, "pods") + destination_pods_dir = deployment_dir_path.joinpath("pods") os.mkdir(destination_pods_dir) data_dir = Path(__file__).absolute().parent.parent.joinpath("data") yaml = get_yaml() @@ -340,12 +356,12 @@ def create(ctx, spec_file, deployment_dir, network_dir, initial_peers): pod_file_path = get_pod_file_path(parsed_stack, pod) parsed_pod_file = yaml.load(open(pod_file_path, "r")) extra_config_dirs = _find_extra_config_dirs(parsed_pod_file, pod) - destination_pod_dir = os.path.join(destination_pods_dir, pod) + destination_pod_dir = destination_pods_dir.joinpath(pod) 
os.mkdir(destination_pod_dir) if global_options(ctx).debug: print(f"extra config dirs: {extra_config_dirs}") _fixup_pod_file(parsed_pod_file, parsed_spec, destination_compose_dir) - with open(os.path.join(destination_compose_dir, "docker-compose-%s.yml" % pod), "w") as output_file: + with open(destination_compose_dir.joinpath("docker-compose-%s.yml" % pod), "w") as output_file: yaml.dump(parsed_pod_file, output_file) # Copy the config files for the pod, if any config_dirs = {pod} @@ -353,13 +369,13 @@ def create(ctx, spec_file, deployment_dir, network_dir, initial_peers): for config_dir in config_dirs: source_config_dir = data_dir.joinpath("config", config_dir) if os.path.exists(source_config_dir): - destination_config_dir = os.path.join(deployment_dir, "config", config_dir) + destination_config_dir = deployment_dir_path.joinpath("config", config_dir) # If the same config dir appears in multiple pods, it may already have been copied if not os.path.exists(destination_config_dir): copytree(source_config_dir, destination_config_dir) # Copy the script files for the pod, if any if pod_has_scripts(parsed_stack, pod): - destination_script_dir = os.path.join(destination_pod_dir, "scripts") + destination_script_dir = destination_pod_dir.joinpath("scripts") os.mkdir(destination_script_dir) script_paths = get_pod_script_paths(parsed_stack, pod) _copy_files_to_directory(script_paths, destination_script_dir) @@ -369,11 +385,11 @@ def create(ctx, spec_file, deployment_dir, network_dir, initial_peers): deployment_command_context = ctx.obj deployment_command_context.stack = stack_name deployment_context = DeploymentContext() - deployment_context.init(Path(deployment_dir)) + deployment_context.init(deployment_dir_path) # Call the deployer to generate any deployer-specific files (e.g. 
for kind) - deployer_config_generator = getDeployerConfigGenerator(parsed_spec["deploy-to"]) - # TODO: make deployment_dir a Path above - deployer_config_generator.generate(Path(deployment_dir)) + deployer_config_generator = getDeployerConfigGenerator(deployment_type) + # TODO: make deployment_dir_path a Path above + deployer_config_generator.generate(deployment_dir_path) call_stack_deploy_create(deployment_context, [network_dir, initial_peers]) diff --git a/stack_orchestrator/deploy/k8s/deploy_k8s.py b/stack_orchestrator/deploy/k8s/deploy_k8s.py index 627d6e0b..44eb9f0e 100644 --- a/stack_orchestrator/deploy/k8s/deploy_k8s.py +++ b/stack_orchestrator/deploy/k8s/deploy_k8s.py @@ -16,6 +16,7 @@ from pathlib import Path from kubernetes import client, config +from stack_orchestrator import constants from stack_orchestrator.deploy.deployer import Deployer, DeployerConfigGenerator from stack_orchestrator.deploy.k8s.helpers import create_cluster, destroy_cluster, load_images_into_kind from stack_orchestrator.deploy.k8s.helpers import pods_in_deployment, log_stream_from_string, generate_kind_config @@ -25,6 +26,7 @@ from stack_orchestrator.opts import opts class K8sDeployer(Deployer): name: str = "k8s" + type: str core_api: client.CoreV1Api apps_api: client.AppsV1Api k8s_namespace: str = "default" @@ -32,28 +34,35 @@ class K8sDeployer(Deployer): cluster_info : ClusterInfo deployment_dir: Path - def __init__(self, deployment_dir, compose_files, compose_project_name, compose_env_file) -> None: + def __init__(self, type, deployment_dir, compose_files, compose_project_name, compose_env_file) -> None: if (opts.o.debug): print(f"Deployment dir: {deployment_dir}") print(f"Compose files: {compose_files}") print(f"Project name: {compose_project_name}") print(f"Env file: {compose_env_file}") + print(f"Type: {type}") + self.type = type self.deployment_dir = deployment_dir self.kind_cluster_name = compose_project_name self.cluster_info = ClusterInfo() 
self.cluster_info.int(compose_files, compose_env_file) def connect_api(self): - config.load_kube_config(context=f"kind-{self.kind_cluster_name}") + if self.is_kind(): + config.load_kube_config(context=f"kind-{self.kind_cluster_name}") + else: + # Get the config file and pass to load_kube_config() + config.load_kube_config(config_file=self.deployment_dir.joinpath(constants.kube_config_filename).as_posix()) self.core_api = client.CoreV1Api() self.apps_api = client.AppsV1Api() def up(self, detach, services): - # Create the kind cluster - create_cluster(self.kind_cluster_name, self.deployment_dir.joinpath("kind-config.yml")) + if self.is_kind(): + # Create the kind cluster + create_cluster(self.kind_cluster_name, self.deployment_dir.joinpath(constants.kind_config_filename)) + # Ensure the referenced containers are copied into kind + load_images_into_kind(self.kind_cluster_name, self.cluster_info.image_set) self.connect_api() - # Ensure the referenced containers are copied into kind - load_images_into_kind(self.kind_cluster_name, self.cluster_info.image_set) # Create the host-path-mounted PVs for this deployment pvs = self.cluster_info.get_pvs() @@ -89,8 +98,9 @@ class K8sDeployer(Deployer): def down(self, timeout, volumes): # Delete the k8s objects - # Destroy the kind cluster - destroy_cluster(self.kind_cluster_name) + if self.is_kind(): + # Destroy the kind cluster + destroy_cluster(self.kind_cluster_name) def ps(self): self.connect_api() @@ -124,20 +134,26 @@ class K8sDeployer(Deployer): # We need to figure out how to do this -- check why we're being called first pass + def is_kind(self): + return self.type == "k8s-kind" + class K8sDeployerConfigGenerator(DeployerConfigGenerator): - config_file_name: str = "kind-config.yml" + type: str - def __init__(self) -> None: + def __init__(self, type: str) -> None: + self.type = type super().__init__() def generate(self, deployment_dir: Path): - # Check the file isn't already there - # Get the config file contents - content = 
generate_kind_config(deployment_dir) - if opts.o.debug: - print(f"kind config is: {content}") - config_file = deployment_dir.joinpath(self.config_file_name) - # Write the file - with open(config_file, "w") as output_file: - output_file.write(content) + # No need to do this for the remote k8s case + if self.type == "k8s-kind": + # Check the file isn't already there + # Get the config file contents + content = generate_kind_config(deployment_dir) + if opts.o.debug: + print(f"kind config is: {content}") + config_file = deployment_dir.joinpath(constants.kind_config_filename) + # Write the file + with open(config_file, "w") as output_file: + output_file.write(content) diff --git a/tests/k8s-deploy/run-deploy-test.sh b/tests/k8s-deploy/run-deploy-test.sh index 91c7890c..b7ee9dd0 100755 --- a/tests/k8s-deploy/run-deploy-test.sh +++ b/tests/k8s-deploy/run-deploy-test.sh @@ -21,7 +21,7 @@ mkdir -p $CERC_REPO_BASE_DIR # Test basic stack-orchestrator deploy test_deployment_dir=$CERC_REPO_BASE_DIR/test-deployment-dir test_deployment_spec=$CERC_REPO_BASE_DIR/test-deployment-spec.yml -$TEST_TARGET_SO --stack test deploy --deploy-to k8s init --output $test_deployment_spec --config CERC_TEST_PARAM_1=PASSED +$TEST_TARGET_SO --stack test deploy --deploy-to k8s-kind init --output $test_deployment_spec --config CERC_TEST_PARAM_1=PASSED # Check the file now exists if [ ! 
-f "$test_deployment_spec" ]; then echo "deploy init test: spec file not present" From f6624cb33a5a3b0d9a5d1fb013a9da0a18f386b1 Mon Sep 17 00:00:00 2001 From: David Boreham Date: Mon, 20 Nov 2023 20:23:55 -0700 Subject: [PATCH 41/62] Add image push command (#656) --- stack_orchestrator/constants.py | 2 + .../deploy/compose/deploy_docker.py | 3 +- stack_orchestrator/deploy/deploy.py | 13 +- stack_orchestrator/deploy/deploy_util.py | 30 ++++- stack_orchestrator/deploy/deployer_factory.py | 6 +- stack_orchestrator/deploy/deployment.py | 16 ++- .../deploy/deployment_create.py | 14 ++- stack_orchestrator/deploy/images.py | 62 +++++++++ stack_orchestrator/deploy/k8s/cluster_info.py | 24 ++-- stack_orchestrator/deploy/k8s/deploy_k8s.py | 39 +++++- stack_orchestrator/deploy/k8s/helpers.py | 15 +-- stack_orchestrator/deploy/run_webapp.py | 118 +++++++++--------- 12 files changed, 238 insertions(+), 104 deletions(-) create mode 100644 stack_orchestrator/deploy/images.py diff --git a/stack_orchestrator/constants.py b/stack_orchestrator/constants.py index f15e8870..aedc4f3c 100644 --- a/stack_orchestrator/constants.py +++ b/stack_orchestrator/constants.py @@ -18,5 +18,7 @@ compose_deploy_type = "compose" k8s_kind_deploy_type = "k8s-kind" k8s_deploy_type = "k8s" kube_config_key = "kube-config" +deploy_to_key = "deploy-to" +image_resigtry_key = "image-registry" kind_config_filename = "kind-config.yml" kube_config_filename = "kubeconfig.yml" diff --git a/stack_orchestrator/deploy/compose/deploy_docker.py b/stack_orchestrator/deploy/compose/deploy_docker.py index fc249ebc..4b4e7426 100644 --- a/stack_orchestrator/deploy/compose/deploy_docker.py +++ b/stack_orchestrator/deploy/compose/deploy_docker.py @@ -16,13 +16,14 @@ from pathlib import Path from python_on_whales import DockerClient, DockerException from stack_orchestrator.deploy.deployer import Deployer, DeployerException, DeployerConfigGenerator +from stack_orchestrator.deploy.deployment_context import DeploymentContext class 
DockerDeployer(Deployer): name: str = "compose" type: str - def __init__(self, type, deployment_dir, compose_files, compose_project_name, compose_env_file) -> None: + def __init__(self, type, deployment_context: DeploymentContext, compose_files, compose_project_name, compose_env_file) -> None: self.docker = DockerClient(compose_files=compose_files, compose_project_name=compose_project_name, compose_env_file=compose_env_file) self.type = type diff --git a/stack_orchestrator/deploy/deploy.py b/stack_orchestrator/deploy/deploy.py index c01a7e08..32c13a61 100644 --- a/stack_orchestrator/deploy/deploy.py +++ b/stack_orchestrator/deploy/deploy.py @@ -62,11 +62,16 @@ def command(ctx, include, exclude, env_file, cluster, deploy_to): def create_deploy_context( - global_context, deployment_context: DeploymentContext, stack, include, exclude, cluster, env_file, deploy_to): + global_context, + deployment_context: DeploymentContext, + stack, + include, + exclude, + cluster, + env_file, + deploy_to) -> DeployCommandContext: cluster_context = _make_cluster_context(global_context, stack, include, exclude, cluster, env_file) - deployment_dir = deployment_context.deployment_dir if deployment_context else None - # See: https://gabrieldemarmiesse.github.io/python-on-whales/sub-commands/compose/ - deployer = getDeployer(deploy_to, deployment_dir, compose_files=cluster_context.compose_files, + deployer = getDeployer(deploy_to, deployment_context, compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster, compose_env_file=cluster_context.env_file) return DeployCommandContext(stack, cluster_context, deployer) diff --git a/stack_orchestrator/deploy/deploy_util.py b/stack_orchestrator/deploy/deploy_util.py index 9829490d..8b812d3a 100644 --- a/stack_orchestrator/deploy/deploy_util.py +++ b/stack_orchestrator/deploy/deploy_util.py @@ -14,9 +14,10 @@ # along with this program. If not, see . 
import os -from typing import List +from typing import List, Any from stack_orchestrator.deploy.deploy_types import DeployCommandContext, VolumeMapping from stack_orchestrator.util import get_parsed_stack_config, get_yaml, get_compose_file_dir, get_pod_list +from stack_orchestrator.opts import opts def _container_image_from_service(stack: str, service: str): @@ -37,6 +38,33 @@ def _container_image_from_service(stack: str, service: str): return image_name +def parsed_pod_files_map_from_file_names(pod_files): + parsed_pod_yaml_map : Any = {} + for pod_file in pod_files: + with open(pod_file, "r") as pod_file_descriptor: + parsed_pod_file = get_yaml().load(pod_file_descriptor) + parsed_pod_yaml_map[pod_file] = parsed_pod_file + if opts.o.debug: + print(f"parsed_pod_yaml_map: {parsed_pod_yaml_map}") + return parsed_pod_yaml_map + + +def images_for_deployment(pod_files: List[str]): + image_set = set() + parsed_pod_yaml_map = parsed_pod_files_map_from_file_names(pod_files) + # Find the set of images in the pods + for pod_name in parsed_pod_yaml_map: + pod = parsed_pod_yaml_map[pod_name] + services = pod["services"] + for service_name in services: + service_info = services[service_name] + image = service_info["image"] + image_set.add(image) + if opts.o.debug: + print(f"image_set: {image_set}") + return image_set + + def _volumes_to_docker(mounts: List[VolumeMapping]): # Example from doc: [("/", "/host"), ("/etc/hosts", "/etc/hosts", "rw")] result = [] diff --git a/stack_orchestrator/deploy/deployer_factory.py b/stack_orchestrator/deploy/deployer_factory.py index de2808c5..959c1b7a 100644 --- a/stack_orchestrator/deploy/deployer_factory.py +++ b/stack_orchestrator/deploy/deployer_factory.py @@ -27,10 +27,10 @@ def getDeployerConfigGenerator(type: str): print(f"ERROR: deploy-to {type} is not valid") -def getDeployer(type: str, deployment_dir, compose_files, compose_project_name, compose_env_file): +def getDeployer(type: str, deployment_context, compose_files, 
compose_project_name, compose_env_file): if type == "compose" or type is None: - return DockerDeployer(type, deployment_dir, compose_files, compose_project_name, compose_env_file) + return DockerDeployer(type, deployment_context, compose_files, compose_project_name, compose_env_file) elif type == type == constants.k8s_deploy_type or type == constants.k8s_kind_deploy_type: - return K8sDeployer(type, deployment_dir, compose_files, compose_project_name, compose_env_file) + return K8sDeployer(type, deployment_context, compose_files, compose_project_name, compose_env_file) else: print(f"ERROR: deploy-to {type} is not valid") diff --git a/stack_orchestrator/deploy/deployment.py b/stack_orchestrator/deploy/deployment.py index 586a2b2a..8d74a62d 100644 --- a/stack_orchestrator/deploy/deployment.py +++ b/stack_orchestrator/deploy/deployment.py @@ -17,8 +17,10 @@ import click from pathlib import Path import sys from stack_orchestrator import constants +from stack_orchestrator.deploy.images import push_images_operation from stack_orchestrator.deploy.deploy import up_operation, down_operation, ps_operation, port_operation from stack_orchestrator.deploy.deploy import exec_operation, logs_operation, create_deploy_context +from stack_orchestrator.deploy.deploy_types import DeployCommandContext from stack_orchestrator.deploy.deployment_context import DeploymentContext @@ -46,13 +48,13 @@ def command(ctx, dir): ctx.obj = deployment_context -def make_deploy_context(ctx): +def make_deploy_context(ctx) -> DeployCommandContext: context: DeploymentContext = ctx.obj stack_file_path = context.get_stack_file() env_file = context.get_env_file() cluster_name = context.get_cluster_name() - if "deploy-to" in context.spec.obj: - deployment_type = context.spec.obj["deploy-to"] + if constants.deploy_to_key in context.spec.obj: + deployment_type = context.spec.obj[constants.deploy_to_key] else: deployment_type = constants.compose_deploy_type return create_deploy_context(ctx.parent.parent.obj, 
context, stack_file_path, None, None, cluster_name, env_file, @@ -109,6 +111,14 @@ def ps(ctx): ps_operation(ctx) +@command.command() +@click.pass_context +def push_images(ctx): + deploy_command_context: DeployCommandContext = make_deploy_context(ctx) + deployment_context: DeploymentContext = ctx.obj + push_images_operation(deploy_command_context, deployment_context) + + @command.command() @click.argument('extra_args', nargs=-1) # help: command: port @click.pass_context diff --git a/stack_orchestrator/deploy/deployment_create.py b/stack_orchestrator/deploy/deployment_create.py index c9ba3a10..e999c1df 100644 --- a/stack_orchestrator/deploy/deployment_create.py +++ b/stack_orchestrator/deploy/deployment_create.py @@ -250,20 +250,28 @@ def _parse_config_variables(variable_values: str): @click.command() @click.option("--config", help="Provide config variables for the deployment") @click.option("--kube-config", help="Provide a config file for a k8s deployment") +@click.option("--image-registry", help="Provide a container image registry url for this k8s cluster") @click.option("--output", required=True, help="Write yaml spec file here") @click.option("--map-ports-to-host", required=False, help="Map ports to the host as one of: any-variable-random (default), " "localhost-same, any-same, localhost-fixed-random, any-fixed-random") @click.pass_context -def init(ctx, config, kube_config, output, map_ports_to_host): +def init(ctx, config, kube_config, image_registry, output, map_ports_to_host): yaml = get_yaml() stack = global_options(ctx).stack debug = global_options(ctx).debug deployer_type = ctx.obj.deployer.type default_spec_file_content = call_stack_deploy_init(ctx.obj) - spec_file_content = {"stack": stack, "deploy-to": deployer_type} + spec_file_content = {"stack": stack, constants.deploy_to_key: deployer_type} if deployer_type == "k8s": spec_file_content.update({constants.kube_config_key: kube_config}) + spec_file_content.update({constants.image_resigtry_key: 
image_registry}) + else: + # Check for --kube-config supplied for non-relevant deployer types + if kube_config is not None: + error_exit(f"--kube-config is not allowed with a {deployer_type} deployment") + if image_registry is not None: + error_exit(f"--image-registry is not allowed with a {deployer_type} deployment") if default_spec_file_content: spec_file_content.update(default_spec_file_content) config_variables = _parse_config_variables(config) @@ -323,7 +331,7 @@ def _copy_files_to_directory(file_paths: List[Path], directory: Path): def create(ctx, spec_file, deployment_dir, network_dir, initial_peers): parsed_spec = get_parsed_deployment_spec(spec_file) stack_name = parsed_spec["stack"] - deployment_type = parsed_spec["deploy-to"] + deployment_type = parsed_spec[constants.deploy_to_key] stack_file = get_stack_file_path(stack_name) parsed_stack = get_parsed_stack_config(stack_name) if global_options(ctx).debug: diff --git a/stack_orchestrator/deploy/images.py b/stack_orchestrator/deploy/images.py new file mode 100644 index 00000000..ddbb33f7 --- /dev/null +++ b/stack_orchestrator/deploy/images.py @@ -0,0 +1,62 @@ +# Copyright © 2023 Vulcanize + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +from typing import Set + +from python_on_whales import DockerClient + +from stack_orchestrator import constants +from stack_orchestrator.opts import opts +from stack_orchestrator.deploy.deployment_context import DeploymentContext +from stack_orchestrator.deploy.deploy_types import DeployCommandContext +from stack_orchestrator.deploy.deploy_util import images_for_deployment + + +def _image_needs_pushed(image: str): + # TODO: this needs to be more intelligent + return image.endswith(":local") + + +def remote_tag_for_image(image: str, remote_repo_url: str): + # Turns image tags of the form: foo/bar:local into remote.repo/org/bar:deploy + (org, image_name_with_version) = image.split("/") + (image_name, image_version) = image_name_with_version.split(":") + if image_version == "local": + return f"{remote_repo_url}/{image_name}:deploy" + else: + return image + + +# TODO: needs lots of error handling +def push_images_operation(command_context: DeployCommandContext, deployment_context: DeploymentContext): + # Get the list of images for the stack + cluster_context = command_context.cluster_context + images: Set[str] = images_for_deployment(cluster_context.compose_files) + # Tag the images for the remote repo + remote_repo_url = deployment_context.spec.obj[constants.image_resigtry_key] + docker = DockerClient() + for image in images: + if _image_needs_pushed(image): + remote_tag = remote_tag_for_image(image, remote_repo_url) + if opts.o.verbose: + print(f"Tagging {image} to {remote_tag}") + docker.image.tag(image, remote_tag) + # Run docker push commands to upload + for image in images: + if _image_needs_pushed(image): + remote_tag = remote_tag_for_image(image, remote_repo_url) + if opts.o.verbose: + print(f"Pushing image {remote_tag}") + docker.image.push(remote_tag) diff --git a/stack_orchestrator/deploy/k8s/cluster_info.py b/stack_orchestrator/deploy/k8s/cluster_info.py index 9275db2b..ff052bf9 100644 --- a/stack_orchestrator/deploy/k8s/cluster_info.py +++ 
b/stack_orchestrator/deploy/k8s/cluster_info.py @@ -18,34 +18,30 @@ from typing import Any, List, Set from stack_orchestrator.opts import opts from stack_orchestrator.deploy.k8s.helpers import named_volumes_from_pod_files, volume_mounts_for_service, volumes_for_pod_files -from stack_orchestrator.deploy.k8s.helpers import parsed_pod_files_map_from_file_names, get_node_pv_mount_path +from stack_orchestrator.deploy.k8s.helpers import get_node_pv_mount_path from stack_orchestrator.deploy.k8s.helpers import env_var_map_from_file, envs_from_environment_variables_map +from stack_orchestrator.deploy.deploy_util import parsed_pod_files_map_from_file_names, images_for_deployment from stack_orchestrator.deploy.deploy_types import DeployEnvVars +from stack_orchestrator.deploy.images import remote_tag_for_image class ClusterInfo: - parsed_pod_yaml_map: Any = {} + parsed_pod_yaml_map: Any image_set: Set[str] = set() app_name: str = "test-app" deployment_name: str = "test-deployment" environment_variables: DeployEnvVars + remote_image_repo: str def __init__(self) -> None: pass - def int(self, pod_files: List[str], compose_env_file): + def int(self, pod_files: List[str], compose_env_file, remote_image_repo): self.parsed_pod_yaml_map = parsed_pod_files_map_from_file_names(pod_files) # Find the set of images in the pods - for pod_name in self.parsed_pod_yaml_map: - pod = self.parsed_pod_yaml_map[pod_name] - services = pod["services"] - for service_name in services: - service_info = services[service_name] - image = service_info["image"] - self.image_set.add(image) - if opts.o.debug: - print(f"image_set: {self.image_set}") + self.image_set = images_for_deployment(pod_files) self.environment_variables = DeployEnvVars(env_var_map_from_file(compose_env_file)) + self.remote_image_repo = remote_image_repo if (opts.o.debug): print(f"Env vars: {self.environment_variables.map}") @@ -99,10 +95,12 @@ class ClusterInfo: container_name = service_name service_info = services[service_name] image = 
service_info["image"] + # Re-write the image tag for remote deployment + image_to_use = remote_tag_for_image(image, self.remote_image_repo) if self.remote_image_repo is not None else image volume_mounts = volume_mounts_for_service(self.parsed_pod_yaml_map, service_name) container = client.V1Container( name=container_name, - image=image, + image=image_to_use, env=envs_from_environment_variables_map(self.environment_variables.map), ports=[client.V1ContainerPort(container_port=80)], volume_mounts=volume_mounts, diff --git a/stack_orchestrator/deploy/k8s/deploy_k8s.py b/stack_orchestrator/deploy/k8s/deploy_k8s.py index 44eb9f0e..3d0ef3ff 100644 --- a/stack_orchestrator/deploy/k8s/deploy_k8s.py +++ b/stack_orchestrator/deploy/k8s/deploy_k8s.py @@ -22,6 +22,7 @@ from stack_orchestrator.deploy.k8s.helpers import create_cluster, destroy_cluste from stack_orchestrator.deploy.k8s.helpers import pods_in_deployment, log_stream_from_string, generate_kind_config from stack_orchestrator.deploy.k8s.cluster_info import ClusterInfo from stack_orchestrator.opts import opts +from stack_orchestrator.deploy.deployment_context import DeploymentContext class K8sDeployer(Deployer): @@ -33,19 +34,21 @@ class K8sDeployer(Deployer): kind_cluster_name: str cluster_info : ClusterInfo deployment_dir: Path + deployment_context: DeploymentContext - def __init__(self, type, deployment_dir, compose_files, compose_project_name, compose_env_file) -> None: + def __init__(self, type, deployment_context: DeploymentContext, compose_files, compose_project_name, compose_env_file) -> None: if (opts.o.debug): - print(f"Deployment dir: {deployment_dir}") + print(f"Deployment dir: {deployment_context.deployment_dir}") print(f"Compose files: {compose_files}") print(f"Project name: {compose_project_name}") print(f"Env file: {compose_env_file}") print(f"Type: {type}") self.type = type - self.deployment_dir = deployment_dir + self.deployment_dir = deployment_context.deployment_dir + self.deployment_context = 
deployment_context self.kind_cluster_name = compose_project_name self.cluster_info = ClusterInfo() - self.cluster_info.int(compose_files, compose_env_file) + self.cluster_info.int(compose_files, compose_env_file, deployment_context.spec.obj[constants.image_resigtry_key]) def connect_api(self): if self.is_kind(): @@ -97,7 +100,35 @@ class K8sDeployer(Deployer): {deployment_resp.metadata.generation} {deployment_resp.spec.template.spec.containers[0].image}") def down(self, timeout, volumes): + self.connect_api() # Delete the k8s objects + # Delete the host-path-mounted PVs for this deployment + pvs = self.cluster_info.get_pvs() + for pv in pvs: + if opts.o.debug: + print(f"Deleting this pv: {pv}") + pv_resp = self.core_api.delete_persistent_volume(name=pv.metadata.name) + if opts.o.debug: + print("PV deleted:") + print(f"{pv_resp}") + + # Figure out the PVCs for this deployment + pvcs = self.cluster_info.get_pvcs() + for pvc in pvcs: + if opts.o.debug: + print(f"Deleting this pvc: {pvc}") + pvc_resp = self.core_api.delete_namespaced_persistent_volume_claim(name=pvc.metadata.name, namespace=self.k8s_namespace) + if opts.o.debug: + print("PVCs deleted:") + print(f"{pvc_resp}") + # Process compose files into a Deployment + deployment = self.cluster_info.get_deployment() + # Delete the k8s Deployment object + if opts.o.debug: + print(f"Deleting this deployment: {deployment}") + self.apps_api.delete_namespaced_deployment( + name=deployment.metadata.name, namespace=self.k8s_namespace + ) if self.is_kind(): # Destroy the kind cluster destroy_cluster(self.kind_cluster_name) diff --git a/stack_orchestrator/deploy/k8s/helpers.py b/stack_orchestrator/deploy/k8s/helpers.py index db1ef075..82a33792 100644 --- a/stack_orchestrator/deploy/k8s/helpers.py +++ b/stack_orchestrator/deploy/k8s/helpers.py @@ -18,10 +18,10 @@ from dotenv import dotenv_values import os from pathlib import Path import subprocess -from typing import Any, Set, Mapping, List +from typing import Set, Mapping, List from
stack_orchestrator.opts import opts -from stack_orchestrator.util import get_yaml +from stack_orchestrator.deploy.deploy_util import parsed_pod_files_map_from_file_names def _run_command(command: str): @@ -133,17 +133,6 @@ def _make_absolute_host_path(data_mount_path: Path, deployment_dir: Path) -> Pat return Path.cwd().joinpath(deployment_dir.joinpath("compose").joinpath(data_mount_path)).resolve() -def parsed_pod_files_map_from_file_names(pod_files): - parsed_pod_yaml_map : Any = {} - for pod_file in pod_files: - with open(pod_file, "r") as pod_file_descriptor: - parsed_pod_file = get_yaml().load(pod_file_descriptor) - parsed_pod_yaml_map[pod_file] = parsed_pod_file - if opts.o.debug: - print(f"parsed_pod_yaml_map: {parsed_pod_yaml_map}") - return parsed_pod_yaml_map - - def _generate_kind_mounts(parsed_pod_files, deployment_dir): volume_definitions = [] volume_host_path_map = _get_host_paths_for_volumes(parsed_pod_files) diff --git a/stack_orchestrator/deploy/run_webapp.py b/stack_orchestrator/deploy/run_webapp.py index 8b1073b1..aa22acdf 100644 --- a/stack_orchestrator/deploy/run_webapp.py +++ b/stack_orchestrator/deploy/run_webapp.py @@ -1,59 +1,59 @@ -# Copyright © 2022, 2023 Vulcanize - -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Affero General Public License for more details. - -# You should have received a copy of the GNU Affero General Public License -# along with this program. If not, see . 
- -# Builds webapp containers - -# env vars: -# CERC_REPO_BASE_DIR defaults to ~/cerc - -# TODO: display the available list of containers; allow re-build of either all or specific containers - -import hashlib -import click - -from dotenv import dotenv_values -from stack_orchestrator.deploy.deployer_factory import getDeployer - - -@click.command() -@click.option("--image", help="image to deploy", required=True) -@click.option("--deploy-to", default="compose", help="deployment type ([Docker] 'compose' or 'k8s')") -@click.option("--env-file", help="environment file for webapp") -@click.pass_context -def command(ctx, image, deploy_to, env_file): - '''build the specified webapp container''' - - env = {} - if env_file: - env = dotenv_values(env_file) - - unique_cluster_descriptor = f"{image},{env}" - hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest() - cluster = f"laconic-webapp-{hash}" - - deployer = getDeployer(deploy_to, - deployment_dir=None, - compose_files=None, - compose_project_name=cluster, - compose_env_file=None) - - container = deployer.run(image, command=[], user=None, volumes=[], entrypoint=None, env=env, detach=True) - - # Make configurable? - webappPort = "3000/tcp" - # TODO: This assumes a Docker container object... - if webappPort in container.network_settings.ports: - mapping = container.network_settings.ports[webappPort][0] - print(f"""Image: {image}\nID: {container.id}\nURL: http://localhost:{mapping['HostPort']}""") +# Copyright © 2022, 2023 Vulcanize + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +# Builds webapp containers + +# env vars: +# CERC_REPO_BASE_DIR defaults to ~/cerc + +# TODO: display the available list of containers; allow re-build of either all or specific containers + +import hashlib +import click + +from dotenv import dotenv_values +from stack_orchestrator.deploy.deployer_factory import getDeployer + + +@click.command() +@click.option("--image", help="image to deploy", required=True) +@click.option("--deploy-to", default="compose", help="deployment type ([Docker] 'compose' or 'k8s')") +@click.option("--env-file", help="environment file for webapp") +@click.pass_context +def command(ctx, image, deploy_to, env_file): + '''build the specified webapp container''' + + env = {} + if env_file: + env = dotenv_values(env_file) + + unique_cluster_descriptor = f"{image},{env}" + hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest() + cluster = f"laconic-webapp-{hash}" + + deployer = getDeployer(deploy_to, + deployment_context=None, + compose_files=None, + compose_project_name=cluster, + compose_env_file=None) + + container = deployer.run(image, command=[], user=None, volumes=[], entrypoint=None, env=env, detach=True) + + # Make configurable? + webappPort = "3000/tcp" + # TODO: This assumes a Docker container object... 
+ if webappPort in container.network_settings.ports: + mapping = container.network_settings.ports[webappPort][0] + print(f"""Image: {image}\nID: {container.id}\nURL: http://localhost:{mapping['HostPort']}""") From 0b87c12c134dfc18b5e76447842b43d956a46ace Mon Sep 17 00:00:00 2001 From: Nabarun Gogoi Date: Tue, 21 Nov 2023 19:07:09 +0530 Subject: [PATCH 42/62] Upgrade merkl and sushiswap watcher to `v0.1.4` (#657) * Upgrade merkl and sushi watcher versions * Set gqlPath to base URL and remove filling start block * Upgrade watcher versions to 0.1.4 --- .../data/compose/docker-compose-watcher-merkl-sushiswap-v3.yml | 1 - .../data/compose/docker-compose-watcher-sushiswap-v3.yml | 1 - .../data/config/watcher-merkl-sushiswap-v3/start-server.sh | 3 --- .../watcher-merkl-sushiswap-v3/watcher-config-template.toml | 1 + .../data/config/watcher-sushiswap-v3/start-server.sh | 3 --- .../config/watcher-sushiswap-v3/watcher-config-template.toml | 1 + stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml | 2 +- stack_orchestrator/data/stacks/sushiswap-v3/stack.yml | 2 +- 8 files changed, 4 insertions(+), 10 deletions(-) diff --git a/stack_orchestrator/data/compose/docker-compose-watcher-merkl-sushiswap-v3.yml b/stack_orchestrator/data/compose/docker-compose-watcher-merkl-sushiswap-v3.yml index d08c6214..0a83af89 100644 --- a/stack_orchestrator/data/compose/docker-compose-watcher-merkl-sushiswap-v3.yml +++ b/stack_orchestrator/data/compose/docker-compose-watcher-merkl-sushiswap-v3.yml @@ -56,7 +56,6 @@ services: environment: CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT} - SUSHISWAP_START_BLOCK: ${SUSHISWAP_START_BLOCK:- 2867560} command: ["bash", "./start-server.sh"] volumes: - ../config/watcher-merkl-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml diff --git a/stack_orchestrator/data/compose/docker-compose-watcher-sushiswap-v3.yml 
b/stack_orchestrator/data/compose/docker-compose-watcher-sushiswap-v3.yml index 219688db..f7b75ca5 100644 --- a/stack_orchestrator/data/compose/docker-compose-watcher-sushiswap-v3.yml +++ b/stack_orchestrator/data/compose/docker-compose-watcher-sushiswap-v3.yml @@ -56,7 +56,6 @@ services: environment: CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT} - SUSHISWAP_START_BLOCK: ${SUSHISWAP_START_BLOCK:- 2867560} command: ["bash", "./start-server.sh"] volumes: - ../config/watcher-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml diff --git a/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/start-server.sh b/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/start-server.sh index 1b14f2e3..e2bbdaad 100755 --- a/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/start-server.sh +++ b/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/start-server.sh @@ -16,8 +16,5 @@ WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \ # Write the modified content to a new file echo "$WATCHER_CONFIG" > environments/local.toml -echo "Initializing watcher..." -yarn fill --start-block $SUSHISWAP_START_BLOCK --end-block $((SUSHISWAP_START_BLOCK + 1)) - echo "Running server..." DEBUG=vulcanize:* exec node --enable-source-maps dist/server.js diff --git a/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/watcher-config-template.toml b/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/watcher-config-template.toml index 894a4660..f5355a4b 100644 --- a/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/watcher-config-template.toml +++ b/stack_orchestrator/data/config/watcher-merkl-sushiswap-v3/watcher-config-template.toml @@ -2,6 +2,7 @@ host = "0.0.0.0" port = 3008 kind = "active" + gqlPath = '/' # Checkpointing state. 
checkpointing = true diff --git a/stack_orchestrator/data/config/watcher-sushiswap-v3/start-server.sh b/stack_orchestrator/data/config/watcher-sushiswap-v3/start-server.sh index 1b14f2e3..e2bbdaad 100755 --- a/stack_orchestrator/data/config/watcher-sushiswap-v3/start-server.sh +++ b/stack_orchestrator/data/config/watcher-sushiswap-v3/start-server.sh @@ -16,8 +16,5 @@ WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \ # Write the modified content to a new file echo "$WATCHER_CONFIG" > environments/local.toml -echo "Initializing watcher..." -yarn fill --start-block $SUSHISWAP_START_BLOCK --end-block $((SUSHISWAP_START_BLOCK + 1)) - echo "Running server..." DEBUG=vulcanize:* exec node --enable-source-maps dist/server.js diff --git a/stack_orchestrator/data/config/watcher-sushiswap-v3/watcher-config-template.toml b/stack_orchestrator/data/config/watcher-sushiswap-v3/watcher-config-template.toml index 07880a8d..7cfabedd 100644 --- a/stack_orchestrator/data/config/watcher-sushiswap-v3/watcher-config-template.toml +++ b/stack_orchestrator/data/config/watcher-sushiswap-v3/watcher-config-template.toml @@ -2,6 +2,7 @@ host = "0.0.0.0" port = 3008 kind = "active" + gqlPath = "/" # Checkpointing state. 
checkpointing = true diff --git a/stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml b/stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml index 8f5cb7ee..3f9dd43e 100644 --- a/stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml +++ b/stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml @@ -2,7 +2,7 @@ version: "1.0" name: merkl-sushiswap-v3 description: "SushiSwap v3 watcher stack" repos: - - github.com/cerc-io/merkl-sushiswap-v3-watcher-ts@v0.1.2 + - github.com/cerc-io/merkl-sushiswap-v3-watcher-ts@v0.1.4 containers: - cerc/watcher-merkl-sushiswap-v3 pods: diff --git a/stack_orchestrator/data/stacks/sushiswap-v3/stack.yml b/stack_orchestrator/data/stacks/sushiswap-v3/stack.yml index 05350996..49c604bf 100644 --- a/stack_orchestrator/data/stacks/sushiswap-v3/stack.yml +++ b/stack_orchestrator/data/stacks/sushiswap-v3/stack.yml @@ -2,7 +2,7 @@ version: "1.0" name: sushiswap-v3 description: "SushiSwap v3 watcher stack" repos: - - github.com/cerc-io/sushiswap-v3-watcher-ts@v0.1.2 + - github.com/cerc-io/sushiswap-v3-watcher-ts@v0.1.4 containers: - cerc/watcher-sushiswap-v3 pods: From 01029cf7aa0cedac6b184964c19950263172a041 Mon Sep 17 00:00:00 2001 From: David Boreham Date: Tue, 21 Nov 2023 08:35:31 -0700 Subject: [PATCH 43/62] Fix for code path that doesn't create a DeploymentContext (#658) --- stack_orchestrator/deploy/k8s/deploy_k8s.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/stack_orchestrator/deploy/k8s/deploy_k8s.py b/stack_orchestrator/deploy/k8s/deploy_k8s.py index 3d0ef3ff..483f64c6 100644 --- a/stack_orchestrator/deploy/k8s/deploy_k8s.py +++ b/stack_orchestrator/deploy/k8s/deploy_k8s.py @@ -37,18 +37,21 @@ class K8sDeployer(Deployer): deployment_context: DeploymentContext def __init__(self, type, deployment_context: DeploymentContext, compose_files, compose_project_name, compose_env_file) -> None: + self.type = type + # TODO: workaround pending refactoring above to cope with being 
created with a null deployment_context + if deployment_context is None: + return + self.deployment_dir = deployment_context.deployment_dir + self.deployment_context = deployment_context + self.kind_cluster_name = compose_project_name + self.cluster_info = ClusterInfo() + self.cluster_info.int(compose_files, compose_env_file, deployment_context.spec.obj[constants.image_resigtry_key]) if (opts.o.debug): print(f"Deployment dir: {deployment_context.deployment_dir}") print(f"Compose files: {compose_files}") print(f"Project name: {compose_project_name}") print(f"Env file: {compose_env_file}") print(f"Type: {type}") - self.type = type - self.deployment_dir = deployment_context.deployment_dir - self.deployment_context = deployment_context - self.kind_cluster_name = compose_project_name - self.cluster_info = ClusterInfo() - self.cluster_info.int(compose_files, compose_env_file, deployment_context.spec.obj[constants.image_resigtry_key]) def connect_api(self): if self.is_kind(): From 87bedde5cbfe00a039499026b2c4a53d8daff471 Mon Sep 17 00:00:00 2001 From: David Boreham Date: Tue, 21 Nov 2023 16:04:36 -0700 Subject: [PATCH 44/62] Support for k8s ingress and tls (#659) --- stack_orchestrator/constants.py | 2 + .../deploy/deployment_create.py | 6 +- stack_orchestrator/deploy/k8s/cluster_info.py | 83 +++++++++++++++-- stack_orchestrator/deploy/k8s/deploy_k8s.py | 92 ++++++++++++++++--- stack_orchestrator/deploy/spec.py | 12 +++ 5 files changed, 172 insertions(+), 23 deletions(-) diff --git a/stack_orchestrator/constants.py b/stack_orchestrator/constants.py index aedc4f3c..1cff6055 100644 --- a/stack_orchestrator/constants.py +++ b/stack_orchestrator/constants.py @@ -19,6 +19,8 @@ k8s_kind_deploy_type = "k8s-kind" k8s_deploy_type = "k8s" kube_config_key = "kube-config" deploy_to_key = "deploy-to" +network_key = "network" +http_proxy_key = "http-proxy" image_resigtry_key = "image-registry" kind_config_filename = "kind-config.yml" kube_config_filename = "kubeconfig.yml" diff --git 
a/stack_orchestrator/deploy/deployment_create.py b/stack_orchestrator/deploy/deployment_create.py index e999c1df..64647ab2 100644 --- a/stack_orchestrator/deploy/deployment_create.py +++ b/stack_orchestrator/deploy/deployment_create.py @@ -103,8 +103,8 @@ def _fixup_pod_file(pod, spec, compose_dir): } pod["volumes"][volume] = new_volume_spec # Fix up ports - if "ports" in spec: - spec_ports = spec["ports"] + if "network" in spec and "ports" in spec["network"]: + spec_ports = spec["network"]["ports"] for container_name, container_ports in spec_ports.items(): if container_name in pod["services"]: pod["services"][container_name]["ports"] = container_ports @@ -285,7 +285,7 @@ def init(ctx, config, kube_config, image_registry, output, map_ports_to_host): print(f"Creating spec file for stack: {stack} with content: {spec_file_content}") ports = _get_mapped_ports(stack, map_ports_to_host) - spec_file_content["ports"] = ports + spec_file_content.update({"network": {"ports": ports}}) named_volumes = _get_named_volumes(stack) if named_volumes: diff --git a/stack_orchestrator/deploy/k8s/cluster_info.py b/stack_orchestrator/deploy/k8s/cluster_info.py index ff052bf9..a7426804 100644 --- a/stack_orchestrator/deploy/k8s/cluster_info.py +++ b/stack_orchestrator/deploy/k8s/cluster_info.py @@ -22,6 +22,7 @@ from stack_orchestrator.deploy.k8s.helpers import get_node_pv_mount_path from stack_orchestrator.deploy.k8s.helpers import env_var_map_from_file, envs_from_environment_variables_map from stack_orchestrator.deploy.deploy_util import parsed_pod_files_map_from_file_names, images_for_deployment from stack_orchestrator.deploy.deploy_types import DeployEnvVars +from stack_orchestrator.deploy.spec import Spec from stack_orchestrator.deploy.images import remote_tag_for_image @@ -29,22 +30,91 @@ class ClusterInfo: parsed_pod_yaml_map: Any image_set: Set[str] = set() app_name: str = "test-app" - deployment_name: str = "test-deployment" environment_variables: DeployEnvVars - 
remote_image_repo: str + spec: Spec def __init__(self) -> None: pass - def int(self, pod_files: List[str], compose_env_file, remote_image_repo): + def int(self, pod_files: List[str], compose_env_file, spec: Spec): self.parsed_pod_yaml_map = parsed_pod_files_map_from_file_names(pod_files) # Find the set of images in the pods self.image_set = images_for_deployment(pod_files) self.environment_variables = DeployEnvVars(env_var_map_from_file(compose_env_file)) - self.remote_image_repo = remote_image_repo + self.spec = spec if (opts.o.debug): print(f"Env vars: {self.environment_variables.map}") + def get_ingress(self): + # No ingress for a deployment that has no http-proxy defined, for now + http_proxy_info_list = self.spec.get_http_proxy() + ingress = None + if http_proxy_info_list: + # TODO: handle multiple definitions + http_proxy_info = http_proxy_info_list[0] + if opts.o.debug: + print(f"http-proxy: {http_proxy_info}") + # TODO: good enough parsing for webapp deployment for now + host_name = http_proxy_info["host-name"] + rules = [] + tls = [client.V1IngressTLS( + hosts=[host_name], + secret_name=f"{self.app_name}-tls" + )] + paths = [] + for route in http_proxy_info["routes"]: + path = route["path"] + proxy_to = route["proxy-to"] + if opts.o.debug: + print(f"proxy config: {path} -> {proxy_to}") + paths.append(client.V1HTTPIngressPath( + path_type="Prefix", + path=path, + backend=client.V1IngressBackend( + service=client.V1IngressServiceBackend( + # TODO: this looks wrong + name=f"{self.app_name}-service", + # TODO: pull port number from the service + port=client.V1ServiceBackendPort(number=80) + ) + ) + )) + rules.append(client.V1IngressRule( + host=host_name, + http=client.V1HTTPIngressRuleValue( + paths=paths + ) + )) + spec = client.V1IngressSpec( + tls=tls, + rules=rules + ) + ingress = client.V1Ingress( + metadata=client.V1ObjectMeta( + name=f"{self.app_name}-ingress", + annotations={ + "kubernetes.io/ingress.class": "nginx", + 
"cert-manager.io/cluster-issuer": "letsencrypt-prod" + } + ), + spec=spec + ) + return ingress + + def get_service(self): + service = client.V1Service( + metadata=client.V1ObjectMeta(name=f"{self.app_name}-service"), + spec=client.V1ServiceSpec( + type="ClusterIP", + ports=[client.V1ServicePort( + port=80, + target_port=80 + )], + selector={"app": self.app_name} + ) + ) + return service + def get_pvcs(self): result = [] volumes = named_volumes_from_pod_files(self.parsed_pod_yaml_map) @@ -96,7 +166,8 @@ class ClusterInfo: service_info = services[service_name] image = service_info["image"] # Re-write the image tag for remote deployment - image_to_use = remote_tag_for_image(image, self.remote_image_repo) if self.remote_image_repo is not None else image + image_to_use = remote_tag_for_image( + image, self.spec.get_image_registry()) if self.spec.get_image_registry() is not None else image volume_mounts = volume_mounts_for_service(self.parsed_pod_yaml_map, service_name) container = client.V1Container( name=container_name, @@ -123,7 +194,7 @@ class ClusterInfo: deployment = client.V1Deployment( api_version="apps/v1", kind="Deployment", - metadata=client.V1ObjectMeta(name=self.deployment_name), + metadata=client.V1ObjectMeta(name=f"{self.app_name}-deployment"), spec=spec, ) return deployment diff --git a/stack_orchestrator/deploy/k8s/deploy_k8s.py b/stack_orchestrator/deploy/k8s/deploy_k8s.py index 483f64c6..8e790d10 100644 --- a/stack_orchestrator/deploy/k8s/deploy_k8s.py +++ b/stack_orchestrator/deploy/k8s/deploy_k8s.py @@ -23,6 +23,15 @@ from stack_orchestrator.deploy.k8s.helpers import pods_in_deployment, log_stream from stack_orchestrator.deploy.k8s.cluster_info import ClusterInfo from stack_orchestrator.opts import opts from stack_orchestrator.deploy.deployment_context import DeploymentContext +from stack_orchestrator.util import error_exit + + +def _check_delete_exception(e: client.exceptions.ApiException): + if e.status == 404: + if opts.o.debug: + print("Failed to 
delete object, continuing") + else: + error_exit(f"k8s api error: {e}") class K8sDeployer(Deployer): @@ -30,6 +39,7 @@ class K8sDeployer(Deployer): type: str core_api: client.CoreV1Api apps_api: client.AppsV1Api + networking_api: client.NetworkingV1Api k8s_namespace: str = "default" kind_cluster_name: str cluster_info : ClusterInfo @@ -45,7 +55,7 @@ class K8sDeployer(Deployer): self.deployment_context = deployment_context self.kind_cluster_name = compose_project_name self.cluster_info = ClusterInfo() - self.cluster_info.int(compose_files, compose_env_file, deployment_context.spec.obj[constants.image_resigtry_key]) + self.cluster_info.int(compose_files, compose_env_file, deployment_context.spec) if (opts.o.debug): print(f"Deployment dir: {deployment_context.deployment_dir}") print(f"Compose files: {compose_files}") @@ -60,9 +70,11 @@ class K8sDeployer(Deployer): # Get the config file and pass to load_kube_config() config.load_kube_config(config_file=self.deployment_dir.joinpath(constants.kube_config_filename).as_posix()) self.core_api = client.CoreV1Api() + self.networking_api = client.NetworkingV1Api() self.apps_api = client.AppsV1Api() def up(self, detach, services): + if self.is_kind(): # Create the kind cluster create_cluster(self.kind_cluster_name, self.deployment_dir.joinpath(constants.kind_config_filename)) @@ -102,6 +114,26 @@ class K8sDeployer(Deployer): print(f"{deployment_resp.metadata.namespace} {deployment_resp.metadata.name} \ {deployment_resp.metadata.generation} {deployment_resp.spec.template.spec.containers[0].image}") + service: client.V1Service = self.cluster_info.get_service() + service_resp = self.core_api.create_namespaced_service( + namespace=self.k8s_namespace, + body=service + ) + if opts.o.debug: + print("Service created:") + print(f"{service_resp}") + + # TODO: disable ingress for kind + ingress: client.V1Ingress = self.cluster_info.get_ingress() + + ingress_resp = self.networking_api.create_namespaced_ingress( + 
namespace=self.k8s_namespace, + body=ingress + ) + if opts.o.debug: + print("Ingress created:") + print(f"{ingress_resp}") + def down(self, timeout, volumes): self.connect_api() # Delete the k8s objects @@ -110,28 +142,60 @@ class K8sDeployer(Deployer): for pv in pvs: if opts.o.debug: print(f"Deleting this pv: {pv}") - pv_resp = self.core_api.delete_persistent_volume(name=pv.metadata.name) - if opts.o.debug: - print("PV deleted:") - print(f"{pv_resp}") + try: + pv_resp = self.core_api.delete_persistent_volume(name=pv.metadata.name) + if opts.o.debug: + print("PV deleted:") + print(f"{pv_resp}") + except client.exceptions.ApiException as e: + _check_delete_exception(e) # Figure out the PVCs for this deployment pvcs = self.cluster_info.get_pvcs() for pvc in pvcs: if opts.o.debug: print(f"Deleting this pvc: {pvc}") - pvc_resp = self.core_api.delete_namespaced_persistent_volume_claim(name=pvc.metadata.name, namespace=self.k8s_namespace) - if opts.o.debug: - print("PVCs deleted:") - print(f"{pvc_resp}") - # Process compose files into a Deployment + try: + pvc_resp = self.core_api.delete_namespaced_persistent_volume_claim( + name=pvc.metadata.name, namespace=self.k8s_namespace + ) + if opts.o.debug: + print("PVCs deleted:") + print(f"{pvc_resp}") + except client.exceptions.ApiException as e: + _check_delete_exception(e) deployment = self.cluster_info.get_deployment() - # Create the k8s objects if opts.o.debug: print(f"Deleting this deployment: {deployment}") - self.apps_api.delete_namespaced_deployment( - name=deployment.metadata.name, namespace=self.k8s_namespace - ) + try: + self.apps_api.delete_namespaced_deployment( + name=deployment.metadata.name, namespace=self.k8s_namespace + ) + except client.exceptions.ApiException as e: + _check_delete_exception(e) + + service: client.V1Service = self.cluster_info.get_service() + if opts.o.debug: + print(f"Deleting service: {service}") + try: + self.core_api.delete_namespaced_service( + namespace=self.k8s_namespace, + 
name=service.metadata.name + ) + except client.exceptions.ApiException as e: + _check_delete_exception(e) + + # TODO: disable ingress for kind + ingress: client.V1Ingress = self.cluster_info.get_ingress() + if opts.o.debug: + print(f"Deleting this ingress: {ingress}") + try: + self.networking_api.delete_namespaced_ingress( + name=ingress.metadata.name, namespace=self.k8s_namespace + ) + except client.exceptions.ApiException as e: + _check_delete_exception(e) + if self.is_kind(): # Destroy the kind cluster destroy_cluster(self.kind_cluster_name) diff --git a/stack_orchestrator/deploy/spec.py b/stack_orchestrator/deploy/spec.py index 9ee893b9..c4f791bf 100644 --- a/stack_orchestrator/deploy/spec.py +++ b/stack_orchestrator/deploy/spec.py @@ -16,6 +16,7 @@ from pathlib import Path import typing from stack_orchestrator.util import get_yaml +from stack_orchestrator import constants class Spec: @@ -28,3 +29,14 @@ class Spec: def init_from_file(self, file_path: Path): with file_path: self.obj = get_yaml().load(open(file_path, "r")) + + def get_image_registry(self): + return (self.obj[constants.image_resigtry_key] + if self.obj and constants.image_resigtry_key in self.obj + else None) + + def get_http_proxy(self): + return (self.obj[constants.network_key][constants.http_proxy_key] + if self.obj and constants.network_key in self.obj + and constants.http_proxy_key in self.obj[constants.network_key] + else None) From 1a37255c187040672c83b7f145d21b7517d2706c Mon Sep 17 00:00:00 2001 From: Thomas E Lackey Date: Wed, 22 Nov 2023 11:31:30 -0600 Subject: [PATCH 45/62] Tweak laconicd config to allow setting endpoint port and to make the fixturenet restartable. 
(#660) * Endpoint includes port * Make it restartable * Don't try to remove the mounted directory * Make copy of init.sh --- ...ker-compose-fixturenet-laconic-console.yml | 2 +- .../docker-compose-fixturenet-laconicd.yml | 2 +- .../fixturenet-laconicd/create-fixturenet.sh | 187 +++++++++--------- .../cerc-laconic-console-host/config.yml | 4 +- 4 files changed, 101 insertions(+), 94 deletions(-) diff --git a/stack_orchestrator/data/compose/docker-compose-fixturenet-laconic-console.yml b/stack_orchestrator/data/compose/docker-compose-fixturenet-laconic-console.yml index da2fd95f..a186e761 100644 --- a/stack_orchestrator/data/compose/docker-compose-fixturenet-laconic-console.yml +++ b/stack_orchestrator/data/compose/docker-compose-fixturenet-laconic-console.yml @@ -4,6 +4,6 @@ services: image: cerc/laconic-console-host:local environment: - CERC_WEBAPP_FILES_DIR=${CERC_WEBAPP_FILES_DIR:-/usr/local/share/.config/yarn/global/node_modules/@cerc-io/console-app/dist/production} - - LACONIC_HOSTED_ENDPOINT=${LACONIC_HOSTED_ENDPOINT:-http://localhost} + - LACONIC_HOSTED_ENDPOINT=${LACONIC_HOSTED_ENDPOINT:-http://localhost:9473} ports: - "80" diff --git a/stack_orchestrator/data/compose/docker-compose-fixturenet-laconicd.yml b/stack_orchestrator/data/compose/docker-compose-fixturenet-laconicd.yml index 641229d4..7b48f60d 100644 --- a/stack_orchestrator/data/compose/docker-compose-fixturenet-laconicd.yml +++ b/stack_orchestrator/data/compose/docker-compose-fixturenet-laconicd.yml @@ -5,7 +5,7 @@ services: command: ["sh", "/docker-entrypoint-scripts.d/create-fixturenet.sh"] volumes: # The cosmos-sdk node's database directory: - - laconicd-data:/root/.laconicd/data + - laconicd-data:/root/.laconicd # TODO: look at folding these scripts into the container - ../config/fixturenet-laconicd/create-fixturenet.sh:/docker-entrypoint-scripts.d/create-fixturenet.sh - ../config/fixturenet-laconicd/export-mykey.sh:/docker-entrypoint-scripts.d/export-mykey.sh diff --git 
a/stack_orchestrator/data/config/fixturenet-laconicd/create-fixturenet.sh b/stack_orchestrator/data/config/fixturenet-laconicd/create-fixturenet.sh index 9c30bff8..d444fcad 100644 --- a/stack_orchestrator/data/config/fixturenet-laconicd/create-fixturenet.sh +++ b/stack_orchestrator/data/config/fixturenet-laconicd/create-fixturenet.sh @@ -14,104 +14,111 @@ LOGLEVEL="info" TRACE="--trace" # TRACE="" -# validate dependencies are installed -command -v jq > /dev/null 2>&1 || { echo >&2 "jq not installed. More info: https://stedolan.github.io/jq/download/"; exit 1; } +if [ "$1" == "clean" ] || [ ! -d "$HOME/.laconicd/data/blockstore.db" ]; then + # validate dependencies are installed + command -v jq > /dev/null 2>&1 || { echo >&2 "jq not installed. More info: https://stedolan.github.io/jq/download/"; exit 1; } -# remove existing daemon and client -rm -rf ~/.laconic* + # remove existing daemon and client + rm -rf $HOME/.laconicd/* + rm -rf $HOME/.laconic/* -make install - -laconicd config keyring-backend $KEYRING -laconicd config chain-id $CHAINID - -# if $KEY exists it should be deleted -laconicd keys add $KEY --keyring-backend $KEYRING --algo $KEYALGO - -# Set moniker and chain-id for Ethermint (Moniker can be anything, chain-id must be an integer) -laconicd init $MONIKER --chain-id $CHAINID - -# Change parameter token denominations to aphoton -cat $HOME/.laconicd/config/genesis.json | jq '.app_state["staking"]["params"]["bond_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json -cat $HOME/.laconicd/config/genesis.json | jq '.app_state["crisis"]["constant_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json -cat $HOME/.laconicd/config/genesis.json | jq '.app_state["gov"]["deposit_params"]["min_deposit"][0]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv 
$HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json -cat $HOME/.laconicd/config/genesis.json | jq '.app_state["mint"]["params"]["mint_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json -# Custom modules -cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["record_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json -cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json -cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_commit_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json -cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_reveal_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json -cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_minimum_bid"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json - -if [[ "$TEST_REGISTRY_EXPIRY" == "true" ]]; then - echo "Setting timers for expiry tests." 
- - cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["record_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json - cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_grace_period"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json - cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json -fi - -if [[ "$TEST_AUCTION_ENABLED" == "true" ]]; then - echo "Enabling auction and setting timers." - - cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_enabled"]=true' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json - cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json - cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_grace_period"]="300s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json - cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_commits_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json - cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_reveals_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv 
$HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json -fi - -# increase block time (?) -cat $HOME/.laconicd/config/genesis.json | jq '.consensus_params["block"]["time_iota_ms"]="1000"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json - -# Set gas limit in genesis -cat $HOME/.laconicd/config/genesis.json | jq '.consensus_params["block"]["max_gas"]="10000000"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json - -# disable produce empty block -if [[ "$OSTYPE" == "darwin"* ]]; then - sed -i '' 's/create_empty_blocks = true/create_empty_blocks = false/g' $HOME/.laconicd/config/config.toml - else - sed -i 's/create_empty_blocks = true/create_empty_blocks = false/g' $HOME/.laconicd/config/config.toml -fi - -if [[ $1 == "pending" ]]; then - if [[ "$OSTYPE" == "darwin"* ]]; then - sed -i '' 's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' $HOME/.laconicd/config/config.toml - sed -i '' 's/timeout_propose = "3s"/timeout_propose = "30s"/g' $HOME/.laconicd/config/config.toml - sed -i '' 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' $HOME/.laconicd/config/config.toml - sed -i '' 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' $HOME/.laconicd/config/config.toml - sed -i '' 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' $HOME/.laconicd/config/config.toml - sed -i '' 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' $HOME/.laconicd/config/config.toml - sed -i '' 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' $HOME/.laconicd/config/config.toml - sed -i '' 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml - sed -i '' 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' $HOME/.laconicd/config/config.toml - else - sed -i 
's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' $HOME/.laconicd/config/config.toml - sed -i 's/timeout_propose = "3s"/timeout_propose = "30s"/g' $HOME/.laconicd/config/config.toml - sed -i 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' $HOME/.laconicd/config/config.toml - sed -i 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' $HOME/.laconicd/config/config.toml - sed -i 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' $HOME/.laconicd/config/config.toml - sed -i 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' $HOME/.laconicd/config/config.toml - sed -i 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' $HOME/.laconicd/config/config.toml - sed -i 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml - sed -i 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' $HOME/.laconicd/config/config.toml + if [ -n "`which make`" ]; then + make install fi -fi -# Allocate genesis accounts (cosmos formatted addresses) -laconicd add-genesis-account $KEY 100000000000000000000000000aphoton --keyring-backend $KEYRING + laconicd config keyring-backend $KEYRING + laconicd config chain-id $CHAINID -# Sign genesis transaction -laconicd gentx $KEY 1000000000000000000000aphoton --keyring-backend $KEYRING --chain-id $CHAINID + # if $KEY exists it should be deleted + laconicd keys add $KEY --keyring-backend $KEYRING --algo $KEYALGO -# Collect genesis tx -laconicd collect-gentxs + # Set moniker and chain-id for Ethermint (Moniker can be anything, chain-id must be an integer) + laconicd init $MONIKER --chain-id $CHAINID -# Run this to ensure everything worked and that the genesis file is setup correctly -laconicd validate-genesis + # Change parameter token denominations to aphoton + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["staking"]["params"]["bond_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv 
$HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["crisis"]["constant_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["gov"]["deposit_params"]["min_deposit"][0]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["mint"]["params"]["mint_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + # Custom modules + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["record_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_commit_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_reveal_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_minimum_bid"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv 
$HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json -if [[ $1 == "pending" ]]; then - echo "pending mode is on, please wait for the first block committed." + if [[ "$TEST_REGISTRY_EXPIRY" == "true" ]]; then + echo "Setting timers for expiry tests." + + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["record_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_grace_period"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + fi + + if [[ "$TEST_AUCTION_ENABLED" == "true" ]]; then + echo "Enabling auction and setting timers." 
+ + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_enabled"]=true' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_grace_period"]="300s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_commits_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_reveals_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + fi + + # increase block time (?) 
+ cat $HOME/.laconicd/config/genesis.json | jq '.consensus_params["block"]["time_iota_ms"]="1000"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + + # Set gas limit in genesis + cat $HOME/.laconicd/config/genesis.json | jq '.consensus_params["block"]["max_gas"]="10000000"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + + # disable produce empty block + if [[ "$OSTYPE" == "darwin"* ]]; then + sed -i '' 's/create_empty_blocks = true/create_empty_blocks = false/g' $HOME/.laconicd/config/config.toml + else + sed -i 's/create_empty_blocks = true/create_empty_blocks = false/g' $HOME/.laconicd/config/config.toml + fi + + if [[ $1 == "pending" ]]; then + if [[ "$OSTYPE" == "darwin"* ]]; then + sed -i '' 's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' $HOME/.laconicd/config/config.toml + sed -i '' 's/timeout_propose = "3s"/timeout_propose = "30s"/g' $HOME/.laconicd/config/config.toml + sed -i '' 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' $HOME/.laconicd/config/config.toml + sed -i '' 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' $HOME/.laconicd/config/config.toml + sed -i '' 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' $HOME/.laconicd/config/config.toml + sed -i '' 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' $HOME/.laconicd/config/config.toml + sed -i '' 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' $HOME/.laconicd/config/config.toml + sed -i '' 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml + sed -i '' 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' $HOME/.laconicd/config/config.toml + else + sed -i 's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' $HOME/.laconicd/config/config.toml + sed -i 
's/timeout_propose = "3s"/timeout_propose = "30s"/g' $HOME/.laconicd/config/config.toml + sed -i 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' $HOME/.laconicd/config/config.toml + sed -i 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' $HOME/.laconicd/config/config.toml + sed -i 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' $HOME/.laconicd/config/config.toml + sed -i 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' $HOME/.laconicd/config/config.toml + sed -i 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' $HOME/.laconicd/config/config.toml + sed -i 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml + sed -i 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' $HOME/.laconicd/config/config.toml + fi + fi + + # Allocate genesis accounts (cosmos formatted addresses) + laconicd add-genesis-account $KEY 100000000000000000000000000aphoton --keyring-backend $KEYRING + + # Sign genesis transaction + laconicd gentx $KEY 1000000000000000000000aphoton --keyring-backend $KEYRING --chain-id $CHAINID + + # Collect genesis tx + laconicd collect-gentxs + + # Run this to ensure everything worked and that the genesis file is setup correctly + laconicd validate-genesis + + if [[ $1 == "pending" ]]; then + echo "pending mode is on, please wait for the first block committed." + fi +else + echo "Using existing database at $HOME/.laconicd. 
To replace, run '`basename $0` clean'" fi # Start the node (remove the --pruning=nothing flag if historical queries are not needed) diff --git a/stack_orchestrator/data/container-build/cerc-laconic-console-host/config.yml b/stack_orchestrator/data/container-build/cerc-laconic-console-host/config.yml index d557ace5..6c310842 100644 --- a/stack_orchestrator/data/container-build/cerc-laconic-console-host/config.yml +++ b/stack_orchestrator/data/container-build/cerc-laconic-console-host/config.yml @@ -2,5 +2,5 @@ services: wns: - server: 'LACONIC_HOSTED_ENDPOINT:9473/api' - webui: 'LACONIC_HOSTED_ENDPOINT:9473/console' + server: 'LACONIC_HOSTED_ENDPOINT/api' + webui: 'LACONIC_HOSTED_ENDPOINT/console' From 3fefc67e7787fd8b4138221448d277e16f91cd66 Mon Sep 17 00:00:00 2001 From: Nabarun Gogoi Date: Thu, 23 Nov 2023 15:00:18 +0530 Subject: [PATCH 46/62] Run azimuth contract watcher in active mode (#661) * Update stack to run azimuth job runner * Run azimuth watcher in active mode * Update stack to run job-runners for all watchers * Update ports in job-runner health checks * Map metrics ports to host * Configure historical block processing batch size for Azimuth watcher * Use deployment command for azimuth stack --------- Co-authored-by: neeraj Co-authored-by: Prathamesh Musale --- .../docker-compose-watcher-azimuth.yml | 281 +++++++++++++++++- .../watcher-azimuth/start-job-runner.sh | 28 ++ .../config/watcher-azimuth/start-server.sh | 7 +- .../watcher-config-template.toml | 6 + .../config/watcher-azimuth/watcher-params.env | 5 - .../data/stacks/azimuth/README.md | 93 ++++-- .../data/stacks/azimuth/stack.yml | 2 +- 7 files changed, 367 insertions(+), 55 deletions(-) create mode 100755 stack_orchestrator/data/config/watcher-azimuth/start-job-runner.sh delete mode 100644 stack_orchestrator/data/config/watcher-azimuth/watcher-params.env diff --git a/stack_orchestrator/data/compose/docker-compose-watcher-azimuth.yml 
b/stack_orchestrator/data/compose/docker-compose-watcher-azimuth.yml index 327c77fc..b620202a 100644 --- a/stack_orchestrator/data/compose/docker-compose-watcher-azimuth.yml +++ b/stack_orchestrator/data/compose/docker-compose-watcher-azimuth.yml @@ -22,6 +22,38 @@ services: retries: 15 start_period: 10s + # Starts the azimuth-watcher job runner + azimuth-watcher-job-runner: + image: cerc/watcher-azimuth:local + restart: unless-stopped + depends_on: + watcher-db: + condition: service_healthy + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC} + CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL} + CERC_HISTORICAL_BLOCK_RANGE: 500 + CONTRACT_ADDRESS: 0x223c067F8CF28ae173EE5CafEa60cA44C335fecB + CONTRACT_NAME: Azimuth + STARTING_BLOCK: 6784880 + working_dir: /app/packages/azimuth-watcher + command: "./start-job-runner.sh" + volumes: + - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/azimuth-watcher/environments/watcher-config-template.toml + - ../config/watcher-azimuth/merge-toml.js:/app/packages/azimuth-watcher/merge-toml.js + - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/azimuth-watcher/start-job-runner.sh + ports: + - "9000" + healthcheck: + test: ["CMD", "nc", "-vz", "localhost", "9000"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" + # Starts the azimuth-watcher server azimuth-watcher-server: image: cerc/watcher-azimuth:local @@ -29,8 +61,8 @@ services: depends_on: watcher-db: condition: service_healthy - env_file: - - ../config/watcher-azimuth/watcher-params.env + azimuth-watcher-job-runner: + condition: service_healthy environment: CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC} @@ -52,6 +84,37 @@ services: extra_hosts: - "host.docker.internal:host-gateway" + # Starts the censures-watcher job runner + censures-watcher-job-runner: + image: cerc/watcher-azimuth:local + restart: 
unless-stopped + depends_on: + watcher-db: + condition: service_healthy + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC} + CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL} + CONTRACT_ADDRESS: 0x325f68d32BdEe6Ed86E7235ff2480e2A433D6189 + CONTRACT_NAME: Censures + STARTING_BLOCK: 6784954 + working_dir: /app/packages/censures-watcher + command: "./start-job-runner.sh" + volumes: + - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/censures-watcher/environments/watcher-config-template.toml + - ../config/watcher-azimuth/merge-toml.js:/app/packages/censures-watcher/merge-toml.js + - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/censures-watcher/start-job-runner.sh + ports: + - "9002" + healthcheck: + test: ["CMD", "nc", "-vz", "localhost", "9002"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" + # Starts the censures-watcher server censures-watcher-server: image: cerc/watcher-azimuth:local @@ -59,8 +122,8 @@ services: depends_on: watcher-db: condition: service_healthy - env_file: - - ../config/watcher-azimuth/watcher-params.env + censures-watcher-job-runner: + condition: service_healthy environment: CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC} @@ -82,6 +145,37 @@ services: extra_hosts: - "host.docker.internal:host-gateway" + # Starts the claims-watcher job runner + claims-watcher-job-runner: + image: cerc/watcher-azimuth:local + restart: unless-stopped + depends_on: + watcher-db: + condition: service_healthy + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC} + CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL} + CONTRACT_ADDRESS: 0xe7e7f69b34D7d9Bd8d61Fb22C33b22708947971A + CONTRACT_NAME: Claims + STARTING_BLOCK: 6784941 + working_dir: /app/packages/claims-watcher + command: "./start-job-runner.sh" + volumes: + - 
../config/watcher-azimuth/watcher-config-template.toml:/app/packages/claims-watcher/environments/watcher-config-template.toml + - ../config/watcher-azimuth/merge-toml.js:/app/packages/claims-watcher/merge-toml.js + - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/claims-watcher/start-job-runner.sh + ports: + - "9004" + healthcheck: + test: ["CMD", "nc", "-vz", "localhost", "9004"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" + # Starts the claims-watcher server claims-watcher-server: image: cerc/watcher-azimuth:local @@ -89,8 +183,8 @@ services: depends_on: watcher-db: condition: service_healthy - env_file: - - ../config/watcher-azimuth/watcher-params.env + claims-watcher-job-runner: + condition: service_healthy environment: CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC} @@ -112,6 +206,37 @@ services: extra_hosts: - "host.docker.internal:host-gateway" + # Starts the conditional-star-release-watcher job runner + conditional-star-release-watcher-job-runner: + image: cerc/watcher-azimuth:local + restart: unless-stopped + depends_on: + watcher-db: + condition: service_healthy + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC} + CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL} + CONTRACT_ADDRESS: 0x8C241098C3D3498Fe1261421633FD57986D74AeA + CONTRACT_NAME: ConditionalStarRelease + STARTING_BLOCK: 6828004 + working_dir: /app/packages/conditional-star-release-watcher + command: "./start-job-runner.sh" + volumes: + - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/conditional-star-release-watcher/environments/watcher-config-template.toml + - ../config/watcher-azimuth/merge-toml.js:/app/packages/conditional-star-release-watcher/merge-toml.js + - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/conditional-star-release-watcher/start-job-runner.sh + ports: + - "9006" + healthcheck: + test: 
["CMD", "nc", "-vz", "localhost", "9006"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" + # Starts the conditional-star-release-watcher server conditional-star-release-watcher-server: image: cerc/watcher-azimuth:local @@ -119,8 +244,8 @@ services: depends_on: watcher-db: condition: service_healthy - env_file: - - ../config/watcher-azimuth/watcher-params.env + conditional-star-release-watcher-job-runner: + condition: service_healthy environment: CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC} @@ -142,6 +267,37 @@ services: extra_hosts: - "host.docker.internal:host-gateway" + # Starts the delegated-sending-watcher job runner + delegated-sending-watcher-job-runner: + image: cerc/watcher-azimuth:local + restart: unless-stopped + depends_on: + watcher-db: + condition: service_healthy + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC} + CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL} + CONTRACT_ADDRESS: 0xf6b461fE1aD4bd2ce25B23Fe0aff2ac19B3dFA76 + CONTRACT_NAME: DelegatedSending + STARTING_BLOCK: 6784956 + working_dir: /app/packages/delegated-sending-watcher + command: "./start-job-runner.sh" + volumes: + - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/delegated-sending-watcher/environments/watcher-config-template.toml + - ../config/watcher-azimuth/merge-toml.js:/app/packages/delegated-sending-watcher/merge-toml.js + - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/delegated-sending-watcher/start-job-runner.sh + ports: + - "9008" + healthcheck: + test: ["CMD", "nc", "-vz", "localhost", "9008"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" + # Starts the delegated-sending-watcher server delegated-sending-watcher-server: image: cerc/watcher-azimuth:local @@ -149,8 +305,8 @@ services: depends_on: watcher-db: condition: 
service_healthy - env_file: - - ../config/watcher-azimuth/watcher-params.env + delegated-sending-watcher-job-runner: + condition: service_healthy environment: CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC} @@ -172,6 +328,37 @@ services: extra_hosts: - "host.docker.internal:host-gateway" + # Starts the ecliptic-watcher job runner + ecliptic-watcher-job-runner: + image: cerc/watcher-azimuth:local + restart: unless-stopped + depends_on: + watcher-db: + condition: service_healthy + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC} + CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL} + CONTRACT_ADDRESS: 0x33EeCbf908478C10614626A9D304bfe18B78DD73 + CONTRACT_NAME: Ecliptic + STARTING_BLOCK: 13692129 + working_dir: /app/packages/ecliptic-watcher + command: "./start-job-runner.sh" + volumes: + - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/ecliptic-watcher/environments/watcher-config-template.toml + - ../config/watcher-azimuth/merge-toml.js:/app/packages/ecliptic-watcher/merge-toml.js + - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/ecliptic-watcher/start-job-runner.sh + ports: + - "9010" + healthcheck: + test: ["CMD", "nc", "-vz", "localhost", "9010"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" + # Starts the ecliptic-watcher server ecliptic-watcher-server: image: cerc/watcher-azimuth:local @@ -179,8 +366,8 @@ services: depends_on: watcher-db: condition: service_healthy - env_file: - - ../config/watcher-azimuth/watcher-params.env + ecliptic-watcher-job-runner: + condition: service_healthy environment: CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC} @@ -202,6 +389,37 @@ services: extra_hosts: - "host.docker.internal:host-gateway" + # Starts the linear-star-release-watcher job runner + linear-star-release-watcher-job-runner: + image: cerc/watcher-azimuth:local + 
restart: unless-stopped + depends_on: + watcher-db: + condition: service_healthy + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC} + CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL} + CONTRACT_ADDRESS: 0x86cd9cd0992F04231751E3761De45cEceA5d1801 + CONTRACT_NAME: LinearStarRelease + STARTING_BLOCK: 6784943 + working_dir: /app/packages/linear-star-release-watcher + command: "./start-job-runner.sh" + volumes: + - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/linear-star-release-watcher/environments/watcher-config-template.toml + - ../config/watcher-azimuth/merge-toml.js:/app/packages/linear-star-release-watcher/merge-toml.js + - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/linear-star-release-watcher/start-job-runner.sh + ports: + - "9012" + healthcheck: + test: ["CMD", "nc", "-vz", "localhost", "9012"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" + # Starts the linear-star-release-watcher server linear-star-release-watcher-server: image: cerc/watcher-azimuth:local @@ -209,8 +427,8 @@ services: depends_on: watcher-db: condition: service_healthy - env_file: - - ../config/watcher-azimuth/watcher-params.env + linear-star-release-watcher-job-runner: + condition: service_healthy environment: CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC} @@ -232,6 +450,37 @@ services: extra_hosts: - "host.docker.internal:host-gateway" + # Starts the polls-watcher job runner + polls-watcher-job-runner: + image: cerc/watcher-azimuth:local + restart: unless-stopped + depends_on: + watcher-db: + condition: service_healthy + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC} + CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL} + CONTRACT_ADDRESS: 0x7fEcaB617c868Bb5996d99D95200D2Fa708218e4 + CONTRACT_NAME: Polls + STARTING_BLOCK: 6784912 + working_dir: /app/packages/polls-watcher + 
command: "./start-job-runner.sh" + volumes: + - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/polls-watcher/environments/watcher-config-template.toml + - ../config/watcher-azimuth/merge-toml.js:/app/packages/polls-watcher/merge-toml.js + - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/polls-watcher/start-job-runner.sh + ports: + - "9014" + healthcheck: + test: ["CMD", "nc", "-vz", "localhost", "9014"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" + # Starts the polls-watcher server polls-watcher-server: image: cerc/watcher-azimuth:local @@ -239,8 +488,8 @@ services: depends_on: watcher-db: condition: service_healthy - env_file: - - ../config/watcher-azimuth/watcher-params.env + polls-watcher-job-runner: + condition: service_healthy environment: CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC} diff --git a/stack_orchestrator/data/config/watcher-azimuth/start-job-runner.sh b/stack_orchestrator/data/config/watcher-azimuth/start-job-runner.sh new file mode 100755 index 00000000..4bcad74c --- /dev/null +++ b/stack_orchestrator/data/config/watcher-azimuth/start-job-runner.sh @@ -0,0 +1,28 @@ +#!/bin/sh +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +echo "Using IPLD ETH RPC endpoint ${CERC_IPLD_ETH_RPC}" +echo "Using IPLD GQL endpoint ${CERC_IPLD_ETH_GQL}" +echo "Using historicalLogsBlockRange ${CERC_HISTORICAL_BLOCK_RANGE:-2000}" + +# Replace env variables in template TOML file +# Read in the config template TOML file and modify it +WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml) +WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \ + sed -E "s|REPLACE_WITH_CERC_IPLD_ETH_RPC|${CERC_IPLD_ETH_RPC}|g; \ + s|REPLACE_WITH_CERC_IPLD_ETH_GQL|${CERC_IPLD_ETH_GQL}|g; \ + s|REPLACE_WITH_CERC_HISTORICAL_BLOCK_RANGE|${CERC_HISTORICAL_BLOCK_RANGE:-2000}| ") + +# Write the modified content to a new file 
+echo "$WATCHER_CONFIG" > environments/watcher-config.toml + +# Merge SO watcher config with existing config file +node merge-toml.js + +yarn watch:contract --address $CONTRACT_ADDRESS --kind $CONTRACT_NAME --checkpoint true --starting-block $STARTING_BLOCK + +echo 'yarn job-runner' +yarn job-runner diff --git a/stack_orchestrator/data/config/watcher-azimuth/start-server.sh b/stack_orchestrator/data/config/watcher-azimuth/start-server.sh index c84c58d0..fa334653 100755 --- a/stack_orchestrator/data/config/watcher-azimuth/start-server.sh +++ b/stack_orchestrator/data/config/watcher-azimuth/start-server.sh @@ -4,18 +4,17 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then set -x fi -CERC_IPLD_ETH_RPC="${CERC_IPLD_ETH_RPC:-${DEFAULT_CERC_IPLD_ETH_RPC}}" -CERC_IPLD_ETH_GQL="${CERC_IPLD_ETH_GQL:-${DEFAULT_CERC_IPLD_ETH_GQL}}" - echo "Using IPLD ETH RPC endpoint ${CERC_IPLD_ETH_RPC}" echo "Using IPLD GQL endpoint ${CERC_IPLD_ETH_GQL}" +echo "Using historicalLogsBlockRange ${CERC_HISTORICAL_BLOCK_RANGE:-2000}" # Replace env variables in template TOML file # Read in the config template TOML file and modify it WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml) WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \ sed -E "s|REPLACE_WITH_CERC_IPLD_ETH_RPC|${CERC_IPLD_ETH_RPC}|g; \ - s|REPLACE_WITH_CERC_IPLD_ETH_GQL|${CERC_IPLD_ETH_GQL}| ") + s|REPLACE_WITH_CERC_IPLD_ETH_GQL|${CERC_IPLD_ETH_GQL}|g; \ + s|REPLACE_WITH_CERC_HISTORICAL_BLOCK_RANGE|${CERC_HISTORICAL_BLOCK_RANGE:-2000}| ") # Write the modified content to a new file echo "$WATCHER_CONFIG" > environments/watcher-config.toml diff --git a/stack_orchestrator/data/config/watcher-azimuth/watcher-config-template.toml b/stack_orchestrator/data/config/watcher-azimuth/watcher-config-template.toml index 1a4616fc..2eb814a3 100644 --- a/stack_orchestrator/data/config/watcher-azimuth/watcher-config-template.toml +++ b/stack_orchestrator/data/config/watcher-azimuth/watcher-config-template.toml @@ -2,6 +2,9 @@ host = 
"0.0.0.0" maxSimultaneousRequests = -1 +[metrics] + host = "0.0.0.0" + [database] host = "watcher-db" port = 5432 @@ -12,3 +15,6 @@ [upstream.ethServer] gqlApiEndpoint = "REPLACE_WITH_CERC_IPLD_ETH_GQL" rpcProviderEndpoint = "REPLACE_WITH_CERC_IPLD_ETH_RPC" + +[jobQueue] + historicalLogsBlockRange = REPLACE_WITH_CERC_HISTORICAL_BLOCK_RANGE diff --git a/stack_orchestrator/data/config/watcher-azimuth/watcher-params.env b/stack_orchestrator/data/config/watcher-azimuth/watcher-params.env deleted file mode 100644 index 8fcdc2d6..00000000 --- a/stack_orchestrator/data/config/watcher-azimuth/watcher-params.env +++ /dev/null @@ -1,5 +0,0 @@ -# Defaults - -# ipld-eth-server endpoints -DEFAULT_CERC_IPLD_ETH_RPC= -DEFAULT_CERC_IPLD_ETH_GQL= diff --git a/stack_orchestrator/data/stacks/azimuth/README.md b/stack_orchestrator/data/stacks/azimuth/README.md index 67f42b75..f7d93f33 100644 --- a/stack_orchestrator/data/stacks/azimuth/README.md +++ b/stack_orchestrator/data/stacks/azimuth/README.md @@ -9,19 +9,11 @@ Prerequisite: `ipld-eth-server` RPC and GQL endpoints Clone required repositories: ```bash -laconic-so --stack azimuth setup-repositories +laconic-so --stack azimuth setup-repositories --pull ``` NOTE: If the repository already exists and checked out to a different version, `setup-repositories` command will throw an error. -For getting around this, the `azimuth-watcher-ts` repository can be removed and then run the command. - -Checkout to the required versions and branches in repos - -```bash -# azimuth-watcher-ts -cd ~/cerc/azimuth-watcher-ts -git checkout v0.1.0 -``` +For getting around this, the `azimuth-watcher-ts` repository can be removed and then run the command again. Build the container images: @@ -31,42 +23,85 @@ laconic-so --stack azimuth build-containers This should create the required docker images in the local image registry. 
-### Configuration +## Create a deployment -* Create and update an env file to be used in the next step: +First, create a spec file for the deployment, which will map the stack's ports and volumes to the host: +```bash +laconic-so --stack azimuth deploy init --output azimuth-spec.yml +``` + +### Ports + +Edit `network` in spec file to map container ports to same ports in host + +```yaml +... +network: + ports: + watcher-db: + - 0.0.0.0:15432:5432 + azimuth-watcher-server: + - 0.0.0.0:3001:3001 + censures-watcher-server: + - 0.0.0.0:3002:3002 + claims-watcher-server: + - 0.0.0.0:3003:3003 + conditional-star-release-watcher-server: + - 0.0.0.0:3004:3004 + delegated-sending-watcher-server: + - 0.0.0.0:3005:3005 + ecliptic-watcher-server: + - 0.0.0.0:3006:3006 + linear-star-release-watcher-server: + - 0.0.0.0:3007:3007 + polls-watcher-server: + - 0.0.0.0:3008:3008 + gateway-server: + - 0.0.0.0:4000:4000 +... +``` + +### Data volumes +Container data volumes are bind-mounted to specified paths in the host filesystem. +The default setup (generated by `laconic-so deploy init`) places the volumes in the `./data` subdirectory of the deployment directory. The default mappings can be customized by editing the "spec" file generated by `laconic-so deploy init`. 
+ +--- + +Once you've made any needed changes to the spec file, create a deployment from it: +```bash +laconic-so --stack azimuth deploy create --spec-file azimuth-spec.yml --deployment-dir azimuth-deployment +``` + +## Set env variables + +Inside the deployment directory, open the file `config.env` and add variable to update RPC endpoint : ```bash - # External ipld-eth-server endpoints + # External RPC endpoints CERC_IPLD_ETH_RPC= - CERC_IPLD_ETH_GQL= ``` -* NOTE: If `ipld-eth-server` is running on the host machine, use `host.docker.internal` as the hostname to access host ports +* NOTE: If RPC endpoint is on the host machine, use `host.docker.internal` as the hostname to access the host port, or use the `ip a` command to find the IP address of the `docker0` interface (this will usually be something like `172.17.0.1` or `172.18.0.1`) -### Deploy the stack +## Start the stack -* Deploy the containers: - - ```bash - laconic-so --stack azimuth deploy-system --env-file up - ``` +Start the deployment: +```bash +laconic-so deployment --dir azimuth-deployment start +``` * List and check the health status of all the containers using `docker ps` and wait for them to be `healthy` ## Clean up -Stop all the services running in background: +To stop all azimuth services running in the background, while preserving chain data: ```bash -laconic-so --stack azimuth deploy-system down +laconic-so deployment --dir azimuth-deployment stop ``` -Clear volumes created by this stack: +To stop all azimuth services and also delete data: ```bash -# List all relevant volumes -docker volume ls -q --filter "name=.*watcher_db_data" - -# Remove all the listed volumes -docker volume rm $(docker volume ls -q --filter "name=.*watcher_db_data") +laconic-so deployment --dir azimuth-deployment stop --delete-volumes ``` diff --git a/stack_orchestrator/data/stacks/azimuth/stack.yml b/stack_orchestrator/data/stacks/azimuth/stack.yml index 47e0d058..bb7f2a88 100644 --- 
a/stack_orchestrator/data/stacks/azimuth/stack.yml +++ b/stack_orchestrator/data/stacks/azimuth/stack.yml @@ -1,7 +1,7 @@ version: "1.0" name: azimuth repos: - - github.com/cerc-io/azimuth-watcher-ts@v0.1.1 + - github.com/cerc-io/azimuth-watcher-ts containers: - cerc/watcher-azimuth pods: From 9499941891c66acbc5fd2cf358b32cf8a97058b8 Mon Sep 17 00:00:00 2001 From: prathamesh0 <42446521+prathamesh0@users.noreply.github.com> Date: Thu, 23 Nov 2023 19:11:02 +0530 Subject: [PATCH 47/62] Increase max connections for Azimuth watcher dbs (#665) --- .../data/compose/docker-compose-watcher-azimuth.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/stack_orchestrator/data/compose/docker-compose-watcher-azimuth.yml b/stack_orchestrator/data/compose/docker-compose-watcher-azimuth.yml index b620202a..48e77082 100644 --- a/stack_orchestrator/data/compose/docker-compose-watcher-azimuth.yml +++ b/stack_orchestrator/data/compose/docker-compose-watcher-azimuth.yml @@ -10,6 +10,7 @@ services: - POSTGRES_MULTIPLE_DATABASES=azimuth-watcher,azimuth-watcher-job-queue,censures-watcher,censures-watcher-job-queue,claims-watcher,claims-watcher-job-queue,conditional-star-release-watcher,conditional-star-release-watcher-job-queue,delegated-sending-watcher,delegated-sending-watcher-job-queue,ecliptic-watcher,ecliptic-watcher-job-queue,linear-star-release-watcher,linear-star-release-watcher-job-queue,polls-watcher,polls-watcher-job-queue - POSTGRES_EXTENSION=azimuth-watcher-job-queue:pgcrypto,censures-watcher-job-queue:pgcrypto,claims-watcher-job-queue:pgcrypto,conditional-star-release-watcher-job-queue:pgcrypto,delegated-sending-watcher-job-queue:pgcrypto,ecliptic-watcher-job-queue:pgcrypto,linear-star-release-watcher-job-queue:pgcrypto,polls-watcher-job-queue:pgcrypto, - POSTGRES_PASSWORD=password + command: ["postgres", "-c", "max_connections=200"] volumes: - ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh - 
watcher_db_data:/var/lib/postgresql/data From 1b94db27c18a4a10a944de18d0733209f57e6225 Mon Sep 17 00:00:00 2001 From: Nabarun Gogoi Date: Fri, 24 Nov 2023 14:05:37 +0530 Subject: [PATCH 48/62] Upgrade azimuth watcher release version to `0.1.2` (#666) * Upgrade azimuth watcher release version * Fix version for azimuth watcher repo --- .../data/config/watcher-azimuth/watcher-config-template.toml | 1 + stack_orchestrator/data/stacks/azimuth/stack.yml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/stack_orchestrator/data/config/watcher-azimuth/watcher-config-template.toml b/stack_orchestrator/data/config/watcher-azimuth/watcher-config-template.toml index 2eb814a3..2a91fedf 100644 --- a/stack_orchestrator/data/config/watcher-azimuth/watcher-config-template.toml +++ b/stack_orchestrator/data/config/watcher-azimuth/watcher-config-template.toml @@ -18,3 +18,4 @@ [jobQueue] historicalLogsBlockRange = REPLACE_WITH_CERC_HISTORICAL_BLOCK_RANGE + blockDelayInMilliSecs = 12000 diff --git a/stack_orchestrator/data/stacks/azimuth/stack.yml b/stack_orchestrator/data/stacks/azimuth/stack.yml index bb7f2a88..7adbe663 100644 --- a/stack_orchestrator/data/stacks/azimuth/stack.yml +++ b/stack_orchestrator/data/stacks/azimuth/stack.yml @@ -1,7 +1,7 @@ version: "1.0" name: azimuth repos: - - github.com/cerc-io/azimuth-watcher-ts + - github.com/cerc-io/azimuth-watcher-ts@v0.1.2 containers: - cerc/watcher-azimuth pods: From a68cd5d65ce6404fd1d1528d094b097b140ef400 Mon Sep 17 00:00:00 2001 From: David Boreham Date: Mon, 27 Nov 2023 22:02:16 -0700 Subject: [PATCH 49/62] Webapp deploy (#662) --- .../docker-compose-webapp-template.yml | 8 ++ .../data/stacks/webapp-template/README.md | 1 + .../data/stacks/webapp-template/stack.yml | 7 ++ stack_orchestrator/deploy/deploy.py | 2 +- .../deploy/deployment_create.py | 37 ++++-- stack_orchestrator/deploy/k8s/cluster_info.py | 28 ++++- stack_orchestrator/deploy/k8s/deploy_k8s.py | 4 +- stack_orchestrator/deploy/webapp/__init__.py | 
0 .../deploy/webapp/deploy_webapp.py | 113 ++++++++++++++++++ .../deploy/{ => webapp}/run_webapp.py | 8 +- stack_orchestrator/main.py | 3 +- 11 files changed, 191 insertions(+), 20 deletions(-) create mode 100644 stack_orchestrator/data/compose/docker-compose-webapp-template.yml create mode 100644 stack_orchestrator/data/stacks/webapp-template/README.md create mode 100644 stack_orchestrator/data/stacks/webapp-template/stack.yml create mode 100644 stack_orchestrator/deploy/webapp/__init__.py create mode 100644 stack_orchestrator/deploy/webapp/deploy_webapp.py rename stack_orchestrator/deploy/{ => webapp}/run_webapp.py (91%) diff --git a/stack_orchestrator/data/compose/docker-compose-webapp-template.yml b/stack_orchestrator/data/compose/docker-compose-webapp-template.yml new file mode 100644 index 00000000..b8697afa --- /dev/null +++ b/stack_orchestrator/data/compose/docker-compose-webapp-template.yml @@ -0,0 +1,8 @@ +services: + webapp: + image: cerc/webapp-container:local + restart: always + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + ports: + - "3000" diff --git a/stack_orchestrator/data/stacks/webapp-template/README.md b/stack_orchestrator/data/stacks/webapp-template/README.md new file mode 100644 index 00000000..4441e475 --- /dev/null +++ b/stack_orchestrator/data/stacks/webapp-template/README.md @@ -0,0 +1 @@ +# Template stack for webapp deployments diff --git a/stack_orchestrator/data/stacks/webapp-template/stack.yml b/stack_orchestrator/data/stacks/webapp-template/stack.yml new file mode 100644 index 00000000..d574e764 --- /dev/null +++ b/stack_orchestrator/data/stacks/webapp-template/stack.yml @@ -0,0 +1,7 @@ +version: "1.0" +name: test +description: "Webapp deployment stack" +containers: + - cerc/webapp-template-container +pods: + - webapp-template diff --git a/stack_orchestrator/deploy/deploy.py b/stack_orchestrator/deploy/deploy.py index 32c13a61..df231e74 100644 --- a/stack_orchestrator/deploy/deploy.py +++ 
b/stack_orchestrator/deploy/deploy.py @@ -276,7 +276,7 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file): unique_cluster_descriptor = f"{path},{stack},{include},{exclude}" if ctx.debug: print(f"pre-hash descriptor: {unique_cluster_descriptor}") - hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest() + hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()[:16] cluster = f"laconic-{hash}" if ctx.verbose: print(f"Using cluster name: {cluster}") diff --git a/stack_orchestrator/deploy/deployment_create.py b/stack_orchestrator/deploy/deployment_create.py index 64647ab2..fd52dba8 100644 --- a/stack_orchestrator/deploy/deployment_create.py +++ b/stack_orchestrator/deploy/deployment_create.py @@ -22,6 +22,7 @@ import random from shutil import copy, copyfile, copytree import sys from stack_orchestrator import constants +from stack_orchestrator.opts import opts from stack_orchestrator.util import (get_stack_file_path, get_parsed_deployment_spec, get_parsed_stack_config, global_options, get_yaml, get_pod_list, get_pod_file_path, pod_has_scripts, get_pod_script_paths, get_plugin_code_paths, error_exit) @@ -257,13 +258,29 @@ def _parse_config_variables(variable_values: str): "localhost-same, any-same, localhost-fixed-random, any-fixed-random") @click.pass_context def init(ctx, config, kube_config, image_registry, output, map_ports_to_host): - yaml = get_yaml() stack = global_options(ctx).stack - debug = global_options(ctx).debug deployer_type = ctx.obj.deployer.type - default_spec_file_content = call_stack_deploy_init(ctx.obj) + deploy_command_context = ctx.obj + return init_operation( + deploy_command_context, + stack, deployer_type, + config, kube_config, + image_registry, + output, + map_ports_to_host) + + +# The init command's implementation is in a separate function so that we can +# call it from other commands, bypassing the click decoration stuff +def init_operation(deploy_command_context, stack, deployer_type, config, 
kube_config, image_registry, output, map_ports_to_host): + yaml = get_yaml() + default_spec_file_content = call_stack_deploy_init(deploy_command_context) spec_file_content = {"stack": stack, constants.deploy_to_key: deployer_type} if deployer_type == "k8s": + if kube_config is None: + error_exit("--kube-config must be supplied with --deploy-to k8s") + if image_registry is None: + error_exit("--image-registry must be supplied with --deploy-to k8s") spec_file_content.update({constants.kube_config_key: kube_config}) spec_file_content.update({constants.image_resigtry_key: image_registry}) else: @@ -281,7 +298,7 @@ def init(ctx, config, kube_config, image_registry, output, map_ports_to_host): new_config = config_variables["config"] merged_config = {**new_config, **orig_config} spec_file_content.update({"config": merged_config}) - if debug: + if opts.o.debug: print(f"Creating spec file for stack: {stack} with content: {spec_file_content}") ports = _get_mapped_ports(stack, map_ports_to_host) @@ -329,12 +346,19 @@ def _copy_files_to_directory(file_paths: List[Path], directory: Path): @click.option("--initial-peers", help="Initial set of persistent peers") @click.pass_context def create(ctx, spec_file, deployment_dir, network_dir, initial_peers): + deployment_command_context = ctx.obj + return create_operation(deployment_command_context, spec_file, deployment_dir, network_dir, initial_peers) + + +# The init command's implementation is in a separate function so that we can +# call it from other commands, bypassing the click decoration stuff +def create_operation(deployment_command_context, spec_file, deployment_dir, network_dir, initial_peers): parsed_spec = get_parsed_deployment_spec(spec_file) stack_name = parsed_spec["stack"] deployment_type = parsed_spec[constants.deploy_to_key] stack_file = get_stack_file_path(stack_name) parsed_stack = get_parsed_stack_config(stack_name) - if global_options(ctx).debug: + if opts.o.debug: print(f"parsed spec: {parsed_spec}") if 
deployment_dir is None: deployment_dir_path = _make_default_deployment_dir() @@ -366,7 +390,7 @@ def create(ctx, spec_file, deployment_dir, network_dir, initial_peers): extra_config_dirs = _find_extra_config_dirs(parsed_pod_file, pod) destination_pod_dir = destination_pods_dir.joinpath(pod) os.mkdir(destination_pod_dir) - if global_options(ctx).debug: + if opts.o.debug: print(f"extra config dirs: {extra_config_dirs}") _fixup_pod_file(parsed_pod_file, parsed_spec, destination_compose_dir) with open(destination_compose_dir.joinpath("docker-compose-%s.yml" % pod), "w") as output_file: @@ -390,7 +414,6 @@ def create(ctx, spec_file, deployment_dir, network_dir, initial_peers): # Delegate to the stack's Python code # The deploy create command doesn't require a --stack argument so we need to insert the # stack member here. - deployment_command_context = ctx.obj deployment_command_context.stack = stack_name deployment_context = DeploymentContext() deployment_context.init(deployment_dir_path) diff --git a/stack_orchestrator/deploy/k8s/cluster_info.py b/stack_orchestrator/deploy/k8s/cluster_info.py index a7426804..6c19b20a 100644 --- a/stack_orchestrator/deploy/k8s/cluster_info.py +++ b/stack_orchestrator/deploy/k8s/cluster_info.py @@ -29,18 +29,19 @@ from stack_orchestrator.deploy.images import remote_tag_for_image class ClusterInfo: parsed_pod_yaml_map: Any image_set: Set[str] = set() - app_name: str = "test-app" + app_name: str environment_variables: DeployEnvVars spec: Spec def __init__(self) -> None: pass - def int(self, pod_files: List[str], compose_env_file, spec: Spec): + def int(self, pod_files: List[str], compose_env_file, deployment_name, spec: Spec): self.parsed_pod_yaml_map = parsed_pod_files_map_from_file_names(pod_files) # Find the set of images in the pods self.image_set = images_for_deployment(pod_files) self.environment_variables = DeployEnvVars(env_var_map_from_file(compose_env_file)) + self.app_name = deployment_name self.spec = spec if (opts.o.debug): 
print(f"Env vars: {self.environment_variables.map}") @@ -67,6 +68,8 @@ class ClusterInfo: proxy_to = route["proxy-to"] if opts.o.debug: print(f"proxy config: {path} -> {proxy_to}") + # proxy_to has the form : + proxy_to_port = int(proxy_to.split(":")[1]) paths.append(client.V1HTTPIngressPath( path_type="Prefix", path=path, @@ -75,7 +78,7 @@ class ClusterInfo: # TODO: this looks wrong name=f"{self.app_name}-service", # TODO: pull port number from the service - port=client.V1ServiceBackendPort(number=80) + port=client.V1ServiceBackendPort(number=proxy_to_port) ) ) )) @@ -101,14 +104,23 @@ class ClusterInfo: ) return ingress + # TODO: suppoprt multiple services def get_service(self): + for pod_name in self.parsed_pod_yaml_map: + pod = self.parsed_pod_yaml_map[pod_name] + services = pod["services"] + for service_name in services: + service_info = services[service_name] + port = int(service_info["ports"][0]) + if opts.o.debug: + print(f"service port: {port}") service = client.V1Service( metadata=client.V1ObjectMeta(name=f"{self.app_name}-service"), spec=client.V1ServiceSpec( type="ClusterIP", ports=[client.V1ServicePort( - port=80, - target_port=80 + port=port, + target_port=port )], selector={"app": self.app_name} ) @@ -165,6 +177,10 @@ class ClusterInfo: container_name = service_name service_info = services[service_name] image = service_info["image"] + port = int(service_info["ports"][0]) + if opts.o.debug: + print(f"image: {image}") + print(f"service port: {port}") # Re-write the image tag for remote deployment image_to_use = remote_tag_for_image( image, self.spec.get_image_registry()) if self.spec.get_image_registry() is not None else image @@ -173,7 +189,7 @@ class ClusterInfo: name=container_name, image=image_to_use, env=envs_from_environment_variables_map(self.environment_variables.map), - ports=[client.V1ContainerPort(container_port=80)], + ports=[client.V1ContainerPort(container_port=port)], volume_mounts=volume_mounts, resources=client.V1ResourceRequirements( 
requests={"cpu": "100m", "memory": "200Mi"}, diff --git a/stack_orchestrator/deploy/k8s/deploy_k8s.py b/stack_orchestrator/deploy/k8s/deploy_k8s.py index 8e790d10..5d41ae23 100644 --- a/stack_orchestrator/deploy/k8s/deploy_k8s.py +++ b/stack_orchestrator/deploy/k8s/deploy_k8s.py @@ -55,7 +55,7 @@ class K8sDeployer(Deployer): self.deployment_context = deployment_context self.kind_cluster_name = compose_project_name self.cluster_info = ClusterInfo() - self.cluster_info.int(compose_files, compose_env_file, deployment_context.spec) + self.cluster_info.int(compose_files, compose_env_file, compose_project_name, deployment_context.spec) if (opts.o.debug): print(f"Deployment dir: {deployment_context.deployment_dir}") print(f"Compose files: {compose_files}") @@ -126,6 +126,8 @@ class K8sDeployer(Deployer): # TODO: disable ingress for kind ingress: client.V1Ingress = self.cluster_info.get_ingress() + if opts.o.debug: + print(f"Sending this ingress: {ingress}") ingress_resp = self.networking_api.create_namespaced_ingress( namespace=self.k8s_namespace, body=ingress diff --git a/stack_orchestrator/deploy/webapp/__init__.py b/stack_orchestrator/deploy/webapp/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/stack_orchestrator/deploy/webapp/deploy_webapp.py b/stack_orchestrator/deploy/webapp/deploy_webapp.py new file mode 100644 index 00000000..a1e573fb --- /dev/null +++ b/stack_orchestrator/deploy/webapp/deploy_webapp.py @@ -0,0 +1,113 @@ +# Copyright ©2023 Vulcanize + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +import click +from pathlib import Path +from urllib.parse import urlparse + +from stack_orchestrator.util import error_exit, global_options2 +from stack_orchestrator.deploy.deployment_create import init_operation, create_operation +from stack_orchestrator.deploy.deploy import create_deploy_context +from stack_orchestrator.deploy.deploy_types import DeployCommandContext + + +def _fixup_container_tag(deployment_dir: str, image: str): + deployment_dir_path = Path(deployment_dir) + compose_file = deployment_dir_path.joinpath("compose", "docker-compose-webapp-template.yml") + # replace "cerc/webapp-container:local" in the file with our image tag + with open(compose_file) as rfile: + contents = rfile.read() + contents = contents.replace("cerc/webapp-container:local", image) + with open(compose_file, "w") as wfile: + wfile.write(contents) + + +def _fixup_url_spec(spec_file_name: str, url: str): + # url is like: https://example.com/path + parsed_url = urlparse(url) + http_proxy_spec = f''' + http-proxy: + - host-name: {parsed_url.hostname} + routes: + - path: '{parsed_url.path if parsed_url.path else "/"}' + proxy-to: webapp:3000 + ''' + spec_file_path = Path(spec_file_name) + with open(spec_file_path) as rfile: + contents = rfile.read() + contents = contents + http_proxy_spec + with open(spec_file_path, "w") as wfile: + wfile.write(contents) + + +@click.group() +@click.pass_context +def command(ctx): + '''manage a webapp deployment''' + + # Check that --stack wasn't supplied + if ctx.parent.obj.stack: + error_exit("--stack can't be supplied with the deploy-webapp command") + + +@command.command() +@click.option("--kube-config", help="Provide a config file for a k8s deployment") +@click.option("--image-registry", help="Provide a container image registry url for this k8s cluster") 
+@click.option("--deployment-dir", help="Create deployment files in this directory", required=True) +@click.option("--image", help="image to deploy", required=True) +@click.option("--url", help="url to serve", required=True) +@click.option("--env-file", help="environment file for webapp") +@click.pass_context +def create(ctx, deployment_dir, image, url, kube_config, image_registry, env_file): + '''create a deployment for the specified webapp container''' + # Do the equivalent of: + # 1. laconic-so --stack webapp-template deploy --deploy-to k8s init --output webapp-spec.yml + # --config (eqivalent of the contents of my-config.env) + # 2. laconic-so --stack webapp-template deploy --deploy-to k8s create --deployment-dir test-deployment + # --spec-file webapp-spec.yml + # 3. Replace the container image tag with the specified image + deployment_dir_path = Path(deployment_dir) + # Check the deployment dir does not exist + if deployment_dir_path.exists(): + error_exit(f"Deployment dir {deployment_dir} already exists") + # Generate a temporary file name for the spec file + spec_file_name = "webapp-spec.yml" + # Specify the webapp template stack + stack = "webapp-template" + # TODO: support env file + deploy_command_context: DeployCommandContext = create_deploy_context( + global_options2(ctx), None, stack, None, None, None, env_file, "k8s" + ) + init_operation( + deploy_command_context, + stack, + "k8s", + None, + kube_config, + image_registry, + spec_file_name, + None + ) + # Add the TLS and DNS spec + _fixup_url_spec(spec_file_name, url) + create_operation( + deploy_command_context, + spec_file_name, + deployment_dir, + None, + None + ) + # Fix up the container tag inside the deployment compose file + _fixup_container_tag(deployment_dir, image) diff --git a/stack_orchestrator/deploy/run_webapp.py b/stack_orchestrator/deploy/webapp/run_webapp.py similarity index 91% rename from stack_orchestrator/deploy/run_webapp.py rename to stack_orchestrator/deploy/webapp/run_webapp.py 
index aa22acdf..e4e01171 100644 --- a/stack_orchestrator/deploy/run_webapp.py +++ b/stack_orchestrator/deploy/webapp/run_webapp.py @@ -22,17 +22,17 @@ import hashlib import click - from dotenv import dotenv_values + +from stack_orchestrator import constants from stack_orchestrator.deploy.deployer_factory import getDeployer @click.command() @click.option("--image", help="image to deploy", required=True) -@click.option("--deploy-to", default="compose", help="deployment type ([Docker] 'compose' or 'k8s')") @click.option("--env-file", help="environment file for webapp") @click.pass_context -def command(ctx, image, deploy_to, env_file): +def command(ctx, image, env_file): '''build the specified webapp container''' env = {} @@ -43,7 +43,7 @@ def command(ctx, image, deploy_to, env_file): hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest() cluster = f"laconic-webapp-{hash}" - deployer = getDeployer(deploy_to, + deployer = getDeployer(type=constants.compose_deploy_type, deployment_context=None, compose_files=None, compose_project_name=cluster, diff --git a/stack_orchestrator/main.py b/stack_orchestrator/main.py index 8ee8ae61..26a011b0 100644 --- a/stack_orchestrator/main.py +++ b/stack_orchestrator/main.py @@ -20,7 +20,7 @@ from stack_orchestrator.repos import setup_repositories from stack_orchestrator.build import build_containers from stack_orchestrator.build import build_npms from stack_orchestrator.build import build_webapp -from stack_orchestrator.deploy import run_webapp +from stack_orchestrator.deploy.webapp import run_webapp, deploy_webapp from stack_orchestrator.deploy import deploy from stack_orchestrator import version from stack_orchestrator.deploy import deployment @@ -52,6 +52,7 @@ cli.add_command(build_containers.command, "build-containers") cli.add_command(build_npms.command, "build-npms") cli.add_command(build_webapp.command, "build-webapp") cli.add_command(run_webapp.command, "run-webapp") +cli.add_command(deploy_webapp.command, 
"deploy-webapp") cli.add_command(deploy.command, "deploy") # deploy is an alias for deploy-system cli.add_command(deploy.command, "deploy-system") cli.add_command(deployment.command, "deployment") From 1a069a6816300ac26dfa7797508ecee0f38c320e Mon Sep 17 00:00:00 2001 From: David Boreham Date: Tue, 28 Nov 2023 19:56:12 -0700 Subject: [PATCH 50/62] Use a temp file for the spec file name (#668) --- stack_orchestrator/deploy/webapp/deploy_webapp.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/stack_orchestrator/deploy/webapp/deploy_webapp.py b/stack_orchestrator/deploy/webapp/deploy_webapp.py index a1e573fb..a2cd7253 100644 --- a/stack_orchestrator/deploy/webapp/deploy_webapp.py +++ b/stack_orchestrator/deploy/webapp/deploy_webapp.py @@ -14,8 +14,10 @@ # along with this program. If not, see . import click +import os from pathlib import Path from urllib.parse import urlparse +from tempfile import NamedTemporaryFile from stack_orchestrator.util import error_exit, global_options2 from stack_orchestrator.deploy.deployment_create import init_operation, create_operation @@ -83,7 +85,8 @@ def create(ctx, deployment_dir, image, url, kube_config, image_registry, env_fil if deployment_dir_path.exists(): error_exit(f"Deployment dir {deployment_dir} already exists") # Generate a temporary file name for the spec file - spec_file_name = "webapp-spec.yml" + tf = NamedTemporaryFile(prefix="webapp-", suffix=".yml", delete=False) + spec_file_name = tf.name # Specify the webapp template stack stack = "webapp-template" # TODO: support env file @@ -111,3 +114,4 @@ def create(ctx, deployment_dir, image, url, kube_config, image_registry, env_fil ) # Fix up the container tag inside the deployment compose file _fixup_container_tag(deployment_dir, image) + os.remove(spec_file_name) From 113c0bfbf163d1158c4ab67c38dd50cdd60667e9 Mon Sep 17 00:00:00 2001 From: David Boreham Date: Tue, 28 Nov 2023 21:14:02 -0700 Subject: [PATCH 51/62] Propagate env file for webapp 
deployment (#669) --- stack_orchestrator/constants.py | 2 ++ .../deploy/deployment_create.py | 31 +++++++++++++------ stack_orchestrator/deploy/k8s/cluster_info.py | 3 +- stack_orchestrator/deploy/k8s/helpers.py | 5 --- .../deploy/webapp/deploy_webapp.py | 1 + stack_orchestrator/util.py | 6 ++++ 6 files changed, 33 insertions(+), 15 deletions(-) diff --git a/stack_orchestrator/constants.py b/stack_orchestrator/constants.py index 1cff6055..54cfe355 100644 --- a/stack_orchestrator/constants.py +++ b/stack_orchestrator/constants.py @@ -14,6 +14,8 @@ # along with this program. If not, see . stack_file_name = "stack.yml" +spec_file_name = "spec.yml" +config_file_name = "config.env" compose_deploy_type = "compose" k8s_kind_deploy_type = "k8s-kind" k8s_deploy_type = "k8s" diff --git a/stack_orchestrator/deploy/deployment_create.py b/stack_orchestrator/deploy/deployment_create.py index fd52dba8..88ce0b2a 100644 --- a/stack_orchestrator/deploy/deployment_create.py +++ b/stack_orchestrator/deploy/deployment_create.py @@ -25,7 +25,7 @@ from stack_orchestrator import constants from stack_orchestrator.opts import opts from stack_orchestrator.util import (get_stack_file_path, get_parsed_deployment_spec, get_parsed_stack_config, global_options, get_yaml, get_pod_list, get_pod_file_path, pod_has_scripts, - get_pod_script_paths, get_plugin_code_paths, error_exit) + get_pod_script_paths, get_plugin_code_paths, error_exit, env_var_map_from_file) from stack_orchestrator.deploy.deploy_types import LaconicStackSetupCommand from stack_orchestrator.deploy.deployer_factory import getDeployerConfigGenerator from stack_orchestrator.deploy.deployment_context import DeploymentContext @@ -244,12 +244,13 @@ def _parse_config_variables(variable_values: str): variable_name = variable_value_pair[0] variable_value = variable_value_pair[1] result_values[variable_name] = variable_value - result = {"config": result_values} + result = result_values return result @click.command() 
@click.option("--config", help="Provide config variables for the deployment") +@click.option("--config-file", help="Provide config variables in a file for the deployment") @click.option("--kube-config", help="Provide a config file for a k8s deployment") @click.option("--image-registry", help="Provide a container image registry url for this k8s cluster") @click.option("--output", required=True, help="Write yaml spec file here") @@ -257,14 +258,15 @@ def _parse_config_variables(variable_values: str): help="Map ports to the host as one of: any-variable-random (default), " "localhost-same, any-same, localhost-fixed-random, any-fixed-random") @click.pass_context -def init(ctx, config, kube_config, image_registry, output, map_ports_to_host): +def init(ctx, config, config_file, kube_config, image_registry, output, map_ports_to_host): stack = global_options(ctx).stack deployer_type = ctx.obj.deployer.type deploy_command_context = ctx.obj return init_operation( deploy_command_context, stack, deployer_type, - config, kube_config, + config, config_file, + kube_config, image_registry, output, map_ports_to_host) @@ -272,7 +274,8 @@ def init(ctx, config, kube_config, image_registry, output, map_ports_to_host): # The init command's implementation is in a separate function so that we can # call it from other commands, bypassing the click decoration stuff -def init_operation(deploy_command_context, stack, deployer_type, config, kube_config, image_registry, output, map_ports_to_host): +def init_operation(deploy_command_context, stack, deployer_type, config, + config_file, kube_config, image_registry, output, map_ports_to_host): yaml = get_yaml() default_spec_file_content = call_stack_deploy_init(deploy_command_context) spec_file_content = {"stack": stack, constants.deploy_to_key: deployer_type} @@ -292,12 +295,22 @@ def init_operation(deploy_command_context, stack, deployer_type, config, kube_co if default_spec_file_content: spec_file_content.update(default_spec_file_content) 
config_variables = _parse_config_variables(config) + # Implement merge, since update() overwrites if config_variables: - # Implement merge, since update() overwrites orig_config = spec_file_content.get("config", {}) - new_config = config_variables["config"] + new_config = config_variables merged_config = {**new_config, **orig_config} spec_file_content.update({"config": merged_config}) + if config_file: + config_file_path = Path(config_file) + if not config_file_path.exists(): + error_exit(f"config file: {config_file} does not exist") + config_file_variables = env_var_map_from_file(config_file_path) + if config_file_variables: + orig_config = spec_file_content.get("config", {}) + new_config = config_file_variables + merged_config = {**new_config, **orig_config} + spec_file_content.update({"config": merged_config}) if opts.o.debug: print(f"Creating spec file for stack: {stack} with content: {spec_file_content}") @@ -368,10 +381,10 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, netw error_exit(f"{deployment_dir_path} already exists") os.mkdir(deployment_dir_path) # Copy spec file and the stack file into the deployment dir - copyfile(spec_file, deployment_dir_path.joinpath("spec.yml")) + copyfile(spec_file, deployment_dir_path.joinpath(constants.spec_file_name)) copyfile(stack_file, deployment_dir_path.joinpath(os.path.basename(stack_file))) # Copy any config varibles from the spec file into an env file suitable for compose - _write_config_file(spec_file, deployment_dir_path.joinpath("config.env")) + _write_config_file(spec_file, deployment_dir_path.joinpath(constants.config_file_name)) # Copy any k8s config file into the deployment dir if deployment_type == "k8s": _write_kube_config_file(Path(parsed_spec[constants.kube_config_key]), diff --git a/stack_orchestrator/deploy/k8s/cluster_info.py b/stack_orchestrator/deploy/k8s/cluster_info.py index 6c19b20a..0aa74189 100644 --- a/stack_orchestrator/deploy/k8s/cluster_info.py +++ 
b/stack_orchestrator/deploy/k8s/cluster_info.py @@ -17,9 +17,10 @@ from kubernetes import client from typing import Any, List, Set from stack_orchestrator.opts import opts +from stack_orchestrator.util import env_var_map_from_file from stack_orchestrator.deploy.k8s.helpers import named_volumes_from_pod_files, volume_mounts_for_service, volumes_for_pod_files from stack_orchestrator.deploy.k8s.helpers import get_node_pv_mount_path -from stack_orchestrator.deploy.k8s.helpers import env_var_map_from_file, envs_from_environment_variables_map +from stack_orchestrator.deploy.k8s.helpers import envs_from_environment_variables_map from stack_orchestrator.deploy.deploy_util import parsed_pod_files_map_from_file_names, images_for_deployment from stack_orchestrator.deploy.deploy_types import DeployEnvVars from stack_orchestrator.deploy.spec import Spec diff --git a/stack_orchestrator/deploy/k8s/helpers.py b/stack_orchestrator/deploy/k8s/helpers.py index 82a33792..9f968dbf 100644 --- a/stack_orchestrator/deploy/k8s/helpers.py +++ b/stack_orchestrator/deploy/k8s/helpers.py @@ -14,7 +14,6 @@ # along with this program. If not, see . 
from kubernetes import client -from dotenv import dotenv_values import os from pathlib import Path import subprocess @@ -224,7 +223,3 @@ def generate_kind_config(deployment_dir: Path): f"{port_mappings_yml}\n" f"{mounts_yml}\n" ) - - -def env_var_map_from_file(file: Path) -> Mapping[str, str]: - return dotenv_values(file) diff --git a/stack_orchestrator/deploy/webapp/deploy_webapp.py b/stack_orchestrator/deploy/webapp/deploy_webapp.py index a2cd7253..391162c9 100644 --- a/stack_orchestrator/deploy/webapp/deploy_webapp.py +++ b/stack_orchestrator/deploy/webapp/deploy_webapp.py @@ -98,6 +98,7 @@ def create(ctx, deployment_dir, image, url, kube_config, image_registry, env_fil stack, "k8s", None, + env_file, kube_config, image_registry, spec_file_name, diff --git a/stack_orchestrator/util.py b/stack_orchestrator/util.py index 97d48963..0bd1a609 100644 --- a/stack_orchestrator/util.py +++ b/stack_orchestrator/util.py @@ -18,6 +18,8 @@ import os.path import sys import ruamel.yaml from pathlib import Path +from dotenv import dotenv_values +from typing import Mapping def include_exclude_check(s, include, exclude): @@ -178,3 +180,7 @@ def global_options2(ctx): def error_exit(s): print(f"ERROR: {s}") sys.exit(1) + + +def env_var_map_from_file(file: Path) -> Mapping[str, str]: + return dotenv_values(file) From 03a3645b3c5486668f7fbd03d60dc72ce4eac574 Mon Sep 17 00:00:00 2001 From: Thomas E Lackey Date: Wed, 29 Nov 2023 11:32:28 -0600 Subject: [PATCH 52/62] Add --port option to run-webapp. 
(#667) * Add --port option to run-webapp * Fixed merge * lint --- stack_orchestrator/deploy/compose/deploy_docker.py | 4 ++-- stack_orchestrator/deploy/deployer.py | 2 +- stack_orchestrator/deploy/k8s/deploy_k8s.py | 2 +- stack_orchestrator/deploy/webapp/run_webapp.py | 14 ++++++++++---- 4 files changed, 14 insertions(+), 8 deletions(-) diff --git a/stack_orchestrator/deploy/compose/deploy_docker.py b/stack_orchestrator/deploy/compose/deploy_docker.py index 4b4e7426..04f24df5 100644 --- a/stack_orchestrator/deploy/compose/deploy_docker.py +++ b/stack_orchestrator/deploy/compose/deploy_docker.py @@ -64,10 +64,10 @@ class DockerDeployer(Deployer): except DockerException as e: raise DeployerException(e) - def run(self, image: str, command=None, user=None, volumes=None, entrypoint=None, env={}, detach=False): + def run(self, image: str, command=None, user=None, volumes=None, entrypoint=None, env={}, ports=[], detach=False): try: return self.docker.run(image=image, command=command, user=user, volumes=volumes, - entrypoint=entrypoint, envs=env, detach=detach, publish_all=True) + entrypoint=entrypoint, envs=env, detach=detach, publish=ports, publish_all=len(ports) == 0) except DockerException as e: raise DeployerException(e) diff --git a/stack_orchestrator/deploy/deployer.py b/stack_orchestrator/deploy/deployer.py index 79379c3d..984945ed 100644 --- a/stack_orchestrator/deploy/deployer.py +++ b/stack_orchestrator/deploy/deployer.py @@ -44,7 +44,7 @@ class Deployer(ABC): pass @abstractmethod - def run(self, image: str, command=None, user=None, volumes=None, entrypoint=None, env={}, detach=False): + def run(self, image: str, command=None, user=None, volumes=None, entrypoint=None, env={}, ports=[], detach=False): pass diff --git a/stack_orchestrator/deploy/k8s/deploy_k8s.py b/stack_orchestrator/deploy/k8s/deploy_k8s.py index 5d41ae23..c84aa34a 100644 --- a/stack_orchestrator/deploy/k8s/deploy_k8s.py +++ b/stack_orchestrator/deploy/k8s/deploy_k8s.py @@ -230,7 +230,7 @@ class 
K8sDeployer(Deployer): log_data = self.core_api.read_namespaced_pod_log(k8s_pod_name, namespace="default", container="test") return log_stream_from_string(log_data) - def run(self, image: str, command=None, user=None, volumes=None, entrypoint=None, env={}, detach=False): + def run(self, image: str, command=None, user=None, volumes=None, entrypoint=None, env={}, ports=[], detach=False): # We need to figure out how to do this -- check why we're being called first pass diff --git a/stack_orchestrator/deploy/webapp/run_webapp.py b/stack_orchestrator/deploy/webapp/run_webapp.py index e4e01171..4dbf234a 100644 --- a/stack_orchestrator/deploy/webapp/run_webapp.py +++ b/stack_orchestrator/deploy/webapp/run_webapp.py @@ -27,13 +27,16 @@ from dotenv import dotenv_values from stack_orchestrator import constants from stack_orchestrator.deploy.deployer_factory import getDeployer +WEBAPP_PORT = 3000 + @click.command() @click.option("--image", help="image to deploy", required=True) @click.option("--env-file", help="environment file for webapp") +@click.option("--port", help="port to use (default random)") @click.pass_context -def command(ctx, image, env_file): - '''build the specified webapp container''' +def command(ctx, image, env_file, port): + '''run the specified webapp container''' env = {} if env_file: @@ -49,10 +52,13 @@ def command(ctx, image, env_file): compose_project_name=cluster, compose_env_file=None) - container = deployer.run(image, command=[], user=None, volumes=[], entrypoint=None, env=env, detach=True) + ports = [] + if port: + ports = [(port, WEBAPP_PORT)] + container = deployer.run(image, command=[], user=None, volumes=[], entrypoint=None, env=env, ports=ports, detach=True) # Make configurable? - webappPort = "3000/tcp" + webappPort = f"{WEBAPP_PORT}/tcp" # TODO: This assumes a Docker container object... 
if webappPort in container.network_settings.ports: mapping = container.network_settings.ports[webappPort][0] From d7093277b4743b7eb0d533bf6c6d0327d696f762 Mon Sep 17 00:00:00 2001 From: David Boreham Date: Wed, 29 Nov 2023 20:50:53 -0700 Subject: [PATCH 53/62] Use constants (#671) --- stack_orchestrator/deploy/deployment_context.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/stack_orchestrator/deploy/deployment_context.py b/stack_orchestrator/deploy/deployment_context.py index cd731394..cbee4151 100644 --- a/stack_orchestrator/deploy/deployment_context.py +++ b/stack_orchestrator/deploy/deployment_context.py @@ -16,6 +16,7 @@ from pathlib import Path +from stack_orchestrator import constants from stack_orchestrator.deploy.stack import Stack from stack_orchestrator.deploy.spec import Spec @@ -26,13 +27,13 @@ class DeploymentContext: stack: Stack def get_stack_file(self): - return self.deployment_dir.joinpath("stack.yml") + return self.deployment_dir.joinpath(constants.stack_file_name) def get_spec_file(self): - return self.deployment_dir.joinpath("spec.yml") + return self.deployment_dir.joinpath(constants.spec_file_name) def get_env_file(self): - return self.deployment_dir.joinpath("config.env") + return self.deployment_dir.joinpath(constants.config_file_name) # TODO: implement me def get_cluster_name(self): From c19559967d667c59554721bb85226d23ab781a12 Mon Sep 17 00:00:00 2001 From: David Boreham Date: Wed, 29 Nov 2023 20:55:14 -0700 Subject: [PATCH 54/62] Add doc for deploy-webapp (#672) --- docs/webapp.md | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/docs/webapp.md b/docs/webapp.md index 3b1d0609..fcf4ffcb 100644 --- a/docs/webapp.md +++ b/docs/webapp.md @@ -34,7 +34,7 @@ To test locally run: ## Running -With `run-webapp` a new container will be launched with runtime configuration provided by `--env-file` (if specified) and published on an available port. 
Multiple instances can be launched with different configuration. +With `run-webapp` a new container will be launched on the local machine, with runtime configuration provided by `--env-file` (if specified) and published on an available port. Multiple instances can be launched with different configuration. **Example**: ``` @@ -52,3 +52,13 @@ Image: cerc/test-progressive-web-app:local ID: 9ab96494f563aafb6c057d88df58f9eca81b90f8721a4e068493a289a976051c URL: http://localhost:32769 ``` + +## Deploying + +Use the subcommand `deploy-webapp create` to make a deployment directory that can be subsequently deployed to a Kubernetes cluster. +Example commands are shown below, assuming that the webapp container image `cerc/test-progressive-web-app:local` has already been built: +``` +$ laconic-so deploy-webapp create --kube-config ~/kubectl/k8s-kubeconfig.yaml --image-registry registry.digitalocean.com/laconic-registry --deployment-dir webapp-k8s-deployment --image cerc/test-progressive-web-app:local --url https://test-pwa-app.hosting.laconic.com/ --env-file test-webapp.env +$ laconic-so deployment --dir webapp-k8s-deployment push-images +$ laconic-so deployment --dir webapp-k8s-deployment start +``` From 2173e7ce6a1eab21ee11149d61fb6537c3cccc8c Mon Sep 17 00:00:00 2001 From: Thomas E Lackey Date: Thu, 30 Nov 2023 12:33:06 -0600 Subject: [PATCH 55/62] If the next version is unsupported, print a big warning and try higher version. 
(#674) --- .../cerc-nextjs-base/Dockerfile | 2 ++ .../cerc-nextjs-base/scripts/build-app.sh | 32 ++++++++++++++++--- 2 files changed, 30 insertions(+), 4 deletions(-) diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile b/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile index c2416b67..d3ff3f1b 100644 --- a/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/Dockerfile @@ -24,6 +24,8 @@ RUN \ && su ${USERNAME} -c "npm config -g set prefix ${NPM_GLOBAL}" \ # Install eslint && su ${USERNAME} -c "umask 0002 && npm install -g eslint" \ + # Install semver + && su ${USERNAME} -c "umask 0002 && npm install -g semver" \ && npm cache clean --force > /dev/null 2>&1 # [Optional] Uncomment this section to install additional OS packages. diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/build-app.sh b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/build-app.sh index e62bc0d0..ef6244cf 100755 --- a/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/build-app.sh +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/scripts/build-app.sh @@ -4,10 +4,12 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then set -x fi +CERC_MIN_NEXTVER=13.4.2 + CERC_NEXT_VERSION="${CERC_NEXT_VERSION:-keep}" CERC_BUILD_TOOL="${CERC_BUILD_TOOL}" if [ -z "$CERC_BUILD_TOOL" ]; then - if [ -f "yarn.lock" ] && [ ! 
-f "package-lock.json" ]; then + if [ -f "yarn.lock" ]; then CERC_BUILD_TOOL=yarn else CERC_BUILD_TOOL=npm @@ -101,13 +103,35 @@ cat package.dist | jq '.scripts.cerc_compile = "next experimental-compile"' | jq CUR_NEXT_VERSION="`jq -r '.dependencies.next' package.json`" if [ "$CERC_NEXT_VERSION" != "keep" ] && [ "$CUR_NEXT_VERSION" != "$CERC_NEXT_VERSION" ]; then - echo "Changing 'next' version specifier from '$CUR_NEXT_VERSION' to '$CERC_NEXT_VERSION' (set with --build-arg CERC_NEXT_VERSION)" + echo "Changing 'next' version specifier from '$CUR_NEXT_VERSION' to '$CERC_NEXT_VERSION' (set with '--extra-build-args \"--build-arg CERC_NEXT_VERSION=$CERC_NEXT_VERSION\"')" cat package.json | jq ".dependencies.next = \"$CERC_NEXT_VERSION\"" | sponge package.json -else - echo "'next' version specifier '$CUR_NEXT_VERSION' (override with --build-arg CERC_NEXT_VERSION)" fi $CERC_BUILD_TOOL install || exit 1 + +CUR_NEXT_VERSION=`jq -r '.version' node_modules/next/package.json` + +semver -p -r ">=$CERC_MIN_NEXTVER" $CUR_NEXT_VERSION +if [ $? 
-ne 0 ]; then + cat <" + +############################################################################### + +EOF + cat package.json | jq ".dependencies.next = \"^$CERC_MIN_NEXTVER\"" | sponge package.json + $CERC_BUILD_TOOL install || exit 1 +fi + $CERC_BUILD_TOOL run cerc_compile || exit 1 exit 0 From c319e90ddddb215ff4049afbce3ec87742e155bb Mon Sep 17 00:00:00 2001 From: Nabarun Gogoi Date: Mon, 4 Dec 2023 18:39:19 +0530 Subject: [PATCH 56/62] Add a stack for running uniswap frontend on urbit (#670) * Create uniswap-frontend stack * Add stack for building uniswap frontend app * Add a container for Urbit fake ship * Update with deployment command * Add a service for uniswap app deployment to urbit * Use a script to start urbit ship to handle restarts * Rename stack name to uniswap-urbit-app * Rename build.sh to build-app.sh and check if build already exists * Rename stack directory name * Update uniswap build restart on failure * Perform uniswap app deployment in the urbit container * Add steps to create glob for the app * Tail /dev/null after deployment * Add steps to install the app to desk * Host glob files for uniswap * Update repo branch * Update readme with command to get urbit password * Update readme * Update readme to open urbit web UI * Expose the port on glob hosting container * Avoid exposing urbit http port * Add scripts for installing uniswap on remote urbit instance * Configure GQL proxy for uniswap app * Use laconic branch for app repo * Rename urbit pod for uniswap app deployment --------- Co-authored-by: Prathamesh Musale --- .../docker-compose-uniswap-interface.yml | 49 ++++++ .../compose/docker-compose-uniswap-urbit.yml | 26 ++++ .../config/uniswap-interface/build-app.sh | 18 +++ .../uniswap-interface/deploy-uniswap-app.sh | 142 ++++++++++++++++++ .../uniswap-interface/host-uniswap-glob.sh | 23 +++ .../uniswap-interface/install-uniswap-app.sh | 109 ++++++++++++++ .../remote-deploy-uniswap.sh | 18 +++ .../data/config/urbit/run-urbit-ship.sh | 18 
+++ .../cerc-uniswap-interface/Dockerfile | 10 ++ .../cerc-uniswap-interface/build.sh | 8 + .../cerc-urbit-globs-host/Dockerfile | 7 + .../cerc-urbit-globs-host/build.sh | 9 ++ .../data/container-image-list.txt | 1 + stack_orchestrator/data/repository-list.txt | 1 + .../data/stacks/uniswap-urbit-app/README.md | 122 +++++++++++++++ .../data/stacks/uniswap-urbit-app/stack.yml | 10 ++ 16 files changed, 571 insertions(+) create mode 100644 stack_orchestrator/data/compose/docker-compose-uniswap-interface.yml create mode 100644 stack_orchestrator/data/compose/docker-compose-uniswap-urbit.yml create mode 100755 stack_orchestrator/data/config/uniswap-interface/build-app.sh create mode 100755 stack_orchestrator/data/config/uniswap-interface/deploy-uniswap-app.sh create mode 100755 stack_orchestrator/data/config/uniswap-interface/host-uniswap-glob.sh create mode 100755 stack_orchestrator/data/config/uniswap-interface/install-uniswap-app.sh create mode 100755 stack_orchestrator/data/config/uniswap-interface/remote-deploy-uniswap.sh create mode 100755 stack_orchestrator/data/config/urbit/run-urbit-ship.sh create mode 100644 stack_orchestrator/data/container-build/cerc-uniswap-interface/Dockerfile create mode 100755 stack_orchestrator/data/container-build/cerc-uniswap-interface/build.sh create mode 100644 stack_orchestrator/data/container-build/cerc-urbit-globs-host/Dockerfile create mode 100755 stack_orchestrator/data/container-build/cerc-urbit-globs-host/build.sh create mode 100644 stack_orchestrator/data/stacks/uniswap-urbit-app/README.md create mode 100644 stack_orchestrator/data/stacks/uniswap-urbit-app/stack.yml diff --git a/stack_orchestrator/data/compose/docker-compose-uniswap-interface.yml b/stack_orchestrator/data/compose/docker-compose-uniswap-interface.yml new file mode 100644 index 00000000..f6a5c53f --- /dev/null +++ b/stack_orchestrator/data/compose/docker-compose-uniswap-interface.yml @@ -0,0 +1,49 @@ +version: "3.2" + +services: + uniswap-interface: + image: 
cerc/uniswap-interface:local + restart: on-failure + environment: + - REACT_APP_INFURA_KEY=${CERC_INFURA_KEY} + - REACT_APP_AWS_API_ENDPOINT=${CERC_UNISWAP_GQL} + command: ["./build-app.sh"] + volumes: + - app_builds:/app-builds + - ../config/uniswap-interface/build-app.sh:/app/build-app.sh + + uniswap-glob-host: + image: cerc/urbit-globs-host:local + restart: unless-stopped + depends_on: + uniswap-interface: + condition: service_completed_successfully + command: ["./host-uniswap-glob.sh"] + volumes: + - app_globs:/app-globs + - ../config/uniswap-interface/host-uniswap-glob.sh:/app/host-uniswap-glob.sh + ports: + - "3000" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "3000"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 10s + + uniswap-gql-proxy: + image: cerc/uniswap-interface:local + restart: on-failure + command: ["bash", "-c", "yarn proxy-gql"] + ports: + - "4000" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "4000"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 10s + +volumes: + app_builds: + app_globs: diff --git a/stack_orchestrator/data/compose/docker-compose-uniswap-urbit.yml b/stack_orchestrator/data/compose/docker-compose-uniswap-urbit.yml new file mode 100644 index 00000000..ae0b3709 --- /dev/null +++ b/stack_orchestrator/data/compose/docker-compose-uniswap-urbit.yml @@ -0,0 +1,26 @@ +version: '3.7' + +services: + urbit-fake-ship: + restart: unless-stopped + image: tloncorp/vere + entrypoint: ["bash", "-c", "./run-urbit-ship.sh && ./deploy-uniswap-app.sh && tail -f /dev/null"] + volumes: + - urbit_data:/urbit + - app_builds:/app-builds + - app_globs:/app-globs + - ../config/urbit/run-urbit-ship.sh:/urbit/run-urbit-ship.sh + - ../config/uniswap-interface/deploy-uniswap-app.sh:/urbit/deploy-uniswap-app.sh + ports: + - "80" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "80"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 10s + +volumes: + urbit_data: + app_builds: + app_globs: diff 
--git a/stack_orchestrator/data/config/uniswap-interface/build-app.sh b/stack_orchestrator/data/config/uniswap-interface/build-app.sh new file mode 100755 index 00000000..d3b012e6 --- /dev/null +++ b/stack_orchestrator/data/config/uniswap-interface/build-app.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +# Check and exit if a deployment already exists (on restarts) +if [ -d /app-builds/uniswap/build ]; then + echo "Build already exists, remove volume to rebuild" + exit 0 +fi + +yarn build + +# Move build to app-builds so urbit can deploy it +mkdir /app-builds/uniswap +cp -r ./build /app-builds/uniswap/ diff --git a/stack_orchestrator/data/config/uniswap-interface/deploy-uniswap-app.sh b/stack_orchestrator/data/config/uniswap-interface/deploy-uniswap-app.sh new file mode 100755 index 00000000..6c147083 --- /dev/null +++ b/stack_orchestrator/data/config/uniswap-interface/deploy-uniswap-app.sh @@ -0,0 +1,142 @@ +#!/bin/bash + +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +uniswap_app_build='/app-builds/uniswap/build' +uniswap_desk_dir='/urbit/zod/uniswap' + +if [ -d ${uniswap_desk_dir} ]; then + echo "Uniswap desk dir already exists, skipping deployment..." + exit 0 +fi + +# Fire curl requests to perform operations on the ship +dojo () { + curl -s --data '{"source":{"dojo":"'"$1"'"},"sink":{"stdout":null}}' http://localhost:12321 +} + +hood () { + curl -s --data '{"source":{"dojo":"+hood/'"$1"'"},"sink":{"app":"hood"}}' http://localhost:12321 +} + +# Create/mount a uniswap desk +hood "merge %uniswap our %landscape" +hood "mount %uniswap" + +# Loop until the uniswap build appears +while [ ! -d ${uniswap_app_build} ]; do + echo "Uniswap app build not found, retrying in 5s..." + sleep 5 +done +echo "Build found..." 
+ +# Copy over build to desk data dir +cp -r ${uniswap_app_build} ${uniswap_desk_dir} + +# Create a mark file for .map file type +cat << EOF > "${uniswap_desk_dir}/mar/map.hoon" +:: +:::: /hoon/map/mar + :: Mark for js source maps +/? 310 +:: +=, eyre +|_ mud=@ +++ grow + |% + ++ mime [/application/octet-stream (as-octs:mimes:html (@t mud))] + -- +++ grab + |% :: convert from + ++ mime |=([p=mite q=octs] (@t q.q)) + ++ noun cord :: clam from %noun + -- +++ grad %mime +-- +EOF + +# Create a mark file for .woff file type +cat << EOF > "${uniswap_desk_dir}/mar/woff.hoon" +|_ dat=octs +++ grow + |% + ++ mime [/font/woff dat] + -- +++ grab + |% + ++ mime |=([=mite =octs] octs) + ++ noun octs + -- +++ grad %mime +-- +EOF + +# Create a mark file for .ttf file type +cat << EOF > "${uniswap_desk_dir}/mar/ttf.hoon" +|_ dat=octs +++ grow + |% + ++ mime [/font/ttf dat] + -- +++ grab + |% + ++ mime |=([=mite =octs] octs) + ++ noun octs + -- +++ grad %mime +-- +EOF + +rm "${uniswap_desk_dir}/desk.bill" +rm "${uniswap_desk_dir}/desk.ship" + +# Commit changes and create a glob +hood "commit %uniswap" +dojo "-landscape!make-glob %uniswap /build" + +echo "Copying over glob file to mounted volume" +mkdir -p /app-globs/uniswap +cp /urbit/zod/.urb/put/* /app-globs/uniswap/ + +glob_file=$(ls -1 -c zod/.urb/put | head -1) +echo "Glob filename: ${glob_file}" + +# Curl and wait for the glob to be hosted +glob_url="http://uniswap-glob-host:3000/${glob_file}" + +echo "Checking if glob file hosted at ${glob_url}" +while true; do + response=$(curl -sL -w "%{http_code}" -o /dev/null "$glob_url") + + if [ $response -eq 200 ]; then + echo "File found at $glob_url" + break # Exit the loop if the file is found + else + echo "File not found. Retrying in a few seconds..." + sleep 5 + fi +done + +glob_hash=$(echo "$glob_file" | sed "s/glob-\([a-z0-9\.]*\).glob/\1/") + +# Update the docket file +cat << EOF > "${uniswap_desk_dir}/desk.docket-0" +:~ title+'Uniswap' + info+'Self-hosted uniswap frontend.' 
+ color+0xcd.75df + image+'https://logowik.com/content/uploads/images/uniswap-uni7403.jpg' + base+'uniswap' + glob-http+['http://uniswap-glob-host:3000/${glob_file}' ${glob_hash}] + version+[0 0 1] + website+'https://uniswap.org/' + license+'MIT' +== +EOF + +# Commit changes and install the app +hood "commit %uniswap" +hood "install our %uniswap" + +echo "Uniswap app installed" diff --git a/stack_orchestrator/data/config/uniswap-interface/host-uniswap-glob.sh b/stack_orchestrator/data/config/uniswap-interface/host-uniswap-glob.sh new file mode 100755 index 00000000..37605794 --- /dev/null +++ b/stack_orchestrator/data/config/uniswap-interface/host-uniswap-glob.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +# Use config from mounted volume (when running web-app along with watcher stack) +echo "Waiting for uniswap app glob" +while [ ! -d /app-globs/uniswap ]; do + echo "Glob directory not found, retrying in 5 seconds..." + sleep 5 +done + + +# Copy to a new globs directory +mkdir -p globs +cp -r /app-globs/uniswap/* ./globs + +# Serve the glob file +cd globs +echo "Hosting glob file at port 3000" +python3 -m http.server 3000 --bind 0.0.0.0 diff --git a/stack_orchestrator/data/config/uniswap-interface/install-uniswap-app.sh b/stack_orchestrator/data/config/uniswap-interface/install-uniswap-app.sh new file mode 100755 index 00000000..679bc27e --- /dev/null +++ b/stack_orchestrator/data/config/uniswap-interface/install-uniswap-app.sh @@ -0,0 +1,109 @@ +#!/bin/bash + +# $1: Glob file URL (eg. 
https://xyz.com/glob-abcd.glob) +# $2: Uniswap desk dir (default: ./zod/uniswap) + +if [ -z "$1" ]; then + echo "Glob file URL arg not provided" + exit 0 +fi + +glob_url=$1 +glob_file=$(basename "$glob_url") +glob_hash=$(echo "$glob_file" | sed "s/glob-\([a-z0-9\.]*\).glob/\1/") +echo "Using glob file ${glob_file}" + +# Default desk dir: ./zod/uniswap +uniswap_desk_dir="${2:-./zod/uniswap}" + +echo "Using ${uniswap_desk_dir} as the Uniswap desk dir path" + +# Fire curl requests to perform operations on the ship +dojo () { + curl -s --data '{"source":{"dojo":"'"$1"'"},"sink":{"stdout":null}}' http://localhost:12321 +} + +hood () { + curl -s --data '{"source":{"dojo":"+hood/'"$1"'"},"sink":{"app":"hood"}}' http://localhost:12321 +} + +# Create/mount a uniswap desk +hood "merge %uniswap our %landscape" +hood "mount %uniswap" + +# Create a mark file for .map file type +cat << EOF > "${uniswap_desk_dir}/mar/map.hoon" +:: +:::: /hoon/map/mar + :: Mark for js source maps +/? 310 +:: +=, eyre +|_ mud=@ +++ grow + |% + ++ mime [/application/octet-stream (as-octs:mimes:html (@t mud))] + -- +++ grab + |% :: convert from + ++ mime |=([p=mite q=octs] (@t q.q)) + ++ noun cord :: clam from %noun + -- +++ grad %mime +-- +EOF + +# Create a mark file for .woff file type +cat << EOF > "${uniswap_desk_dir}/mar/woff.hoon" +|_ dat=octs +++ grow + |% + ++ mime [/font/woff dat] + -- +++ grab + |% + ++ mime |=([=mite =octs] octs) + ++ noun octs + -- +++ grad %mime +-- +EOF + +# Create a mark file for .ttf file type +cat << EOF > "${uniswap_desk_dir}/mar/ttf.hoon" +|_ dat=octs +++ grow + |% + ++ mime [/font/ttf dat] + -- +++ grab + |% + ++ mime |=([=mite =octs] octs) + ++ noun octs + -- +++ grad %mime +-- +EOF + +rm "${uniswap_desk_dir}/desk.bill" +rm "${uniswap_desk_dir}/desk.ship" + +# Update the docket file +cat << EOF > "${uniswap_desk_dir}/desk.docket-0" +:~ title+'Uniswap' + info+'Self-hosted uniswap frontend.' 
+ color+0xcd.75df + image+'https://logowik.com/content/uploads/images/uniswap-uni7403.jpg' + base+'uniswap' + glob-http+['${glob_url}' ${glob_hash}] + version+[0 0 1] + website+'https://uniswap.org/' + license+'MIT' +== +EOF + +# Commit changes and install the app +hood "commit %uniswap" +hood "install our %uniswap" + +echo "Uniswap app installed" diff --git a/stack_orchestrator/data/config/uniswap-interface/remote-deploy-uniswap.sh b/stack_orchestrator/data/config/uniswap-interface/remote-deploy-uniswap.sh new file mode 100755 index 00000000..528151e9 --- /dev/null +++ b/stack_orchestrator/data/config/uniswap-interface/remote-deploy-uniswap.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +# $1: Remote user host +# $2: Path to run the app installation in (where urbit ship dir is located) +# $3: Glob file URL (eg. https://xyz.com/glob-abcd.glob) + +if [ "$#" -ne 3 ]; then + echo "Usage: $0 " + exit 1 +fi + +remote_user_host="$1" +remote_folder="$2" +glob_url="$3" + +installation_script="./install-uniswap-app.sh" + +ssh "$remote_user_host" "cd $remote_folder && bash -s $glob_url" < "$installation_script" diff --git a/stack_orchestrator/data/config/urbit/run-urbit-ship.sh b/stack_orchestrator/data/config/urbit/run-urbit-ship.sh new file mode 100755 index 00000000..bb301c81 --- /dev/null +++ b/stack_orchestrator/data/config/urbit/run-urbit-ship.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +pier_dir="/urbit/zod" + +# Run urbit ship in daemon mode +# Check if the directory exists +if [ -d "$pier_dir" ]; then + echo "Pier directory already exists, rebooting..." + urbit -d zod +else + echo "Creating a new fake ship..." 
+ urbit -d -F zod +fi diff --git a/stack_orchestrator/data/container-build/cerc-uniswap-interface/Dockerfile b/stack_orchestrator/data/container-build/cerc-uniswap-interface/Dockerfile new file mode 100644 index 00000000..59804896 --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-uniswap-interface/Dockerfile @@ -0,0 +1,10 @@ +FROM node:18.17.1-alpine3.18 + +RUN apk --update --no-cache add git make alpine-sdk bash + +WORKDIR /app + +COPY . . + +RUN echo "Building uniswap-interface" && \ + yarn diff --git a/stack_orchestrator/data/container-build/cerc-uniswap-interface/build.sh b/stack_orchestrator/data/container-build/cerc-uniswap-interface/build.sh new file mode 100755 index 00000000..af1971b5 --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-uniswap-interface/build.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +# Build the uniswap-interface image +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +# See: https://stackoverflow.com/a/246128/1701505 +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +docker build -t cerc/uniswap-interface:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/uniswap-interface diff --git a/stack_orchestrator/data/container-build/cerc-urbit-globs-host/Dockerfile b/stack_orchestrator/data/container-build/cerc-urbit-globs-host/Dockerfile new file mode 100644 index 00000000..7a3ca9b7 --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-urbit-globs-host/Dockerfile @@ -0,0 +1,7 @@ +FROM python:3.13.0a2-alpine3.18 + +RUN apk --update --no-cache add alpine-sdk jq bash curl wget + +WORKDIR /app + +ENTRYPOINT [ "bash" ] diff --git a/stack_orchestrator/data/container-build/cerc-urbit-globs-host/build.sh b/stack_orchestrator/data/container-build/cerc-urbit-globs-host/build.sh new file mode 100755 index 00000000..ebd396f1 --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-urbit-globs-host/build.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# Build 
the urbit-globs-host image + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +# See: https://stackoverflow.com/a/246128/1701505 +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +docker build -t cerc/urbit-globs-host:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${SCRIPT_DIR} diff --git a/stack_orchestrator/data/container-image-list.txt b/stack_orchestrator/data/container-image-list.txt index 41dd8b21..fd295be5 100644 --- a/stack_orchestrator/data/container-image-list.txt +++ b/stack_orchestrator/data/container-image-list.txt @@ -59,3 +59,4 @@ cerc/ponder cerc/nitro-rpc-client cerc/watcher-merkl-sushiswap-v3 cerc/watcher-sushiswap-v3 +cerc/uniswap-interface diff --git a/stack_orchestrator/data/repository-list.txt b/stack_orchestrator/data/repository-list.txt index 192a831e..cddaccce 100644 --- a/stack_orchestrator/data/repository-list.txt +++ b/stack_orchestrator/data/repository-list.txt @@ -49,3 +49,4 @@ github.com/cerc-io/mobymask-snap github.com/cerc-io/ponder github.com/cerc-io/merkl-sushiswap-v3-watcher-ts github.com/cerc-io/sushiswap-v3-watcher-ts +github.com/cerc-io/uniswap-interface diff --git a/stack_orchestrator/data/stacks/uniswap-urbit-app/README.md b/stack_orchestrator/data/stacks/uniswap-urbit-app/README.md new file mode 100644 index 00000000..cd6a9f3e --- /dev/null +++ b/stack_orchestrator/data/stacks/uniswap-urbit-app/README.md @@ -0,0 +1,122 @@ +# Self-hosted Uniswap Frontend + +Instructions to setup and deploy Uniswap app on Urbit + +Build and deploy: + +- Urbit +- Uniswap app + +## Setup + +Clone required repositories: + +```bash +laconic-so --stack uniswap-urbit-app setup-repositories --pull + +# If this throws an error as a result of being already checked out to a branch/tag in a repo, remove the repositories mentioned below and re-run the command +``` + +Build the container images: + +```bash +laconic-so --stack uniswap-urbit-app build-containers +``` + +## Create a deployment + +First, create a 
spec file for the deployment, which will map the stack's ports and volumes to the host: + +```bash +laconic-so --stack uniswap-urbit-app deploy init --output uniswap-urbit-app-spec.yml +``` + +### Ports + +Edit `network` in spec file to map container ports to same ports in host + +``` +... +network: + ports: + urbit-fake-ship: + - '8080:80' + uniswap-glob-host: + - '3000:3000' + uniswap-gql-proxy: + - '4000:4000' +... +``` + +### Data volumes + +Container data volumes are bind-mounted to specified paths in the host filesystem. +The default setup (generated by `laconic-so deploy init`) places the volumes in the `./data` subdirectory of the deployment directory. The default mappings can be customized by editing the "spec" file generated by `laconic-so deploy init`. + +--- + +Once you've made any needed changes to the spec file, create a deployment from it: + +```bash +laconic-so --stack uniswap-urbit-app deploy create --spec-file uniswap-urbit-app-spec.yml --deployment-dir uniswap-urbit-app-deployment +``` + +## Set env variables + +Inside the deployment directory, open the file `config.env` and add variable for infura key : + + ```bash + # External RPC endpoints + # https://docs.infura.io/getting-started#2-create-an-api-key + CERC_INFURA_KEY= + + # Uniswap API GQL Endpoint + # Set this to GQL proxy server endpoint for uniswap app + # (Eg. 
http://localhost:4000/graphql) + CERC_UNISWAP_GQL= + ``` + +## Start the stack + +Start the deployment: + +```bash +laconic-so deployment --dir uniswap-urbit-app-deployment start +``` + +* List and check the health status of all the containers using `docker ps` and wait for them to be `healthy` + +* Run the following to get login password for Urbit web interface: + + ```bash + laconic-so deployment --dir uniswap-urbit-app-deployment exec urbit-fake-ship "curl -s --data '{\"source\":{\"dojo\":\"+code\"},\"sink\":{\"stdout\":null}}' http://localhost:12321" + + # Expected output: "\n"% + ``` + +* Open the Urbit web UI at http://localhost:8080 and use the `PASSWORD` from previous step to login + +* The uniswap app is not available when starting stack for the first time. Check `urbit-fake-ship` logs to see that app has installed + ``` + laconic-so deployment --dir uniswap-urbit-app-deployment logs -f + + # Expected output: + # laconic-3ccf7ee79bdae874-urbit-fake-ship-1 | docket: fetching %http glob for %uniswap desk + # laconic-3ccf7ee79bdae874-urbit-fake-ship-1 | ">="">="Uniswap app installed + ``` + +* The uniswap app will be now visible at http://localhost:8080 + +## Clean up + +To stop all uniswap-urbit-app services running in the background, while preserving chain data: + +```bash +laconic-so deployment --dir uniswap-urbit-app-deployment stop +``` + +To stop all uniswap-urbit-app services and also delete data: + +```bash +laconic-so deployment --dir uniswap-urbit-app-deployment stop --delete-volumes +``` diff --git a/stack_orchestrator/data/stacks/uniswap-urbit-app/stack.yml b/stack_orchestrator/data/stacks/uniswap-urbit-app/stack.yml new file mode 100644 index 00000000..1077b557 --- /dev/null +++ b/stack_orchestrator/data/stacks/uniswap-urbit-app/stack.yml @@ -0,0 +1,10 @@ +version: "0.1" +name: uniswap-urbit-app +repos: + - github.com/cerc-io/uniswap-interface@laconic # TODO: Use release +containers: + - cerc/uniswap-interface + - cerc/urbit-globs-host +pods: + - 
uniswap-interface + - uniswap-urbit From ab0e70ed83ffcd6baef7d635ea8675dfd0973cee Mon Sep 17 00:00:00 2001 From: Thomas E Lackey Date: Mon, 4 Dec 2023 13:39:14 -0600 Subject: [PATCH 57/62] Change path portion of unique cluster name to point to compose file, not argv[0]. (#678) --- stack_orchestrator/deploy/deploy.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/stack_orchestrator/deploy/deploy.py b/stack_orchestrator/deploy/deploy.py index df231e74..424d112f 100644 --- a/stack_orchestrator/deploy/deploy.py +++ b/stack_orchestrator/deploy/deploy.py @@ -271,8 +271,10 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file): if cluster is None: # Create default unique, stable cluster name from confile file path and stack name if provided - # TODO: change this to the config file path - path = os.path.realpath(sys.argv[0]) + if deployment: + path = os.path.realpath(os.path.abspath(compose_dir)) + else: + path = "internal" unique_cluster_descriptor = f"{path},{stack},{include},{exclude}" if ctx.debug: print(f"pre-hash descriptor: {unique_cluster_descriptor}") From 2dd54892a1fa691b5f1b7b92313cc4d9c6d3846b Mon Sep 17 00:00:00 2001 From: Thomas E Lackey Date: Mon, 4 Dec 2023 21:39:16 -0600 Subject: [PATCH 58/62] Allow specifying the webapp tag explicitly (#675) --- stack_orchestrator/build/build_webapp.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/stack_orchestrator/build/build_webapp.py b/stack_orchestrator/build/build_webapp.py index ace334c4..287347eb 100644 --- a/stack_orchestrator/build/build_webapp.py +++ b/stack_orchestrator/build/build_webapp.py @@ -32,8 +32,9 @@ from stack_orchestrator.build import build_containers @click.option('--source-repo', help="directory containing the webapp to build", required=True) @click.option("--force-rebuild", is_flag=True, default=False, help="Override dependency checking -- always rebuild") @click.option("--extra-build-args", help="Supply extra 
arguments to build") +@click.option("--tag", help="Container tag (default: cerc/:local)") @click.pass_context -def command(ctx, base_container, source_repo, force_rebuild, extra_build_args): +def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, tag): '''build the specified webapp container''' quiet = ctx.obj.quiet @@ -70,8 +71,11 @@ def command(ctx, base_container, source_repo, force_rebuild, extra_build_args): container_build_env["CERC_CONTAINER_BUILD_DOCKERFILE"] = os.path.join(container_build_dir, base_container.replace("/", "-"), "Dockerfile.webapp") - webapp_name = os.path.abspath(source_repo).split(os.path.sep)[-1] - container_build_env["CERC_CONTAINER_BUILD_TAG"] = f"cerc/{webapp_name}:local" + if not tag: + webapp_name = os.path.abspath(source_repo).split(os.path.sep)[-1] + container_build_env["CERC_CONTAINER_BUILD_TAG"] = f"cerc/{webapp_name}:local" + else: + container_build_env["CERC_CONTAINER_BUILD_TAG"] = tag build_containers.process_container(None, base_container, container_build_dir, container_build_env, dev_root_path, quiet, verbose, dry_run, continue_on_error) From f27da1980890dee660dc6d3b1b9c82a7d7770230 Mon Sep 17 00:00:00 2001 From: prathamesh0 <42446521+prathamesh0@users.noreply.github.com> Date: Tue, 5 Dec 2023 15:00:03 +0530 Subject: [PATCH 59/62] Use IPFS for hosting glob files for Urbit (#677) * Use IPFS for hosting glob files for Urbit * Add env configuration for IPFS endpoints to instructions * Make ship pier dir configurable in remote deployment script * Update remote deployment script to accept glob hash arg --- .../docker-compose-uniswap-interface.yml | 19 --------------- .../compose/docker-compose-uniswap-urbit.yml | 20 ++++++++++++++++ .../uniswap-interface/deploy-uniswap-app.sh | 21 +++++++++++------ .../uniswap-interface/host-uniswap-glob.sh | 23 ------------------- .../uniswap-interface/install-uniswap-app.sh | 19 +++++++-------- .../remote-deploy-uniswap.sh | 15 +++++++----- 
.../data/stacks/uniswap-urbit-app/README.md | 15 ++++++++++-- 7 files changed, 66 insertions(+), 66 deletions(-) delete mode 100755 stack_orchestrator/data/config/uniswap-interface/host-uniswap-glob.sh diff --git a/stack_orchestrator/data/compose/docker-compose-uniswap-interface.yml b/stack_orchestrator/data/compose/docker-compose-uniswap-interface.yml index f6a5c53f..6d021961 100644 --- a/stack_orchestrator/data/compose/docker-compose-uniswap-interface.yml +++ b/stack_orchestrator/data/compose/docker-compose-uniswap-interface.yml @@ -12,25 +12,6 @@ services: - app_builds:/app-builds - ../config/uniswap-interface/build-app.sh:/app/build-app.sh - uniswap-glob-host: - image: cerc/urbit-globs-host:local - restart: unless-stopped - depends_on: - uniswap-interface: - condition: service_completed_successfully - command: ["./host-uniswap-glob.sh"] - volumes: - - app_globs:/app-globs - - ../config/uniswap-interface/host-uniswap-glob.sh:/app/host-uniswap-glob.sh - ports: - - "3000" - healthcheck: - test: ["CMD", "nc", "-v", "localhost", "3000"] - interval: 20s - timeout: 5s - retries: 15 - start_period: 10s - uniswap-gql-proxy: image: cerc/uniswap-interface:local restart: on-failure diff --git a/stack_orchestrator/data/compose/docker-compose-uniswap-urbit.yml b/stack_orchestrator/data/compose/docker-compose-uniswap-urbit.yml index ae0b3709..31fa99bf 100644 --- a/stack_orchestrator/data/compose/docker-compose-uniswap-urbit.yml +++ b/stack_orchestrator/data/compose/docker-compose-uniswap-urbit.yml @@ -4,6 +4,9 @@ services: urbit-fake-ship: restart: unless-stopped image: tloncorp/vere + environment: + CERC_IPFS_GLOB_HOST_ENDPOINT: ${CERC_IPFS_GLOB_HOST_ENDPOINT:-http://ipfs-glob-host:5001} + CERC_IPFS_SERVER_ENDPOINT: ${CERC_IPFS_SERVER_ENDPOINT:-http://ipfs-glob-host:8080} entrypoint: ["bash", "-c", "./run-urbit-ship.sh && ./deploy-uniswap-app.sh && tail -f /dev/null"] volumes: - urbit_data:/urbit @@ -20,7 +23,24 @@ services: retries: 15 start_period: 10s + ipfs-glob-host: + 
image: ipfs/kubo:master-2023-02-20-714a968 + volumes: + - ipfs-import:/import + - ipfs-data:/data/ipfs + ports: + - "8080" + - "5001" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "5001"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 10s + volumes: urbit_data: app_builds: app_globs: + ipfs-import: + ipfs-data: diff --git a/stack_orchestrator/data/config/uniswap-interface/deploy-uniswap-app.sh b/stack_orchestrator/data/config/uniswap-interface/deploy-uniswap-app.sh index 6c147083..f07a205b 100755 --- a/stack_orchestrator/data/config/uniswap-interface/deploy-uniswap-app.sh +++ b/stack_orchestrator/data/config/uniswap-interface/deploy-uniswap-app.sh @@ -4,6 +4,11 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then set -x fi +echo "Using IPFS endpoint ${CERC_IPFS_GLOB_HOST_ENDPOINT} for hosting globs" +echo "Using IPFS server endpoint ${CERC_IPFS_SERVER_ENDPOINT} for reading glob files" +ipfs_host_endpoint=${CERC_IPFS_GLOB_HOST_ENDPOINT} +ipfs_server_endpoint=${CERC_IPFS_SERVER_ENDPOINT} + uniswap_app_build='/app-builds/uniswap/build' uniswap_desk_dir='/urbit/zod/uniswap' @@ -96,15 +101,17 @@ rm "${uniswap_desk_dir}/desk.ship" hood "commit %uniswap" dojo "-landscape!make-glob %uniswap /build" -echo "Copying over glob file to mounted volume" -mkdir -p /app-globs/uniswap -cp /urbit/zod/.urb/put/* /app-globs/uniswap/ - glob_file=$(ls -1 -c zod/.urb/put | head -1) -echo "Glob filename: ${glob_file}" +echo "Created glob file: ${glob_file}" + +upload_response=$(curl -X POST -F file=@./zod/.urb/put/${glob_file} ${ipfs_host_endpoint}/api/v0/add) +glob_cid=$(echo "$upload_response" | grep -o '"Hash":"[^"]*' | sed 's/"Hash":"//') + +echo "Glob file uploaded to IFPS:" +echo "{ cid: ${glob_cid}, filename: ${glob_file} }" # Curl and wait for the glob to be hosted -glob_url="http://uniswap-glob-host:3000/${glob_file}" +glob_url="${ipfs_server_endpoint}/ipfs/${glob_cid}?filename=${glob_file}" echo "Checking if glob file hosted at ${glob_url}" while true; do @@ -128,7 
+135,7 @@ cat << EOF > "${uniswap_desk_dir}/desk.docket-0" color+0xcd.75df image+'https://logowik.com/content/uploads/images/uniswap-uni7403.jpg' base+'uniswap' - glob-http+['http://uniswap-glob-host:3000/${glob_file}' ${glob_hash}] + glob-http+['${glob_url}' ${glob_hash}] version+[0 0 1] website+'https://uniswap.org/' license+'MIT' diff --git a/stack_orchestrator/data/config/uniswap-interface/host-uniswap-glob.sh b/stack_orchestrator/data/config/uniswap-interface/host-uniswap-glob.sh deleted file mode 100755 index 37605794..00000000 --- a/stack_orchestrator/data/config/uniswap-interface/host-uniswap-glob.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash - -set -e -if [ -n "$CERC_SCRIPT_DEBUG" ]; then - set -x -fi - -# Use config from mounted volume (when running web-app along with watcher stack) -echo "Waiting for uniswap app glob" -while [ ! -d /app-globs/uniswap ]; do - echo "Glob directory not found, retrying in 5 seconds..." - sleep 5 -done - - -# Copy to a new globs directory -mkdir -p globs -cp -r /app-globs/uniswap/* ./globs - -# Serve the glob file -cd globs -echo "Hosting glob file at port 3000" -python3 -m http.server 3000 --bind 0.0.0.0 diff --git a/stack_orchestrator/data/config/uniswap-interface/install-uniswap-app.sh b/stack_orchestrator/data/config/uniswap-interface/install-uniswap-app.sh index 679bc27e..2463e1c7 100755 --- a/stack_orchestrator/data/config/uniswap-interface/install-uniswap-app.sh +++ b/stack_orchestrator/data/config/uniswap-interface/install-uniswap-app.sh @@ -1,21 +1,22 @@ #!/bin/bash -# $1: Glob file URL (eg. https://xyz.com/glob-abcd.glob) -# $2: Uniswap desk dir (default: ./zod/uniswap) +# $1: Glob file URL (eg. https://xyz.com/glob-0vabcd.glob) +# $2: Glob file hash (eg. 
0vabcd) +# $3: Urbit ship's pier dir (default: ./zod) -if [ -z "$1" ]; then - echo "Glob file URL arg not provided" +if [ "$#" -lt 2 ]; then + echo "Insufficient arguments" exit 0 fi glob_url=$1 -glob_file=$(basename "$glob_url") -glob_hash=$(echo "$glob_file" | sed "s/glob-\([a-z0-9\.]*\).glob/\1/") -echo "Using glob file ${glob_file}" +glob_hash=$2 +echo "Using glob file from ${glob_url} with hash ${glob_hash}" +# Default pier dir: ./zod # Default desk dir: ./zod/uniswap -uniswap_desk_dir="${2:-./zod/uniswap}" - +pier_dir="${3:-./zod}" +uniswap_desk_dir="${pier_dir}/uniswap" echo "Using ${uniswap_desk_dir} as the Uniswap desk dir path" # Fire curl requests to perform operations on the ship diff --git a/stack_orchestrator/data/config/uniswap-interface/remote-deploy-uniswap.sh b/stack_orchestrator/data/config/uniswap-interface/remote-deploy-uniswap.sh index 528151e9..31f03d72 100755 --- a/stack_orchestrator/data/config/uniswap-interface/remote-deploy-uniswap.sh +++ b/stack_orchestrator/data/config/uniswap-interface/remote-deploy-uniswap.sh @@ -1,18 +1,21 @@ #!/bin/bash # $1: Remote user host -# $2: Path to run the app installation in (where urbit ship dir is located) -# $3: Glob file URL (eg. https://xyz.com/glob-abcd.glob) +# $2: Remote Urbit ship's pier dir path (eg. /home/user/zod) +# $3: Glob file URL (eg. https://xyz.com/glob-0vabcd.glob) +# $4: Glob file hash (eg. 
0vabcd) -if [ "$#" -ne 3 ]; then - echo "Usage: $0 " +if [ "$#" -ne 4 ]; then + echo "Incorrect number of arguments" + echo "Usage: $0 " exit 1 fi remote_user_host="$1" -remote_folder="$2" +remote_pier_folder="$2" glob_url="$3" +glob_hash="$4" installation_script="./install-uniswap-app.sh" -ssh "$remote_user_host" "cd $remote_folder && bash -s $glob_url" < "$installation_script" +ssh "$remote_user_host" "bash -s $glob_url $glob_hash $remote_pier_folder" < "$installation_script" diff --git a/stack_orchestrator/data/stacks/uniswap-urbit-app/README.md b/stack_orchestrator/data/stacks/uniswap-urbit-app/README.md index cd6a9f3e..55a37338 100644 --- a/stack_orchestrator/data/stacks/uniswap-urbit-app/README.md +++ b/stack_orchestrator/data/stacks/uniswap-urbit-app/README.md @@ -41,10 +41,11 @@ network: ports: urbit-fake-ship: - '8080:80' - uniswap-glob-host: - - '3000:3000' uniswap-gql-proxy: - '4000:4000' + ipfs-glob-host: + - '8081:8080' + - '5001:5001' ... ``` @@ -74,6 +75,16 @@ Inside the deployment directory, open the file `config.env` and add variable for # Set this to GQL proxy server endpoint for uniswap app # (Eg. 
http://localhost:4000/graphql) CERC_UNISWAP_GQL= + + # Optional IPFS endpoints: + + # IFPS endpoint to host the glob file on + # (Default: http://ipfs-glob-host:5001 pointing to in-stack IPFS node) + CERC_IPFS_GLOB_HOST_ENDPOINT= + + # IFPS endpoint to fetch the glob file from + # (Default: http://ipfs-glob-host:8080 pointing to in-stack IPFS node) + CERC_IPFS_SERVER_ENDPOINT= ``` ## Start the stack From 6bef0c5b2f83c5e6710b0efb5ad685d6015ffa1f Mon Sep 17 00:00:00 2001 From: prathamesh0 <42446521+prathamesh0@users.noreply.github.com> Date: Wed, 6 Dec 2023 10:41:10 +0530 Subject: [PATCH 60/62] Separate out GQL proxy server from uniswap-urbit stack (#681) * Separate out uniswap gql proxy in a stack * Use proxy server from watcher-ts * Add a flag to enable/disable the proxy server * Update env configuratoin for uniswap urbit app stack * Update stack file for uniswap urbit app stack * Fix env variables in instructions --- .../compose/docker-compose-proxy-server.yml | 22 ++++++ .../docker-compose-uniswap-interface.yml | 13 --- .../data/config/proxy-server/run.sh | 9 +++ .../data/stacks/proxy-server/README.md | 79 +++++++++++++++++++ .../data/stacks/proxy-server/stack.yml | 8 ++ .../data/stacks/uniswap-urbit-app/README.md | 28 +++++-- .../data/stacks/uniswap-urbit-app/stack.yml | 3 + 7 files changed, 144 insertions(+), 18 deletions(-) create mode 100644 stack_orchestrator/data/compose/docker-compose-proxy-server.yml create mode 100755 stack_orchestrator/data/config/proxy-server/run.sh create mode 100644 stack_orchestrator/data/stacks/proxy-server/README.md create mode 100644 stack_orchestrator/data/stacks/proxy-server/stack.yml diff --git a/stack_orchestrator/data/compose/docker-compose-proxy-server.yml b/stack_orchestrator/data/compose/docker-compose-proxy-server.yml new file mode 100644 index 00000000..607e8d23 --- /dev/null +++ b/stack_orchestrator/data/compose/docker-compose-proxy-server.yml @@ -0,0 +1,22 @@ +version: "3.2" + +services: + proxy-server: + image: 
cerc/watcher-ts:local + restart: on-failure + working_dir: /app/packages/cli + environment: + ENABLE_PROXY: ${ENABLE_PROXY:-true} + PROXY_UPSTREAM: ${CERC_PROXY_UPSTREAM} + PROXY_ORIGIN_HEADER: ${CERC_PROXY_ORIGIN_HEADER} + command: ["sh", "-c", "./run.sh"] + volumes: + - ../config/proxy-server/run.sh:/app/packages/cli/run.sh + ports: + - "4000" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "4000"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 10s diff --git a/stack_orchestrator/data/compose/docker-compose-uniswap-interface.yml b/stack_orchestrator/data/compose/docker-compose-uniswap-interface.yml index 6d021961..85b71af2 100644 --- a/stack_orchestrator/data/compose/docker-compose-uniswap-interface.yml +++ b/stack_orchestrator/data/compose/docker-compose-uniswap-interface.yml @@ -12,19 +12,6 @@ services: - app_builds:/app-builds - ../config/uniswap-interface/build-app.sh:/app/build-app.sh - uniswap-gql-proxy: - image: cerc/uniswap-interface:local - restart: on-failure - command: ["bash", "-c", "yarn proxy-gql"] - ports: - - "4000" - healthcheck: - test: ["CMD", "nc", "-v", "localhost", "4000"] - interval: 20s - timeout: 5s - retries: 15 - start_period: 10s - volumes: app_builds: app_globs: diff --git a/stack_orchestrator/data/config/proxy-server/run.sh b/stack_orchestrator/data/config/proxy-server/run.sh new file mode 100755 index 00000000..9e8dc7f5 --- /dev/null +++ b/stack_orchestrator/data/config/proxy-server/run.sh @@ -0,0 +1,9 @@ +#!/bin/sh + +if [ "$ENABLE_PROXY" = "true" ]; then + echo "Proxy server enabled" + yarn proxy +else + echo "Proxy server disabled, exiting" + exit 0 +fi diff --git a/stack_orchestrator/data/stacks/proxy-server/README.md b/stack_orchestrator/data/stacks/proxy-server/README.md new file mode 100644 index 00000000..f0ccdb0f --- /dev/null +++ b/stack_orchestrator/data/stacks/proxy-server/README.md @@ -0,0 +1,79 @@ +# Proxy Server + +Instructions to setup and deploy a HTTP proxy server + +## Setup + +Clone required 
repository: + +```bash +laconic-so --stack proxy-server setup-repositories --pull + +# If this throws an error as a result of being already checked out to a branch/tag in a repo, remove the repositories mentioned below and re-run the command +``` + +Build the container image: + +```bash +laconic-so --stack proxy-server build-containers +``` + +## Create a deployment + +* First, create a spec file for the deployment, which will allow mapping the stack's ports and volumes to the host: + + ```bash + laconic-so --stack proxy-server deploy init --output proxy-server-spec.yml + ``` + +* Edit `network` in spec file to map container ports to same ports in host: + + ```yml + ... + network: + ports: + proxy-server: + - '4000:4000' + ... + ``` + +* Once you've made any needed changes to the spec file, create a deployment from it: + + ```bash + laconic-so --stack proxy-server deploy create --spec-file proxy-server-spec.yml --deployment-dir proxy-server-deployment + ``` + +* Inside the deployment directory, open the file `config.env` and set the following env variables: + + ```bash + # Whether to run the proxy server (Optional) (Default: true) + ENABLE_PROXY= + + # Upstream endpoint + # (Eg. https://api.example.org) + CERC_PROXY_UPSTREAM= + + # Origin header to be used (Optional) + # (Eg. 
https://app.example.org) + CERC_PROXY_ORIGIN_HEADER= + ``` + +## Start the stack + +Start the deployment: + +```bash +laconic-so deployment --dir proxy-server-deployment start +``` + +* List and check the health status of the container using `docker ps` + +* The proxy server will now be listening at http://localhost:4000 + +## Clean up + +To stop the service running in background: + +```bash +laconic-so deployment --dir proxy-server-deployment stop +``` diff --git a/stack_orchestrator/data/stacks/proxy-server/stack.yml b/stack_orchestrator/data/stacks/proxy-server/stack.yml new file mode 100644 index 00000000..313a7f91 --- /dev/null +++ b/stack_orchestrator/data/stacks/proxy-server/stack.yml @@ -0,0 +1,8 @@ +version: "0.1" +name: proxy-server +repos: + - github.com/cerc-io/watcher-ts@v0.2.78 +containers: + - cerc/watcher-ts +pods: + - proxy-server diff --git a/stack_orchestrator/data/stacks/uniswap-urbit-app/README.md b/stack_orchestrator/data/stacks/uniswap-urbit-app/README.md index 55a37338..7499f5fc 100644 --- a/stack_orchestrator/data/stacks/uniswap-urbit-app/README.md +++ b/stack_orchestrator/data/stacks/uniswap-urbit-app/README.md @@ -41,7 +41,7 @@ network: ports: urbit-fake-ship: - '8080:80' - uniswap-gql-proxy: + proxy-server: - '4000:4000' ipfs-glob-host: - '8081:8080' @@ -64,7 +64,7 @@ laconic-so --stack uniswap-urbit-app deploy create --spec-file uniswap-urbit-app ## Set env variables -Inside the deployment directory, open the file `config.env` and add variable for infura key : +Inside the deployment directory, open the file `config.env` and set the following env variables: ```bash # External RPC endpoints @@ -73,10 +73,28 @@ Inside the deployment directory, open the file `config.env` and add variable for # Uniswap API GQL Endpoint # Set this to GQL proxy server endpoint for uniswap app - # (Eg. http://localhost:4000/graphql) + # (Eg. http://localhost:4000/v1/graphql) + # (Eg. 
https://abc.xyz.com/v1/graphql) CERC_UNISWAP_GQL= - # Optional IPFS endpoints: + # Optional + + # Whether to run the proxy GQL server + # (Disable only if proxy not required to be run) (Default: true) + ENABLE_PROXY= + + # Proxy server configuration + # Used only if proxy is enabled + + # Upstream API URL + # (Eg. https://api.example.org) + CERC_PROXY_UPSTREAM=https://api.uniswap.org + + # Origin header to be used in the proxy + # (Eg. https://app.example.org) + CERC_PROXY_ORIGIN_HEADER=https://app.uniswap.org + + # IPFS configuration # IFPS endpoint to host the glob file on # (Default: http://ipfs-glob-host:5001 pointing to in-stack IPFS node) @@ -120,7 +138,7 @@ laconic-so deployment --dir uniswap-urbit-app-deployment start ## Clean up -To stop all uniswap-urbit-app services running in the background, while preserving chain data: +To stop all uniswap-urbit-app services running in the background, while preserving data: ```bash laconic-so deployment --dir uniswap-urbit-app-deployment stop diff --git a/stack_orchestrator/data/stacks/uniswap-urbit-app/stack.yml b/stack_orchestrator/data/stacks/uniswap-urbit-app/stack.yml index 1077b557..3f77098f 100644 --- a/stack_orchestrator/data/stacks/uniswap-urbit-app/stack.yml +++ b/stack_orchestrator/data/stacks/uniswap-urbit-app/stack.yml @@ -2,9 +2,12 @@ version: "0.1" name: uniswap-urbit-app repos: - github.com/cerc-io/uniswap-interface@laconic # TODO: Use release + - github.com/cerc-io/watcher-ts@v0.2.78 containers: - cerc/uniswap-interface + - cerc/watcher-ts - cerc/urbit-globs-host pods: - uniswap-interface + - proxy-server - uniswap-urbit From 15faed00de8e510b4bdd5150eef1c26248ae3d19 Mon Sep 17 00:00:00 2001 From: David Boreham Date: Tue, 5 Dec 2023 22:56:58 -0700 Subject: [PATCH 61/62] Generate a unique deployment id for each deployment (#680) * Move cluster name generation into a function * Generate a unique deployment id for each deployment --- stack_orchestrator/constants.py | 4 +++ 
stack_orchestrator/deploy/deploy.py | 36 ++++++++++++------- stack_orchestrator/deploy/deployment.py | 2 +- .../deploy/deployment_context.py | 28 +++++++++++++-- .../deploy/deployment_create.py | 18 +++++++--- 5 files changed, 68 insertions(+), 20 deletions(-) diff --git a/stack_orchestrator/constants.py b/stack_orchestrator/constants.py index 54cfe355..596b0c1b 100644 --- a/stack_orchestrator/constants.py +++ b/stack_orchestrator/constants.py @@ -13,12 +13,16 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . +cluster_name_prefix = "laconic-" stack_file_name = "stack.yml" spec_file_name = "spec.yml" config_file_name = "config.env" +deployment_file_name = "deployment.yml" +compose_dir_name = "compose" compose_deploy_type = "compose" k8s_kind_deploy_type = "k8s-kind" k8s_deploy_type = "k8s" +cluster_id_key = "cluster-id" kube_config_key = "kube-config" deploy_to_key = "deploy-to" network_key = "network" diff --git a/stack_orchestrator/deploy/deploy.py b/stack_orchestrator/deploy/deploy.py index 424d112f..d1b64743 100644 --- a/stack_orchestrator/deploy/deploy.py +++ b/stack_orchestrator/deploy/deploy.py @@ -24,6 +24,8 @@ from importlib import resources import subprocess import click from pathlib import Path +from stack_orchestrator import constants +from stack_orchestrator.opts import opts from stack_orchestrator.util import include_exclude_check, get_parsed_stack_config, global_options2, get_dev_root_path from stack_orchestrator.deploy.deployer import Deployer, DeployerException from stack_orchestrator.deploy.deployer_factory import getDeployer @@ -70,6 +72,9 @@ def create_deploy_context( cluster, env_file, deploy_to) -> DeployCommandContext: + # Extract the cluster name from the deployment, if we have one + if deployment_context and cluster is None: + cluster = deployment_context.get_cluster_id() cluster_context = _make_cluster_context(global_context, stack, include, exclude, cluster, env_file) 
deployer = getDeployer(deploy_to, deployment_context, compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster, @@ -253,6 +258,22 @@ def _make_runtime_env(ctx): return container_exec_env +def _make_default_cluster_name(deployment, compose_dir, stack, include, exclude): + # Create default unique, stable cluster name from confile file path and stack name if provided + if deployment: + path = os.path.realpath(os.path.abspath(compose_dir)) + else: + path = "internal" + unique_cluster_descriptor = f"{path},{stack},{include},{exclude}" + if opts.o.debug: + print(f"pre-hash descriptor: {unique_cluster_descriptor}") + hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()[:16] + cluster = f"{constants.cluster_name_prefix}{hash}" + if opts.o.debug: + print(f"Using cluster name: {cluster}") + return cluster + + # stack has to be either PathLike pointing to a stack yml file, or a string with the name of a known stack def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file): @@ -270,18 +291,9 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file): compose_dir = Path(__file__).absolute().parent.parent.joinpath("data", "compose") if cluster is None: - # Create default unique, stable cluster name from confile file path and stack name if provided - if deployment: - path = os.path.realpath(os.path.abspath(compose_dir)) - else: - path = "internal" - unique_cluster_descriptor = f"{path},{stack},{include},{exclude}" - if ctx.debug: - print(f"pre-hash descriptor: {unique_cluster_descriptor}") - hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()[:16] - cluster = f"laconic-{hash}" - if ctx.verbose: - print(f"Using cluster name: {cluster}") + cluster = _make_default_cluster_name(deployment, compose_dir, stack, include, exclude) + else: + _make_default_cluster_name(deployment, compose_dir, stack, include, exclude) # See: https://stackoverflow.com/a/20885799/1701505 from stack_orchestrator 
import data diff --git a/stack_orchestrator/deploy/deployment.py b/stack_orchestrator/deploy/deployment.py index 8d74a62d..cc70519e 100644 --- a/stack_orchestrator/deploy/deployment.py +++ b/stack_orchestrator/deploy/deployment.py @@ -52,7 +52,7 @@ def make_deploy_context(ctx) -> DeployCommandContext: context: DeploymentContext = ctx.obj stack_file_path = context.get_stack_file() env_file = context.get_env_file() - cluster_name = context.get_cluster_name() + cluster_name = context.get_cluster_id() if constants.deploy_to_key in context.spec.obj: deployment_type = context.spec.obj[constants.deploy_to_key] else: diff --git a/stack_orchestrator/deploy/deployment_context.py b/stack_orchestrator/deploy/deployment_context.py index cbee4151..27e32812 100644 --- a/stack_orchestrator/deploy/deployment_context.py +++ b/stack_orchestrator/deploy/deployment_context.py @@ -14,15 +14,19 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . 
+import hashlib +import os from pathlib import Path from stack_orchestrator import constants +from stack_orchestrator.util import get_yaml from stack_orchestrator.deploy.stack import Stack from stack_orchestrator.deploy.spec import Spec class DeploymentContext: deployment_dir: Path + id: str spec: Spec stack: Stack @@ -35,9 +39,14 @@ class DeploymentContext: def get_env_file(self): return self.deployment_dir.joinpath(constants.config_file_name) - # TODO: implement me - def get_cluster_name(self): - return None + def get_deployment_file(self): + return self.deployment_dir.joinpath(constants.deployment_file_name) + + def get_compose_dir(self): + return self.deployment_dir.joinpath(constants.compose_dir_name) + + def get_cluster_id(self): + return self.id def init(self, dir): self.deployment_dir = dir @@ -45,3 +54,16 @@ class DeploymentContext: self.spec.init_from_file(self.get_spec_file()) self.stack = Stack(self.spec.obj["stack"]) self.stack.init_from_file(self.get_stack_file()) + deployment_file_path = self.get_deployment_file() + if deployment_file_path.exists(): + with deployment_file_path: + obj = get_yaml().load(open(deployment_file_path, "r")) + self.id = obj[constants.cluster_id_key] + # Handle the case of a legacy deployment with no file + # Code below is intended to match the output from _make_default_cluster_name() + # TODO: remove when we no longer need to support legacy deployments + else: + path = os.path.realpath(os.path.abspath(self.get_compose_dir())) + unique_cluster_descriptor = f"{path},{self.get_stack_file()},None,None" + hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()[:16] + self.id = f"{constants.cluster_name_prefix}{hash}" diff --git a/stack_orchestrator/deploy/deployment_create.py b/stack_orchestrator/deploy/deployment_create.py index 88ce0b2a..9eaea30c 100644 --- a/stack_orchestrator/deploy/deployment_create.py +++ b/stack_orchestrator/deploy/deployment_create.py @@ -20,6 +20,7 @@ from pathlib import Path from typing 
import List import random from shutil import copy, copyfile, copytree +from secrets import token_hex import sys from stack_orchestrator import constants from stack_orchestrator.opts import opts @@ -276,7 +277,7 @@ def init(ctx, config, config_file, kube_config, image_registry, output, map_port # call it from other commands, bypassing the click decoration stuff def init_operation(deploy_command_context, stack, deployer_type, config, config_file, kube_config, image_registry, output, map_ports_to_host): - yaml = get_yaml() + default_spec_file_content = call_stack_deploy_init(deploy_command_context) spec_file_content = {"stack": stack, constants.deploy_to_key: deployer_type} if deployer_type == "k8s": @@ -311,8 +312,6 @@ def init_operation(deploy_command_context, stack, deployer_type, config, new_config = config_file_variables merged_config = {**new_config, **orig_config} spec_file_content.update({"config": merged_config}) - if opts.o.debug: - print(f"Creating spec file for stack: {stack} with content: {spec_file_content}") ports = _get_mapped_ports(stack, map_ports_to_host) spec_file_content.update({"network": {"ports": ports}}) @@ -324,8 +323,11 @@ def init_operation(deploy_command_context, stack, deployer_type, config, volume_descriptors[named_volume] = f"./data/{named_volume}" spec_file_content["volumes"] = volume_descriptors + if opts.o.debug: + print(f"Creating spec file for stack: {stack} with content: {spec_file_content}") + with open(output, "w") as output_file: - yaml.dump(spec_file_content, output_file) + get_yaml().dump(spec_file_content, output_file) def _write_config_file(spec_file: Path, config_env_file: Path): @@ -351,6 +353,13 @@ def _copy_files_to_directory(file_paths: List[Path], directory: Path): copy(path, os.path.join(directory, os.path.basename(path))) +def _create_deployment_file(deployment_dir: Path): + deployment_file_path = deployment_dir.joinpath(constants.deployment_file_name) + cluster = f"{constants.cluster_name_prefix}{token_hex(8)}" + 
with open(deployment_file_path, "w") as output_file: + output_file.write(f"{constants.cluster_id_key}: {cluster}\n") + + @click.command() @click.option("--spec-file", required=True, help="Spec file to use to create this deployment") @click.option("--deployment-dir", help="Create deployment files in this directory") @@ -383,6 +392,7 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, netw # Copy spec file and the stack file into the deployment dir copyfile(spec_file, deployment_dir_path.joinpath(constants.spec_file_name)) copyfile(stack_file, deployment_dir_path.joinpath(os.path.basename(stack_file))) + _create_deployment_file(deployment_dir_path) # Copy any config varibles from the spec file into an env file suitable for compose _write_config_file(spec_file, deployment_dir_path.joinpath(constants.config_file_name)) # Copy any k8s config file into the deployment dir From 077ea80c7030b000bbe0775ff944ab977c4c18bc Mon Sep 17 00:00:00 2001 From: Thomas E Lackey Date: Wed, 6 Dec 2023 10:27:47 -0600 Subject: [PATCH 62/62] Add `deployment status` command and fix k8s output for `deployment ps` (#679) --- .../deploy/compose/deploy_docker.py | 7 ++ stack_orchestrator/deploy/deploy.py | 8 ++ stack_orchestrator/deploy/deployer.py | 4 + stack_orchestrator/deploy/deployment.py | 5 +- stack_orchestrator/deploy/k8s/deploy_k8s.py | 90 +++++++++++++++++-- 5 files changed, 104 insertions(+), 10 deletions(-) diff --git a/stack_orchestrator/deploy/compose/deploy_docker.py b/stack_orchestrator/deploy/compose/deploy_docker.py index 04f24df5..d34d1e6f 100644 --- a/stack_orchestrator/deploy/compose/deploy_docker.py +++ b/stack_orchestrator/deploy/compose/deploy_docker.py @@ -40,6 +40,13 @@ class DockerDeployer(Deployer): except DockerException as e: raise DeployerException(e) + def status(self): + try: + for p in self.docker.compose.ps(): + print(f"{p.name}\t{p.state.status}") + except DockerException as e: + raise DeployerException(e) + def ps(self): try: return 
self.docker.compose.ps() diff --git a/stack_orchestrator/deploy/deploy.py b/stack_orchestrator/deploy/deploy.py index d1b64743..da96a500 100644 --- a/stack_orchestrator/deploy/deploy.py +++ b/stack_orchestrator/deploy/deploy.py @@ -112,6 +112,14 @@ def down_operation(ctx, delete_volumes, extra_args_list): ctx.obj.deployer.down(timeout=timeout_arg, volumes=delete_volumes) +def status_operation(ctx): + global_context = ctx.parent.parent.obj + if not global_context.dry_run: + if global_context.verbose: + print("Running compose status") + ctx.obj.deployer.status() + + def ps_operation(ctx): global_context = ctx.parent.parent.obj if not global_context.dry_run: diff --git a/stack_orchestrator/deploy/deployer.py b/stack_orchestrator/deploy/deployer.py index 984945ed..2806044b 100644 --- a/stack_orchestrator/deploy/deployer.py +++ b/stack_orchestrator/deploy/deployer.py @@ -31,6 +31,10 @@ class Deployer(ABC): def ps(self): pass + @abstractmethod + def status(self): + pass + @abstractmethod def port(self, service, private_port): pass diff --git a/stack_orchestrator/deploy/deployment.py b/stack_orchestrator/deploy/deployment.py index cc70519e..366a83f6 100644 --- a/stack_orchestrator/deploy/deployment.py +++ b/stack_orchestrator/deploy/deployment.py @@ -18,7 +18,7 @@ from pathlib import Path import sys from stack_orchestrator import constants from stack_orchestrator.deploy.images import push_images_operation -from stack_orchestrator.deploy.deploy import up_operation, down_operation, ps_operation, port_operation +from stack_orchestrator.deploy.deploy import up_operation, down_operation, ps_operation, port_operation, status_operation from stack_orchestrator.deploy.deploy import exec_operation, logs_operation, create_deploy_context from stack_orchestrator.deploy.deploy_types import DeployCommandContext from stack_orchestrator.deploy.deployment_context import DeploymentContext @@ -147,4 +147,5 @@ def logs(ctx, tail, follow, extra_args): @command.command() @click.pass_context def 
status(ctx): - print(f"Context: {ctx.parent.obj}") + ctx.obj = make_deploy_context(ctx) + status_operation(ctx) diff --git a/stack_orchestrator/deploy/k8s/deploy_k8s.py b/stack_orchestrator/deploy/k8s/deploy_k8s.py index c84aa34a..95131966 100644 --- a/stack_orchestrator/deploy/k8s/deploy_k8s.py +++ b/stack_orchestrator/deploy/k8s/deploy_k8s.py @@ -26,6 +26,12 @@ from stack_orchestrator.deploy.deployment_context import DeploymentContext from stack_orchestrator.util import error_exit +class AttrDict(dict): + def __init__(self, *args, **kwargs): + super(AttrDict, self).__init__(*args, **kwargs) + self.__dict__ = self + + def _check_delete_exception(e: client.exceptions.ApiException): if e.status == 404: if opts.o.debug: @@ -42,7 +48,7 @@ class K8sDeployer(Deployer): networking_api: client.NetworkingV1Api k8s_namespace: str = "default" kind_cluster_name: str - cluster_info : ClusterInfo + cluster_info: ClusterInfo deployment_dir: Path deployment_context: DeploymentContext @@ -72,6 +78,7 @@ class K8sDeployer(Deployer): self.core_api = client.CoreV1Api() self.networking_api = client.NetworkingV1Api() self.apps_api = client.AppsV1Api() + self.custom_obj_api = client.CustomObjectsApi() def up(self, detach, services): @@ -202,15 +209,82 @@ class K8sDeployer(Deployer): # Destroy the kind cluster destroy_cluster(self.kind_cluster_name) - def ps(self): + def status(self): self.connect_api() # Call whatever API we need to get the running container list - ret = self.core_api.list_pod_for_all_namespaces(watch=False) - if ret.items: - for i in ret.items: - print("%s\t%s\t%s" % (i.status.pod_ip, i.metadata.namespace, i.metadata.name)) - ret = self.core_api.list_node(pretty=True, watch=False) - return [] + all_pods = self.core_api.list_pod_for_all_namespaces(watch=False) + pods = [] + + if all_pods.items: + for p in all_pods.items: + if self.cluster_info.app_name in p.metadata.name: + pods.append(p) + + if not pods: + return + + hostname = "?" + ip = "?" + tls = "?" 
+ try: + ingress = self.networking_api.read_namespaced_ingress(namespace=self.k8s_namespace, + name=self.cluster_info.get_ingress().metadata.name) + + cert = self.custom_obj_api.get_namespaced_custom_object( + group="cert-manager.io", + version="v1", + namespace=self.k8s_namespace, + plural="certificates", + name=ingress.spec.tls[0].secret_name + ) + + hostname = ingress.spec.tls[0].hosts[0] + ip = ingress.status.load_balancer.ingress[0].ip + tls = "notBefore: %s, notAfter: %s" % (cert["status"]["notBefore"], cert["status"]["notAfter"]) + except: # noqa: E722 + pass + + print("Ingress:") + print("\tHostname:", hostname) + print("\tIP:", ip) + print("\tTLS:", tls) + print("") + print("Pods:") + + for p in pods: + if p.metadata.deletion_timestamp: + print(f"\t{p.metadata.namespace}/{p.metadata.name}: Terminating ({p.metadata.deletion_timestamp})") + else: + print(f"\t{p.metadata.namespace}/{p.metadata.name}: Running ({p.metadata.creation_timestamp})") + + def ps(self): + self.connect_api() + pods = self.core_api.list_pod_for_all_namespaces(watch=False) + + ret = [] + + for p in pods.items: + if self.cluster_info.app_name in p.metadata.name: + pod_ip = p.status.pod_ip + ports = AttrDict() + for c in p.spec.containers: + if c.ports: + for prt in c.ports: + ports[str(prt.container_port)] = [AttrDict({ + "HostIp": pod_ip, + "HostPort": prt.container_port + })] + + ret.append(AttrDict({ + "id": f"{p.metadata.namespace}/{p.metadata.name}", + "name": p.metadata.name, + "namespace": p.metadata.namespace, + "network_settings": AttrDict({ + "ports": ports + }) + })) + + return ret def port(self, service, private_port): # Since we handle the port mapping, need to figure out where this comes from