forked from cerc-io/stack-orchestrator
commit 46b726be9b
Merge branch 'main' into ci-test
@ -62,6 +62,9 @@ services:
      - fixturenet-eth-bootnode-geth
    volumes:
      - fixturenet_eth_geth_2_data:/root/ethdata
    ports:
      - "8545"
      - "8546"

  fixturenet-eth-bootnode-lighthouse:
    restart: always
@ -70,6 +70,7 @@ services:
    command: "/run-op-geth.sh"
    ports:
      - "0.0.0.0:8545:8545"
      - "0.0.0.0:8546:8546"
    healthcheck:
      test: ["CMD", "nc", "-vz", "localhost:8545"]
      interval: 30s
@ -38,6 +38,7 @@ services:
      - fixturenet-eth-bootnode-geth
    ports:
      - "8545"
      - "8546"
      - "40000"
      - "6060"

@ -59,6 +60,9 @@ services:
      - fixturenet-eth-bootnode-geth
    volumes:
      - fixturenet_plugeth_geth_2_data:/root/ethdata
    ports:
      - "8545"
      - "8546"

  fixturenet-eth-bootnode-lighthouse:
    restart: always
@ -10,11 +10,11 @@ services:
      nitro-contracts:
        condition: service_completed_successfully
    environment:
      NITRO_CHAIN_URL: ${NITRO_CHAIN_URL:-ws://fixturenet-eth-geth-1:8546}
      NITRO_PK: ${NITRO_PK:-2d999770f7b5d49b694080f987b82bbc9fc9ac2b4dcc10b0f8aba7d700f69c6d}
      NITRO_CHAIN_PK: ${NITRO_CHAIN_PK:-570b909da9669b2f35a0b1ac70b8358516d55ae1b5b3710e95e9a94395090597}
      NITRO_USE_DURABLE_STORE: ${NITRO_USE_DURABLE_STORE:-true}
      NITRO_DURABLE_STORE_FOLDER: ${NITRO_DURABLE_STORE_FOLDER:-/app/data/nitro-store}
      CERC_NITRO_CHAIN_URL: ${CERC_NITRO_CHAIN_URL:-ws://fixturenet-eth-geth-1:8546}
      CERC_NITRO_PK: ${CERC_NITRO_PK:-2d999770f7b5d49b694080f987b82bbc9fc9ac2b4dcc10b0f8aba7d700f69c6d}
      CERC_NITRO_CHAIN_PK: ${CERC_NITRO_CHAIN_PK:-570b909da9669b2f35a0b1ac70b8358516d55ae1b5b3710e95e9a94395090597}
      CERC_NITRO_USE_DURABLE_STORE: ${CERC_NITRO_USE_DURABLE_STORE:-true}
      CERC_NITRO_DURABLE_STORE_FOLDER: ${CERC_NITRO_DURABLE_STORE_FOLDER:-/app/data/nitro-store}
      CERC_NA_ADDRESS: ${CERC_NA_ADDRESS}
      CERC_VPA_ADDRESS: ${CERC_VPA_ADDRESS}
      CERC_CA_ADDRESS: ${CERC_CA_ADDRESS}
@ -31,9 +31,19 @@ services:
      start_period: 10s
    ports:
      - "3005"
      - "4005"
      - "4005:4005"
      - "5005:5005"

  nitro-rpc-client:
    image: cerc/nitro-rpc-client:local
    hostname: nitro-rpc-client
    restart: on-failure
    depends_on:
      # Wait for the go-nitro node to start
      go-nitro:
        condition: service_healthy
    command: ["bash", "-c", "tail -f /dev/null"]

volumes:
  go_nitro_data:
  nitro_deployment:
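With these mappings the go-nitro node's RPC API is published on port 4005 and its p2p/WebSocket message endpoint on 5005. As a quick check it can be queried from the bundled `nitro-rpc-client` container (a sketch using the command form from the demo further down; the channel id is a placeholder and the cluster is assumed to be named `payments`):

```bash
# Placeholder: use a channel id printed in the watcher or ponder logs
export CHANNEL_ID=<PAYMENT_CHANNEL_ID>
docker exec payments-nitro-rpc-client-1 npm exec -c \
  "nitro-rpc-client get-payment-channel $CHANNEL_ID -h go-nitro -p 4005"
```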
@ -10,10 +10,10 @@ services:
      go-nitro:
        condition: service_healthy
    environment:
      PROXY_ADDRESS: 0.0.0.0:8081
      PROXY_NITRO_ENDPOINT: ${PROXY_NITRO_ENDPOINT:-go-nitro:4005/api/v1}
      PROXY_DESTINATION_URL: ${PROXY_DESTINATION_URL:-http://ipld-eth-server:8081}
      PROXY_COST_PER_BYTE: ${PROXY_COST_PER_BYTE:-1}
      CERC_PROXY_ADDRESS: 0.0.0.0:8081
      CERC_PROXY_NITRO_ENDPOINT: ${CERC_PROXY_NITRO_ENDPOINT:-go-nitro:4005/api/v1}
      CERC_PROXY_DESTINATION_URL: ${CERC_PROXY_DESTINATION_URL:-http://ipld-eth-server:8081}
      CERC_PROXY_COST_PER_BYTE: ${CERC_PROXY_COST_PER_BYTE:-1}
    entrypoint: ["bash", "-c", "/app/run-reverse-payment-proxy.sh"]
    volumes:
      - ../config/go-nitro/run-reverse-payment-proxy.sh:/app/run-reverse-payment-proxy.sh
@ -1,16 +1,17 @@
version: '3.7'

services:
  ponder-app:
  ponder-app-indexer:
    hostname: ponder-app-indexer
    restart: unless-stopped
    image: cerc/ponder:local
    working_dir: /app/examples/token-erc20
    environment:
      PONDER_CHAIN_ID: ${PONDER_CHAIN_ID:-99}
      PONDER_RPC_URL_1: ${PONDER_RPC_URL_1:-http://nitro-reverse-payment-proxy:8081}
      CERC_PONDER_NITRO_PK: ${CERC_PONDER_NITRO_PK:-58368d20ff12f17669c06158c21d885897aa56f9be430edc789614bf9851d53f}
      CERC_PONDER_NITRO_CHAIN_PK: ${CERC_PONDER_NITRO_CHAIN_PK:-fb1e9af328c283ca3e2486e7c24d13582b7912057d8b9542ff41503c85bc05c0}
      CERC_PONDER_NITRO_CHAIN_URL: ${CERC_PONDER_NITRO_CHAIN_URL:-http://fixturenet-eth-geth-1:8545}
      CERC_PONDER_CHAIN_ID: ${PONDER_CHAIN_ID:-99}
      CERC_PONDER_RPC_URL_1: ${PONDER_RPC_URL_1:-http://nitro-reverse-payment-proxy:8081}
      CERC_PONDER_NITRO_PK: ${CERC_PONDER_INDEXER_NITRO_PK:-58368d20ff12f17669c06158c21d885897aa56f9be430edc789614bf9851d53f}
      CERC_PONDER_NITRO_CHAIN_PK: ${CERC_PONDER_INDEXER_NITRO_CHAIN_PK:-fb1e9af328c283ca3e2486e7c24d13582b7912057d8b9542ff41503c85bc05c0}
      CERC_PONDER_NITRO_CHAIN_URL: ${CERC_PONDER_NITRO_CHAIN_URL:-http://fixturenet-eth-geth-1:8546}
      CERC_RELAY_MULTIADDR: ${CERC_RELAY_MULTIADDR}
      CERC_UPSTREAM_NITRO_ADDRESS: ${CERC_UPSTREAM_NITRO_ADDRESS:-0xAAA6628Ec44A8a742987EF3A114dDFE2D4F7aDCE}
      CERC_UPSTREAM_NITRO_MULTIADDR: ${CERC_UPSTREAM_NITRO_MULTIADDR:-/dns4/go-nitro/tcp/5005/ws/p2p/16Uiu2HAmSjXJqsyBJgcBUU2HQmykxGseafSatbpq5471XmuaUqyv}
@ -18,14 +19,45 @@ services:
    command: ["bash", "./ponder-start.sh"]
    volumes:
      - ../config/ponder/ponder-start.sh:/app/examples/token-erc20/ponder-start.sh
      - ../config/ponder/ponder.config.ts:/app/examples/token-erc20/ponder.config.ts
      - ../config/ponder/ponder.indexer.config.ts:/app/examples/token-erc20/ponder.config.ts
      - peers_ids:/peers
      - nitro_deployment:/nitro
      - ponder_nitro_data:/app/examples/token-erc20/.ponder/nitro-db
      - ponder_indexer_nitro_data:/app/examples/token-erc20/.ponder/nitro-db
    ports:
      - "42070"
    extra_hosts:
      - "host.docker.internal:host-gateway"

  ponder-app-watcher:
    hostname: ponder-app-watcher
    depends_on:
      - ponder-app-indexer
    restart: unless-stopped
    image: cerc/ponder:local
    working_dir: /app/examples/token-erc20
    environment:
      CERC_PONDER_CHAIN_ID: ${PONDER_CHAIN_ID:-99}
      CERC_PONDER_NITRO_PK: ${CERC_PONDER_WATCHER_NITRO_PK:-febb3b74b0b52d0976f6571d555f4ac8b91c308dfa25c7b58d1e6a7c3f50c781}
      CERC_PONDER_NITRO_CHAIN_PK: ${CERC_PONDER_WATCHER_NITRO_CHAIN_PK:-be4aa664815ea3bc3d63118649a733f6c96b243744310806ecb6d96359ab62cf}
      CERC_PONDER_NITRO_CHAIN_URL: ${CERC_PONDER_NITRO_CHAIN_URL:-http://fixturenet-eth-geth-1:8546}
      CERC_RELAY_MULTIADDR: ${CERC_RELAY_MULTIADDR}
      CERC_INDEXER_GQL_ENDPOINT: ${CERC_INDEXER_GQL_ENDPOINT:-http://ponder-app-indexer:42070/graphql}
      CERC_INDEXER_NITRO_ADDRESS: ${CERC_INDEXER_NITRO_ADDRESS:-0x67D5b55604d1aF90074FcB69b8C51838FFF84f8d}
      CERC_INDEXER_NITRO_PAY_AMOUNT: ${CERC_INDEXER_NITRO_PAY_AMOUNT:-50}
    command: ["bash", "./ponder-start.sh"]
    volumes:
      - ../config/ponder/ponder-start.sh:/app/examples/token-erc20/ponder-start.sh
      - ../config/ponder/ponder.watcher.config.ts:/app/examples/token-erc20/ponder.config.ts
      - peers_ids:/peers
      - nitro_deployment:/nitro
      - ponder_watcher_nitro_data:/app/examples/token-erc20/.ponder/nitro-db
    ports:
      - "42069"
    extra_hosts:
      - "host.docker.internal:host-gateway"

volumes:
  peers_ids:
  nitro_deployment:
  ponder_nitro_data:
  ponder_indexer_nitro_data:
  ponder_watcher_nitro_data:
@ -4,6 +4,7 @@ services:
    restart: always
    environment:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
      CERC_TEST_PARAM_1: ${CERC_TEST_PARAM_1:-FAILED}
    volumes:
      - test-data:/data
    ports:
@ -74,6 +74,7 @@ services:
      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
      CERC_ETH_RPC_QUERY_ENDPOINT: ${CERC_ETH_RPC_QUERY_ENDPOINT}
      CERC_ETH_RPC_MUTATION_ENDPOINT: ${CERC_ETH_RPC_MUTATION_ENDPOINT}
      CERC_NITRO_CHAIN_URL: ${CERC_NITRO_CHAIN_URL}
      CERC_RELAY_PEERS: ${CERC_RELAY_PEERS}
      CERC_DENY_MULTIADDRS: ${CERC_DENY_MULTIADDRS}
      CERC_PUBSUB: ${CERC_PUBSUB}
@ -1,6 +1,5 @@
#!/bin/bash

set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
  set -x
fi
@ -30,6 +29,21 @@ fi

echo "Running Nitro node"

# TODO Wait for RPC endpoint to come up
# Wait till chain endpoint is available
retry_interval=5
while true; do
  # Assuming CERC_NITRO_CHAIN_URL is of format <ws|http>://host:port
  ws_host=$(echo "$CERC_NITRO_CHAIN_URL" | awk -F '://' '{print $2}' | cut -d ':' -f 1)
  ws_port=$(echo "$CERC_NITRO_CHAIN_URL" | awk -F '://' '{print $2}' | cut -d ':' -f 2)
  nc -z -w 1 "$ws_host" "$ws_port"

./nitro -chainurl ${NITRO_CHAIN_URL} -msgport 3005 -rpcport 4005 -wsmsgport 5005 -pk ${NITRO_PK} -chainpk ${NITRO_CHAIN_PK} -naaddress ${NA_ADDRESS} -vpaaddress ${VPA_ADDRESS} -caaddress ${CA_ADDRESS} -usedurablestore ${NITRO_USE_DURABLE_STORE} -durablestorefolder ${NITRO_DURABLE_STORE_FOLDER}
  if [ $? -eq 0 ]; then
    echo "Chain endpoint is available"
    break
  fi

  echo "Chain endpoint not yet available, retrying in $retry_interval seconds..."
  sleep $retry_interval
done

./nitro -chainurl ${CERC_NITRO_CHAIN_URL} -msgport 3005 -rpcport 4005 -wsmsgport 5005 -pk ${CERC_NITRO_PK} -chainpk ${CERC_NITRO_CHAIN_PK} -naaddress ${NA_ADDRESS} -vpaaddress ${VPA_ADDRESS} -caaddress ${CA_ADDRESS} -usedurablestore ${CERC_NITRO_USE_DURABLE_STORE} -durablestorefolder ${CERC_NITRO_DURABLE_STORE_FOLDER}
@ -6,9 +6,9 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
fi

echo "Running Nitro reverse payment proxy"
echo "Using PROXY_ADDRESS ${PROXY_ADDRESS}"
echo "Using PROXY_NITRO_ENDPOINT ${PROXY_NITRO_ENDPOINT}"
echo "Using PROXY_DESTINATION_URL ${PROXY_DESTINATION_URL}"
echo "Using PROXY_COST_PER_BYTE ${PROXY_COST_PER_BYTE}"
echo "Using CERC_PROXY_ADDRESS ${CERC_PROXY_ADDRESS}"
echo "Using CERC_PROXY_NITRO_ENDPOINT ${CERC_PROXY_NITRO_ENDPOINT}"
echo "Using CERC_PROXY_DESTINATION_URL ${CERC_PROXY_DESTINATION_URL}"
echo "Using CERC_PROXY_COST_PER_BYTE ${CERC_PROXY_COST_PER_BYTE}"

./start-reverse-payment-proxy -proxyaddress ${PROXY_ADDRESS} -nitroendpoint=${PROXY_NITRO_ENDPOINT} -destinationurl=${PROXY_DESTINATION_URL} -costperbyte ${PROXY_COST_PER_BYTE} -enablepaidrpcmethods
./proxy -proxyaddress ${CERC_PROXY_ADDRESS} -nitroendpoint=${CERC_PROXY_NITRO_ENDPOINT} -destinationurl=${CERC_PROXY_DESTINATION_URL} -costperbyte ${CERC_PROXY_COST_PER_BYTE} -enablepaidrpcmethods
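Once the proxy is up, a quick smoke test can be run from the host (a sketch, assuming port 8081 is published on the host as listed in the stack README; `eth_chainId` is not in the configured paid-methods list, so it should be served without a payment voucher):

```bash
curl -s -X POST http://localhost:8081 \
  -H 'Content-Type: application/json' \
  --data '{"jsonrpc":"2.0","method":"eth_chainId","params":[],"id":1}'
```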
@ -53,5 +53,5 @@ done

echo "Using CERC_PRIVATE_KEY_DEPLOYER from env"

yarn test:deploy-contracts --chainurl ${CERC_ETH_RPC_ENDPOINT} --key ${CERC_PRIVATE_KEY_DEPLOYER} --addressesFilePath ${nitro_addresses_file}
yarn test:deploy-contracts --chainUrl ${CERC_ETH_RPC_ENDPOINT} --key ${CERC_PRIVATE_KEY_DEPLOYER} --addressesFilePath ${nitro_addresses_file}
cat ${nitro_addresses_file}
@ -5,19 +5,6 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
  set -x
fi

# Wait till RPC endpoint is available
retry_interval=5
while true; do
  rpc_response=$(curl -s -o /dev/null -w '%{http_code}' ${PONDER_RPC_URL_1})
  if [ ${rpc_response} = 200 ]; then
    echo "RPC endpoint is available"
    break
  fi

  echo "RPC endpoint not yet available, retrying in $retry_interval seconds..."
  sleep $retry_interval
done

nitro_addresses_file="/nitro/nitro-addresses.json"
nitro_addresses_destination_file="/app/examples/token-erc20/nitro-addresses.json"

@ -55,15 +42,22 @@ if [ -z "$CERC_RELAY_MULTIADDR" ]; then
fi

env_file='.env.local'
echo "PONDER_CHAIN_ID=\"$PONDER_CHAIN_ID\"" > "$env_file"
echo "PONDER_RPC_URL_1=\"$PONDER_RPC_URL_1\"" >> "$env_file"
echo "CERC_PONDER_NITRO_PK=\"$CERC_PONDER_NITRO_PK\"" >> "$env_file"
echo "CERC_PONDER_NITRO_CHAIN_PK=\"$CERC_PONDER_NITRO_CHAIN_PK\"" >> "$env_file"
echo "CERC_PONDER_NITRO_CHAIN_URL=\"$CERC_PONDER_NITRO_CHAIN_URL\"" >> "$env_file"
echo "CERC_RELAY_MULTIADDR=\"$CERC_RELAY_MULTIADDR\"" >> "$env_file"
echo "CERC_UPSTREAM_NITRO_ADDRESS=\"$CERC_UPSTREAM_NITRO_ADDRESS\"" >> "$env_file"
echo "CERC_UPSTREAM_NITRO_MULTIADDR=\"$CERC_UPSTREAM_NITRO_MULTIADDR\"" >> "$env_file"
echo "CERC_UPSTREAM_NITRO_PAY_AMOUNT=\"$CERC_UPSTREAM_NITRO_PAY_AMOUNT\"" >> "$env_file"
echo "PONDER_TELEMETRY_DISABLED=true" > "$env_file"
echo "PONDER_LOG_LEVEL=debug" >> "$env_file"
echo "PONDER_CHAIN_ID=\"$CERC_PONDER_CHAIN_ID\"" >> "$env_file"
echo "PONDER_RPC_URL_1=\"$CERC_PONDER_RPC_URL_1\"" >> "$env_file"
echo "PONDER_NITRO_PK=\"$CERC_PONDER_NITRO_PK\"" >> "$env_file"
echo "PONDER_NITRO_CHAIN_PK=\"$CERC_PONDER_NITRO_CHAIN_PK\"" >> "$env_file"
echo "PONDER_NITRO_CHAIN_URL=\"$CERC_PONDER_NITRO_CHAIN_URL\"" >> "$env_file"
echo "RELAY_MULTIADDR=\"$CERC_RELAY_MULTIADDR\"" >> "$env_file"
echo "UPSTREAM_NITRO_ADDRESS=\"$CERC_UPSTREAM_NITRO_ADDRESS\"" >> "$env_file"
echo "UPSTREAM_NITRO_MULTIADDR=\"$CERC_UPSTREAM_NITRO_MULTIADDR\"" >> "$env_file"
echo "UPSTREAM_NITRO_PAY_AMOUNT=\"$CERC_UPSTREAM_NITRO_PAY_AMOUNT\"" >> "$env_file"
echo "INDEXER_GQL_ENDPOINT=\"$CERC_INDEXER_GQL_ENDPOINT\"" >> "$env_file"
echo "INDEXER_NITRO_ADDRESS=\"$CERC_INDEXER_NITRO_ADDRESS\"" >> "$env_file"
echo "INDEXER_NITRO_PAY_AMOUNT=\"$CERC_INDEXER_NITRO_PAY_AMOUNT\"" >> "$env_file"

cat "$env_file"

# Keep the container running
tail -f
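For illustration only: with the indexer service's compose defaults from this commit, the generated `.env.local` would come out roughly as below (angle-bracket placeholders mark values resolved at startup or taken from compose defaults; variables not set for that service come out empty):

```bash
PONDER_TELEMETRY_DISABLED=true
PONDER_LOG_LEVEL=debug
PONDER_CHAIN_ID="99"
PONDER_RPC_URL_1="http://nitro-reverse-payment-proxy:8081"
PONDER_NITRO_PK="<indexer Nitro key default from the compose file>"
PONDER_NITRO_CHAIN_PK="<indexer chain key default from the compose file>"
PONDER_NITRO_CHAIN_URL="http://fixturenet-eth-geth-1:8546"
RELAY_MULTIADDR="<relay multiaddr determined at startup>"
UPSTREAM_NITRO_ADDRESS="0xAAA6628Ec44A8a742987EF3A114dDFE2D4F7aDCE"
UPSTREAM_NITRO_MULTIADDR="/dns4/go-nitro/tcp/5005/ws/p2p/16Uiu2HAmSjXJqsyBJgcBUU2HQmykxGseafSatbpq5471XmuaUqyv"
UPSTREAM_NITRO_PAY_AMOUNT=""
INDEXER_GQL_ENDPOINT=""
INDEXER_NITRO_ADDRESS=""
INDEXER_NITRO_PAY_AMOUNT=""
```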
@ -1,37 +0,0 @@
import type { Config } from "@ponder/core";

import contractAddresses from "./nitro-addresses.json";

export const config: Config = {
  networks: [
    {
      name: "fixturenet",
      chainId: Number(process.env.PONDER_CHAIN_ID),
      rpcUrl: process.env.PONDER_RPC_URL_1,
      maxRpcRequestConcurrency: 1,
    },
  ],
  contracts: [
    {
      name: "AdventureGold",
      network: "fixturenet",
      abi: "./abis/AdventureGold.json",
      address: "0x32353A6C91143bfd6C7d363B546e62a9A2489A20",
      startBlock: 5,
      maxBlockRange: 100,
    },
  ],
  nitro: {
    privateKey: process.env.CERC_PONDER_NITRO_PK!,
    chainPrivateKey: process.env.CERC_PONDER_NITRO_CHAIN_PK!,
    chainURL: process.env.CERC_PONDER_NITRO_CHAIN_URL!,
    contractAddresses,
    relayMultiAddr: process.env.CERC_RELAY_MULTIADDR!,
    store: "./.ponder/nitro-db",
    rpcNitroNode: {
      address: process.env.CERC_UPSTREAM_NITRO_ADDRESS!,
      multiAddr: process.env.CERC_UPSTREAM_NITRO_MULTIADDR!,
    },
    payAmount: process.env.CERC_UPSTREAM_NITRO_PAY_AMOUNT!,
  },
};

app/data/config/ponder/ponder.indexer.config.ts (Normal file, 53 lines)
@ -0,0 +1,53 @@
import { type Config, AppMode } from "@ponder/core";

import contractAddresses from "./nitro-addresses.json" assert { type: "json" };

export const config: Config = {
  networks: [
    {
      name: "fixturenet",
      chainId: Number(process.env.PONDER_CHAIN_ID),
      rpcUrl: process.env.PONDER_RPC_URL_1,
      maxRpcRequestConcurrency: 1,
      pollingInterval: 5000,
      payments: {
        nitro: {
          address: process.env.UPSTREAM_NITRO_ADDRESS!,
          multiAddr: process.env.UPSTREAM_NITRO_MULTIADDR!,
          fundingAmounts: {
            // TODO: Pass amounts from env
            directFund: "1000000000000",
            virtualFund: "1000000000",
          },
        },
        paidRPCMethods: [
          "eth_getLogs",
          "eth_getBlockByNumber",
          "eth_getBlockByHash",
        ],
        amount: process.env.UPSTREAM_NITRO_PAY_AMOUNT!,
      },
    },
  ],
  contracts: [
    {
      name: "AdventureGold",
      network: "fixturenet",
      abi: "./abis/AdventureGold.json",
      address: "0x32353A6C91143bfd6C7d363B546e62a9A2489A20",
      startBlock: 5,
      maxBlockRange: 100,
    },
  ],
  options: {
    mode: AppMode.Indexer,
  },
  nitro: {
    privateKey: process.env.PONDER_NITRO_PK!,
    chainPrivateKey: process.env.PONDER_NITRO_CHAIN_PK!,
    chainUrl: process.env.PONDER_NITRO_CHAIN_URL!,
    contractAddresses,
    relayMultiAddr: process.env.RELAY_MULTIADDR!,
    store: "./.ponder/nitro-db",
  },
};

app/data/config/ponder/ponder.watcher.config.ts (Normal file, 46 lines)
@ -0,0 +1,46 @@
import { type Config, AppMode } from "@ponder/core";

import contractAddresses from "./nitro-addresses.json" assert { type: "json" };

export const config: Config = {
  networks: [
    {
      name: "fixturenet",
      chainId: Number(process.env.PONDER_CHAIN_ID),
    },
  ],
  contracts: [
    {
      name: "AdventureGold",
      network: "fixturenet",
      abi: "./abis/AdventureGold.json",
      address: "0x32353A6C91143bfd6C7d363B546e62a9A2489A20",
      startBlock: 5,
      maxBlockRange: 100,
    },
  ],
  options: {
    mode: AppMode.Watcher,
  },
  indexer: {
    gqlEndpoint: process.env.INDEXER_GQL_ENDPOINT,
    payments: {
      nitro: {
        address: process.env.INDEXER_NITRO_ADDRESS,
        fundingAmounts: {
          directFund: "1000000000000",
          virtualFund: "1000000000",
        },
      },
      amount: process.env.INDEXER_NITRO_PAY_AMOUNT,
    },
  },
  nitro: {
    privateKey: process.env.PONDER_NITRO_PK!,
    chainPrivateKey: process.env.PONDER_NITRO_CHAIN_PK!,
    chainUrl: process.env.PONDER_NITRO_CHAIN_URL!,
    contractAddresses,
    relayMultiAddr: process.env.RELAY_MULTIADDR!,
    store: "./.ponder/nitro-db",
  }
};
@ -9,6 +9,9 @@ DEFAULT_CERC_ETH_RPC_QUERY_ENDPOINT="http://nitro-reverse-payment-proxy:8081"
# ETH RPC endpoint used for mutations in the watcher
DEFAULT_CERC_ETH_RPC_MUTATION_ENDPOINT="http://fixturenet-eth-geth-1:8545"

# ETH endpoint used by watcher's Nitro node
DEFAULT_CERC_NITRO_CHAIN_URL="http://fixturenet-eth-geth-1:8546"

# Set of relay peers to connect to from the relay node
DEFAULT_CERC_RELAY_PEERS=[]
@ -7,6 +7,7 @@ fi

CERC_ETH_RPC_QUERY_ENDPOINT="${CERC_ETH_RPC_QUERY_ENDPOINT:-${DEFAULT_CERC_ETH_RPC_QUERY_ENDPOINT}}"
CERC_ETH_RPC_MUTATION_ENDPOINT="${CERC_ETH_RPC_MUTATION_ENDPOINT:-${DEFAULT_CERC_ETH_RPC_MUTATION_ENDPOINT}}"
CERC_NITRO_CHAIN_URL="${CERC_NITRO_CHAIN_URL:-${DEFAULT_CERC_NITRO_CHAIN_URL}}"
CERC_RELAY_PEERS="${CERC_RELAY_PEERS:-${DEFAULT_CERC_RELAY_PEERS}}"
CERC_DENY_MULTIADDRS="${CERC_DENY_MULTIADDRS:-${DEFAULT_CERC_DENY_MULTIADDRS}}"
CERC_PUBSUB="${CERC_PUBSUB:-${DEFAULT_CERC_PUBSUB}}"
@ -19,6 +20,7 @@ watcher_keys_dir="./keys"

echo "Using RPC query endpoint ${CERC_ETH_RPC_QUERY_ENDPOINT}"
echo "Using RPC mutation endpoint ${CERC_ETH_RPC_MUTATION_ENDPOINT}"
echo "Using Nitro chain URL ${CERC_NITRO_CHAIN_URL}"

# Use public domain for relay multiaddr in peer config if specified
# Otherwise, use the docker container's host IP
@ -146,6 +148,7 @@ WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
  s/REPLACE_WITH_CERC_PRIVATE_KEY_PEER/${CERC_PRIVATE_KEY_PEER}/g; \
  s/REPLACE_WITH_CERC_WATCHER_NITRO_PK/${CERC_WATCHER_NITRO_PK}/g; \
  s/REPLACE_WITH_CONTRACT_ADDRESS/${CONTRACT_ADDRESS}/g; \
  s|REPLACE_WITH_CERC_NITRO_CHAIN_URL|${CERC_NITRO_CHAIN_URL}|g; \
  s/REPLACE_WITH_CONSENSUS_ENABLED/${CONSENSUS_ENABLED}/g; \
  s/REPLACE_WITH_CONSENSUS_PUBLIC_KEY/${CONSENSUS_PUBLIC_KEY}/g; \
  s/REPLACE_WITH_CONSENSUS_PRIVATE_KEY/${CONSENSUS_PRIVATE_KEY}/g; \
@ -53,6 +53,7 @@
  contractAddress = 'REPLACE_WITH_CONTRACT_ADDRESS'

  [server.p2p.nitro]
    chainUrl = 'REPLACE_WITH_CERC_NITRO_CHAIN_URL'
    store = './out/nitro-db'
    privateKey = 'REPLACE_WITH_CERC_WATCHER_NITRO_PK'
    chainPrivateKey = 'REPLACE_WITH_CERC_PRIVATE_KEY_PEER'
@ -93,14 +94,21 @@

[upstream]
  [upstream.ethServer]
    gqlApiEndpoint = 'http://ipld-eth-server:8083/graphql'
    gqlApiEndpoint = "http://ipld-eth-server:8083/graphql"
    rpcProviderEndpoint = 'REPLACE_WITH_CERC_ETH_RPC_QUERY_ENDPOINT'
    rpcProviderMutationEndpoint = 'REPLACE_WITH_CERC_ETH_RPC_MUTATION_ENDPOINT'

    [upstream.ethServer.rpcProviderNitroNode]
    [upstream.ethServer.payments]
      paidRPCMethods = ["eth_getBlockByHash", "eth_getBlockByNumber", "eth_getStorageAt"]
      amount = 'REPLACE_WITH_UPSTREAM_NITRO_PAY_AMOUNT'

      [upstream.ethServer.payments.nitro]
        address = 'REPLACE_WITH_UPSTREAM_NITRO_ADDRESS'
        multiAddr = 'REPLACE_WITH_UPSTREAM_NITRO_MULTIADDR'
        amount = 'REPLACE_WITH_UPSTREAM_NITRO_PAY_AMOUNT'

        [upstream.ethServer.payments.nitro.fundingAmounts]
          directFund = "1000000000000"
          virtualFund = "1000000000"

  [upstream.cache]
    name = "requests"
@ -13,6 +13,7 @@ NOW=${1:-`date +%s`}

lcli \
  change-genesis-time \
  --testnet-dir $TESTNET_DIR \
  $TESTNET_DIR/genesis.ssz \
  $NOW
@ -7,6 +7,6 @@ WORKDIR /app
COPY . .

RUN echo "Installing dependencies" && \
    yarn
    yarn && yarn build:node

WORKDIR /app/packages/nitro-util
WORKDIR /app/packages/nitro-node

app/data/container-build/cerc-nitro-rpc-client/Dockerfile (Normal file, 12 lines)
@ -0,0 +1,12 @@
FROM node:18.17.1-alpine3.18

RUN apk --update --no-cache add python3 alpine-sdk bash curl jq

WORKDIR /app

COPY . .

RUN echo "Installing dependencies" && \
    yarn

RUN cd packages/nitro-rpc-client

app/data/container-build/cerc-nitro-rpc-client/build.sh (Executable file, 9 lines)
@ -0,0 +1,9 @@
#!/usr/bin/env bash
# Build cerc/nitro-rpc-client

source ${CERC_CONTAINER_BASE_DIR}/build-base.sh

# See: https://stackoverflow.com/a/246128/1701505
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )

docker build -t cerc/nitro-rpc-client:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/go-nitro
@ -1,10 +1,8 @@
#!/usr/bin/env bash
# Build cerc/plugeth-statediff
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
# This container build currently requires access to private dependencies in gitea
# so we check that the necessary access token has been supplied here, then pass it to the build
if [[ -z "${CERC_GO_AUTH_TOKEN}" ]]; then
  echo "ERROR: CERC_GO_AUTH_TOKEN is not set" >&2
  exit 1
# Pass Go auth token if present
if [[ -n "${CERC_GO_AUTH_TOKEN}" ]]; then
  build_command_args="${build_command_args} --build-arg GIT_VDBTO_TOKEN=${CERC_GO_AUTH_TOKEN}"
fi
docker build -t cerc/plugeth-statediff:local ${build_command_args} --build-arg GIT_VDBTO_TOKEN=${CERC_GO_AUTH_TOKEN} ${CERC_REPO_BASE_DIR}/plugeth-statediff
docker build -t cerc/plugeth-statediff:local ${build_command_args} ${CERC_REPO_BASE_DIR}/plugeth-statediff
@ -1,10 +1,8 @@
#!/usr/bin/env bash
# Build cerc/plugeth
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
# This container build currently requires access to private dependencies in gitea
# so we check that the necessary access token has been supplied here, then pass it to the build
if [[ -z "${CERC_GO_AUTH_TOKEN}" ]]; then
  echo "ERROR: CERC_GO_AUTH_TOKEN is not set" >&2
  exit 1
# Pass Go auth token if present
if [[ -n "${CERC_GO_AUTH_TOKEN}" ]]; then
  build_command_args="${build_command_args} --build-arg GIT_VDBTO_TOKEN=${CERC_GO_AUTH_TOKEN}"
fi
docker build -t cerc/plugeth:local ${build_command_args} --build-arg GIT_VDBTO_TOKEN=${CERC_GO_AUTH_TOKEN} ${CERC_REPO_BASE_DIR}/plugeth
docker build -t cerc/plugeth:local ${build_command_args} ${CERC_REPO_BASE_DIR}/plugeth
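With this change the token is optional rather than required. When a build does need the private Gitea dependencies, the token can be supplied before building, for example (a sketch using the stack command shown in the fixturenet-plugeth-tx README below):

```bash
export CERC_GO_AUTH_TOKEN=<your Gitea access token>
laconic-so --stack fixturenet-plugeth-tx build-containers
```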
@ -14,6 +14,8 @@ else
  echo "Filesystem is fresh"
  echo `date` > $EXISTSFILENAME
fi

if [ -n "$CERC_TEST_PARAM_1" ]; then
  echo "Test-param-1: ${CERC_TEST_PARAM_1}"
fi
# Run nginx which will block here forever
/usr/sbin/nginx -g "daemon off;"
@ -1,4 +1,4 @@
FROM node:16.17.1-alpine3.16
FROM node:18.17.1-alpine3.18

RUN apk --update --no-cache add git python3 alpine-sdk jq
@ -14,6 +14,7 @@ cerc/laconic-registry-cli
cerc/laconic-console-host
cerc/fixturenet-eth-geth
cerc/fixturenet-eth-lighthouse
cerc/fixturenet-eth-genesis
cerc/watcher-ts
cerc/watcher-mobymask
cerc/watcher-erc20
@ -55,3 +56,4 @@ cerc/go-nitro
cerc/nitro-contracts
cerc/mobymask-snap
cerc/ponder
cerc/nitro-rpc-client
@ -1,7 +1,7 @@
github.com/cerc-io/ipld-eth-db
github.com/cerc-io/go-ethereum
github.com/cerc-io/ipld-eth-server
github.com/cerc-io/eth-statediff-service
git.vdb.to/cerc-io/ipld-eth-db
git.vdb.to/cerc-io/go-ethereum
git.vdb.to/cerc-io/ipld-eth-server
git.vdb.to/cerc-io/eth-statediff-service
github.com/cerc-io/eth-statediff-fill-service
github.com/cerc-io/ipld-eth-db-validator
github.com/cerc-io/ipld-eth-beacon-indexer
@ -18,7 +18,7 @@ github.com/vulcanize/uniswap-watcher-ts
github.com/vulcanize/uniswap-v3-info
github.com/vulcanize/assemblyscript
github.com/cerc-io/eth-probe
github.com/cerc-io/tx-spammer
git.vdb.to/cerc-io/tx-spammer
github.com/dboreham/foundry
github.com/lirewine/gem
github.com/lirewine/debug
@ -30,7 +30,7 @@ github.com/ethereum-optimism/optimism
github.com/pokt-network/pocket-core
github.com/pokt-network/pocket-core-deployments
github.com/cerc-io/azimuth-watcher-ts
github.com/cerc-io/ipld-eth-state-snapshot
git.vdb.to/cerc-io/ipld-eth-state-snapshot
github.com/cerc-io/gelato-watcher-ts
github.com/filecoin-project/lotus
git.vdb.to/cerc-io/test-project
@ -2,10 +2,10 @@ version: "1.0"
name: chain-chunker
description: "Stack to build containers for chain-chunker"
repos:
  - github.com/cerc-io/ipld-eth-state-snapshot@v5
  - github.com/cerc-io/eth-statediff-service@v5
  - github.com/cerc-io/ipld-eth-db@v5
  - github.com/cerc-io/ipld-eth-server@v5
  - git.vdb.to/cerc-io/ipld-eth-state-snapshot@v5
  - git.vdb.to/cerc-io/eth-statediff-service@v5
  - git.vdb.to/cerc-io/ipld-eth-db@v5
  - git.vdb.to/cerc-io/ipld-eth-server@v5
containers:
  - cerc/ipld-eth-state-snapshot
  - cerc/eth-statediff-service
@ -1,9 +1,9 @@
version: "1.0"
name: erc20-watcher
repos:
  - github.com/cerc-io/go-ethereum
  - github.com/cerc-io/ipld-eth-db
  - github.com/cerc-io/ipld-eth-server
  - git.vdb.to/cerc-io/go-ethereum@v1.11.6-statediff-v5
  - git.vdb.to/cerc-io/ipld-eth-db@v5
  - git.vdb.to/cerc-io/ipld-eth-server@v1.11.6-statediff-v5
  - github.com/cerc-io/watcher-ts
  - github.com/dboreham/foundry
containers:
@ -1,9 +1,9 @@
version: "1.0"
name: erc721-watcher
repos:
  - github.com/cerc-io/go-ethereum
  - github.com/cerc-io/ipld-eth-db
  - github.com/cerc-io/ipld-eth-server
  - git.vdb.to/cerc-io/go-ethereum@v1.11.6-statediff-v5
  - git.vdb.to/cerc-io/ipld-eth-db@v5
  - git.vdb.to/cerc-io/ipld-eth-server@v1.11.6-statediff-v5
  - github.com/cerc-io/watcher-ts
containers:
  - cerc/go-ethereum
@ -2,11 +2,11 @@ version: "1.0"
name: fixturenet-eth-loaded
description: "Loaded Ethereum Fixturenet"
repos:
  - github.com/cerc-io/go-ethereum
  - github.com/cerc-io/tx-spammer
  - github.com/cerc-io/ipld-eth-server
  - github.com/cerc-io/ipld-eth-db
  - github.com/cerc-io/lighthouse
  - git.vdb.to/cerc-io/go-ethereum@v1.11.6-statediff-v5
  - git.vdb.to/cerc-io/tx-spammer
  - git.vdb.to/cerc-io/ipld-eth-server@v1.11.6-statediff-v5
  - git.vdb.to/cerc-io/ipld-eth-db@v5
  - git.vdb.to/cerc-io/lighthouse
containers:
  - cerc/go-ethereum
  - cerc/lighthouse
@ -2,10 +2,10 @@ version: "1.2"
name: fixturenet-eth-tx
description: "Ethereum Fixturenet w/ tx-spammer"
repos:
  - github.com/cerc-io/go-ethereum
  - github.com/cerc-io/tx-spammer
  - git.vdb.to/cerc-io/go-ethereum@v1.11.6-statediff-v5
  - git.vdb.to/cerc-io/tx-spammer
  - git.vdb.to/cerc-io/lighthouse
  - github.com/dboreham/foundry
  - github.com/cerc-io/lighthouse
containers:
  - cerc/go-ethereum
  - cerc/lighthouse
@ -66,7 +66,7 @@ It is not necessary to use them all at once, but a complete example follows:

```
# Setup
$ laconic-so setup-repositories --include github.com/cerc-io/go-ethereum,github.com/cerc-io/ipld-eth-db,github.com/cerc-io/ipld-eth-server,github.com/cerc-io/ipld-eth-beacon-db,github.com/cerc-io/ipld-eth-beacon-indexer,github.com/cerc-io/eth-probe,github.com/cerc-io/tx-spammer
$ laconic-so setup-repositories --include git.vdb.to/cerc-io/go-ethereum,git.vdb.to/cerc-io/ipld-eth-db,git.vdb.to/cerc-io/ipld-eth-server,github.com/cerc-io/ipld-eth-beacon-db,github.com/cerc-io/ipld-eth-beacon-indexer,github.com/cerc-io/eth-probe,git.vdb.to/cerc-io/tx-spammer

# Build
$ laconic-so build-containers --include cerc/go-ethereum,cerc/lighthouse,cerc/fixturenet-eth-geth,cerc/fixturenet-eth-lighthouse,cerc/ipld-eth-db,cerc/ipld-eth-server,cerc/ipld-eth-beacon-db,cerc/ipld-eth-beacon-indexer,cerc/eth-probe,cerc/keycloak,cerc/tx-spammer
@ -2,8 +2,8 @@ version: "1.1"
name: fixturenet-eth
description: "Ethereum Fixturenet"
repos:
  - github.com/cerc-io/go-ethereum
  - github.com/cerc-io/lighthouse
  - git.vdb.to/cerc-io/go-ethereum@v1.11.6-statediff-v5
  - git.vdb.to/cerc-io/lighthouse
  - github.com/dboreham/foundry
containers:
  - cerc/go-ethereum
@ -9,7 +9,7 @@ Prerequisite: An L1 Ethereum RPC endpoint
Clone required repositories:

```bash
laconic-so --stack fixturenet-optimism setup-repositories --exclude github.com/cerc-io/go-ethereum
laconic-so --stack fixturenet-optimism setup-repositories --exclude git.vdb.to/cerc-io/go-ethereum

# If this throws an error as a result of being already checked out to a branch/tag in a repo, remove the repositories mentioned below and re-run the command
```
@ -2,8 +2,8 @@ version: "1.0"
name: fixturenet-optimism
description: "Optimism Fixturenet"
repos:
  - github.com/cerc-io/go-ethereum
  - github.com/cerc-io/lighthouse
  - git.vdb.to/cerc-io/go-ethereum@v1.11.6-statediff-v5
  - git.vdb.to/cerc-io/lighthouse
  - github.com/dboreham/foundry
  - github.com/ethereum-optimism/optimism@v1.0.4
  - github.com/ethereum-optimism/op-geth@v1.101105.2
@ -1,25 +0,0 @@
# Required for:
# Nitro contracts deployment
# MobyMask contract deployment
CERC_PRIVATE_KEY_DEPLOYER="0x888814df89c4358d7ddb3fa4b0213e7331239a80e1f013eaa7b2deca2a41a218"

# ipld-eth-server's go-nitro node credentials
NITRO_PK=2d999770f7b5d49b694080f987b82bbc9fc9ac2b4dcc10b0f8aba7d700f69c6d
NITRO_CHAIN_PK=570b909da9669b2f35a0b1ac70b8358516d55ae1b5b3710e95e9a94395090597

# Watcher's nitro node credentials
CERC_WATCHER_NITRO_PK="0279651921cd800ac560c21ceea27aab0107b67daf436cdd25ce84cad30159b4"

# Used for sending MobyMask chain txs; also serves as chain pk for watcher's nitro node
CERC_PRIVATE_KEY_PEER="111b7500bdce494d6f4bcfe8c2a0dde2ef92f751d9070fac6475dbd6d8021b3f"

# Ponder app's nitro node credentials
CERC_PONDER_NITRO_PK=58368d20ff12f17669c06158c21d885897aa56f9be430edc789614bf9851d53f
CERC_PONDER_NITRO_CHAIN_PK=fb1e9af328c283ca3e2486e7c24d13582b7912057d8b9542ff41503c85bc05c0

# Used by watcher and ponder app for sending upstream payments
CERC_UPSTREAM_NITRO_ADDRESS="0xAAA6628Ec44A8a742987EF3A114dDFE2D4F7aDCE" # corresponds to NITRO_PK
CERC_UPSTREAM_NITRO_MULTIADDR="/dns4/go-nitro/tcp/5005/ws/p2p/16Uiu2HAmSjXJqsyBJgcBUU2HQmykxGseafSatbpq5471XmuaUqyv"

# Used by the MobyMask app to make payments to watcher
CERC_PAYMENT_NITRO_ADDRESS="0xBBB676f9cFF8D242e9eaC39D063848807d3D1D94" # corresponds to CERC_WATCHER_NITRO_PK
@ -1,5 +1,7 @@
# fixturenet-payments

Instructions to set up and deploy an end-to-end fixturenet-payments stack

## Setup

Clone required repositories:
@ -16,15 +18,14 @@ laconic-so --stack fixturenet-payments build-containers

## Deploy

### Configuration

Deploy the stack:

```bash
laconic-so --stack fixturenet-payments deploy --cluster payments up

# Exposed on host ports:
# 5005: go-nitro node's p2p msg port
# 4005: go-nitro node's RPC endpoint
# 5005: go-nitro node's p2p endpoint
# 8081: reverse payment proxy's RPC endpoint
# 15432: MobyMask v3 watcher's db endpoint
# 3001: MobyMask v3 watcher endpoint
@ -33,23 +34,9 @@ laconic-so --stack fixturenet-payments deploy --cluster payments up
# 3004: MobyMask v3 app
```

Check the logs of the MobyMask contract deployment container to get the deployed contract's address and generated root invite link:
## Demo

```bash
docker logs -f $(docker ps -aq --filter name="mobymask-1")
```

Check the reverse payment proxy container logs:

```bash
docker logs -f $(docker ps -aq --filter name="nitro-reverse-payment-proxy")
```

Run the ponder app:

```bash
docker exec -it payments-ponder-app-1 bash -c "pnpm start"
```
Follow the [demo](./demo.md) to try out end-to-end payments

## Clean up

app/data/stacks/fixturenet-payments/demo.md (Normal file, 310 lines)
@ -0,0 +1,310 @@
# Demo

Stack components:
* `ipld-eth-db` database for statediffed data
* Local geth + lighthouse blockchain "fixturenet" running in statediffing mode
* `ipld-eth-server` which runs an ETH RPC API and a GQL server; serves data from `ipld-eth-db`
* A go-nitro deployment acting as the Nitro node for `ipld-eth-server`
* A modified reverse payment proxy server (based on the one from go-nitro) that proxies requests to `ipld-eth-server`'s RPC endpoint; it talks to `ipld-eth-server`'s Nitro node to accept and validate payments required for configured RPC requests
* A MobyMask v3 watcher that pays the `ipld-eth-server` for ETH RPC requests
* A MobyMask v3 app that pays the watcher for reads (GQL queries) and writes
* An example ERC20 Ponder app that pays the `ipld-eth-server` for ETH RPC requests

## Setup

* On starting the stack, MobyMask watcher creates a payment channel with the `ipld-eth-server`'s Nitro node. Check watcher logs and wait for the same:

```bash
docker logs -f $(docker ps -aq --filter name="mobymask-watcher-server")

# Expected output:
# vulcanize:server Peer ID: 12D3KooWKLqLWU82VU7jmsmQMruRvZWhoBoVsf1UHchM5Nuq9ymY
# vulcanize:server Using chain URL http://fixturenet-eth-geth-1:8546 for Nitro node
# ...
# ts-nitro:util:nitro Ledger channel created with id 0x65703ccdfacab09ac35367bdbe6c5a337e7a6651aad526807607b1c59b28bc1e
# ...
# ts-nitro:util:nitro Virtual payment channel created with id 0x29ff1335d73391a50e8fde3e9b34f00c3d81c39ddc7f89187f44dd51df96140e
# vulcanize:server Starting server... +0ms
```

* Keep the above command running to keep track of incoming payments and GQL requests from the MobyMask app

* In another terminal, export the payment channel id to a variable:

```bash
export WATCHER_UPSTREAM_PAYMENT_CHANNEL=<PAYMENT_CHANNEL_ID>
```

* Check the payment channel status:

```bash
docker exec payments-nitro-rpc-client-1 npm exec -c "nitro-rpc-client get-payment-channel $WATCHER_UPSTREAM_PAYMENT_CHANNEL -h go-nitro -p 4005"

# Expected output:
# {
#   ID: '0x8c0d17639bd2ba07dbcd248304a8f3c6c7276bfe25c2b87fe41f461e20f33f01',
#   Status: 'Open',
#   Balance: {
#     AssetAddress: '0x0000000000000000000000000000000000000000',
#     Payee: '0xaaa6628ec44a8a742987ef3a114ddfe2d4f7adce',
#     Payer: '0xbbb676f9cff8d242e9eac39d063848807d3d1d94',
#     PaidSoFar: 0n,
#     RemainingFunds: 1000000000n
#   }
# }
```

* In another terminal, check the reverse payment proxy server's logs to keep track of incoming payments and RPC requests:

```bash
docker logs -f $(docker ps -aq --filter name="nitro-reverse-payment-proxy")
```

* MetaMask flask wallet setup for running the MobyMask app:

* Get the geth node's port mapped to host:

```bash
docker port payments-fixturenet-eth-geth-1-1 8545
```

* In MetaMask, add a custom network with the following settings:

```bash
# Network name
Local fixturenet

# New RPC URL
http://127.0.0.1:<GETH_PORT>

# Chain ID
1212

# Currency symbol
ETH
```

* Import a faucet account with the following private key:

```bash
# Faucet PK
# 0x570b909da9669b2f35a0b1ac70b8358516d55ae1b5b3710e95e9a94395090597
```

* Create an additional account for usage in the app; fund it from the faucet account

* Get the generated root invite link for MobyMask from contract deployment container logs:

```bash
docker logs -f $(docker ps -aq --filter name="mobymask-1")

# Expected output:
# ...
# "key": "0x60e706fda4639fe0a8eb102cb0ce81231cf6e819f41cb4eadf72d865ea4c11ad"
# }
# http://127.0.0.1:3004/#/members?invitation=<INVITATION>
```
## Run

### MobyMask App

* Open the app in a browser (where MetaMask was set up) using the invite link

* Run the following in the browser console to enable logs:

```bash
localStorage.debug = 'ts-nitro:*'
# Refresh the tab for the change to take effect
```

* In the app's debug panel, check that the peer gets connected to the relay node and the watcher peer

* Open the `NITRO` tab in the debug panel
* Click on `Connect Wallet` to connect to MetaMask (make sure that the newly funded account is active)
* Click on `Connect Snap` to install/connect snap

* Perform `DIRECT FUND` with the preset amount and wait for the MetaMask confirmation prompt to appear; confirm the transaction and wait for a ledger channel to be created with the watcher

* Perform `VIRTUAL FUND` with amount set to `10000` and wait for a payment channel to be created with the watcher

* Perform phisher status check queries now that a payment channel is created:

* Check the watcher logs for received payments along with the GQL queries:

```bash
# Expected output:
# ...
# laconic:payments Serving a paid query for 0x86804299822212c070178B5135Ba6DdAcFC357D3
# vulcanize:resolver isPhisher 0x98ae4f9e9d01cc892adfe6871e1db0287039e0c183d3b5bb31d724228c114744 0x2B6AFbd4F479cE4101Df722cF4E05F941523EaD9 TWT:ash1
# vulcanize:indexer isPhisher: db miss, fetching from upstream server
# laconic:payments Making RPC call: eth_chainId
# laconic:payments Making RPC call: eth_getBlockByHash
# laconic:payments Making RPC call: eth_chainId
# laconic:payments Making RPC call: eth_getStorageAt
```

* The watcher makes several ETH RPC requests to `ipld-eth-server` to fetch data required for satisfying the GQL request(s); check the payment proxy server logs for charged RPC requests (`eth_getBlockByHash`, `eth_getBlockByNumber`, `eth_getStorageAt`):

```bash
# Expected output:
# ...
# {"time":"2023-10-06T06:46:52.769009314Z","level":"DEBUG","msg":"Serving RPC request","method":"eth_chainId"}
# {"time":"2023-10-06T06:46:52.773006426Z","level":"DEBUG","msg":"Serving RPC request","method":"eth_getBlockByNumber"}
# {"time":"2023-10-06T06:46:52.811142054Z","level":"DEBUG","msg":"Request cost","cost-per-byte":1,"response-length":1480,"cost":1480,"method":"eth_getBlockByNumber"}
# {"time":"2023-10-06T06:46:52.811418494Z","level":"DEBUG","msg":"sent message","address":"0xAAA6628Ec44A8a742987EF3A114dDFE2D4F7aDCE","method":"receive_voucher"}
# {"time":"2023-10-06T06:46:52.812557482Z","level":"DEBUG","msg":"Received voucher","delta":5000}
# ...
# {"time":"2023-10-06T06:46:52.87525215Z","level":"DEBUG","msg":"Serving RPC request","method":"eth_getStorageAt"}
# {"time":"2023-10-06T06:46:52.882859654Z","level":"DEBUG","msg":"Request cost","cost-per-byte":1,"response-length":104,"cost":104,"method":"eth_getStorageAt"}
# {"time":"2023-10-06T06:46:52.882946485Z","level":"DEBUG","msg":"sent message","address":"0xAAA6628Ec44A8a742987EF3A114dDFE2D4F7aDCE","method":"receive_voucher"}
# {"time":"2023-10-06T06:46:52.884012641Z","level":"DEBUG","msg":"Received voucher","delta":5000}
# {"time":"2023-10-06T06:46:52.884032961Z","level":"DEBUG","msg":"Destination request","url":"http://ipld-eth-server:8081/"}
```

* Change the amount beside the `PAY` button in the debug panel to `>=100` for the phisher reports next

* Perform a phisher report and check the watcher logs for received payments:

```bash
# Expected output:
# ...
# vulcanize:libp2p-utils [6:50:2] Received a message on mobymask P2P network from peer: 12D3KooWRkxV9SX8uTUZYkbRjai4Fsn7yavB61J5TMnksixsabsP
# ts-nitro:engine {"msg":"Received message","_msg":{"to":"0xBBB676","from":"0x868042","payloadSummaries":[],"proposalSummaries":[],"payments":[{"amount":200,"channelId":"0x557153d729cf3323c0bdb40a36b245f98c2d4562933ba2182c9d61c5cfeda948"}],"rejectedObjectives":[]}}
# laconic:payments Received a payment voucher of 100 from 0x86804299822212c070178B5135Ba6DdAcFC357D3
# vulcanize:libp2p-utils Payment received for a mutation request from 0x86804299822212c070178B5135Ba6DdAcFC357D3
# vulcanize:libp2p-utils Transaction receipt for invoke message {
#   to: '0x2B6AFbd4F479cE4101Df722cF4E05F941523EaD9',
#   blockNumber: 232,
#   blockHash: '0x6a188722c102662ea48af3786fe9db0d4b6c7ab7b27473eb0e628cf95746a244',
#   transactionHash: '0x6521205db8a905b3222adc2b6855f9b2abc72580624d299bec2a35bcba173efa',
#   effectiveGasPrice: '1500000007',
#   gasUsed: '113355'
# }
```

* Check the watcher - ipld-eth-server payment channel status after a few requests:

```bash
docker exec payments-nitro-rpc-client-1 npm exec -c "nitro-rpc-client get-payment-channel $WATCHER_UPSTREAM_PAYMENT_CHANNEL -h go-nitro -p 4005"

# Expected output ('PaidSoFar' should be non zero):
# {
#   ID: '0x8c0d17639bd2ba07dbcd248304a8f3c6c7276bfe25c2b87fe41f461e20f33f01',
#   Status: 'Open',
#   Balance: {
#     AssetAddress: '0x0000000000000000000000000000000000000000',
#     Payee: '0xaaa6628ec44a8a742987ef3a114ddfe2d4f7adce',
#     Payer: '0xbbb676f9cff8d242e9eac39d063848807d3d1d94',
#     PaidSoFar: 30000n,
#     RemainingFunds: 999970000n
#   }
# }
```
### ERC20 Ponder App

* Run the ponder app in indexer mode:

```bash
docker exec -it payments-ponder-app-indexer-1 bash -c "DEBUG=laconic:payments pnpm start"

# Expected output:
# 08:00:28.701 INFO payment Nitro node setup with address 0x67D5b55604d1aF90074FcB69b8C51838FFF84f8d
# laconic:payments Starting voucher subscription... +0ms
# ...
# 09:58:54.288 INFO payment Creating ledger channel with nitro node 0xAAA6628Ec44A8a742987EF3A114dDFE2D4F7aDCE
# ...
# 09:59:14.230 INFO payment Creating payment channel with nitro node 0xAAA6628Ec44A8a742987EF3A114dDFE2D4F7aDCE
# ...
# 09:59:14.329 INFO payment Using payment channel 0x10f049519bc3f862e2b26e974be8666886228f30ea54aab06e2f23718afffab0
```

* Export the payment channel id to a variable:

```bash
export PONDER_UPSTREAM_PAYMENT_CHANNEL=<PAYMENT_CHANNEL_ID>
```

* On starting the Ponder app in indexer mode, it creates a payment channel with the `ipld-eth-server`'s Nitro node and then starts the historical sync service

* The sync service makes several ETH RPC requests to the `ipld-eth-server` to fetch required data; check the payment proxy server logs for charged RPC requests (`eth_getBlockByNumber`, `eth_getLogs`)

```bash
# Expected output:
# ...
# {"time":"2023-10-06T06:51:45.214478402Z","level":"DEBUG","msg":"Serving RPC request","method":"eth_getBlockByNumber"}
# {"time":"2023-10-06T06:51:45.22251171Z","level":"DEBUG","msg":"Request cost","cost-per-byte":1,"response-length":576,"cost":576,"method":"eth_getBlockByNumber"}
# {"time":"2023-10-06T06:51:45.222641963Z","level":"DEBUG","msg":"sent message","address":"0xAAA6628Ec44A8a742987EF3A114dDFE2D4F7aDCE","method":"receive_voucher"}
# {"time":"2023-10-06T06:51:45.224042391Z","level":"DEBUG","msg":"Received voucher","delta":5000}
# {"time":"2023-10-06T06:51:45.224061411Z","level":"DEBUG","msg":"Destination request","url":"http://ipld-eth-server:8081/"}
# {"time":"2023-10-06T06:51:45.242064953Z","level":"DEBUG","msg":"Serving RPC request","method":"eth_getLogs"}
# {"time":"2023-10-06T06:51:45.249118517Z","level":"DEBUG","msg":"Request cost","cost-per-byte":1,"response-length":61,"cost":61,"method":"eth_getLogs"}
# {"time":"2023-10-06T06:51:45.249189892Z","level":"DEBUG","msg":"sent message","address":"0xAAA6628Ec44A8a742987EF3A114dDFE2D4F7aDCE","method":"receive_voucher"}
# {"time":"2023-10-06T06:51:45.249743149Z","level":"DEBUG","msg":"Received voucher","delta":5000}
# {"time":"2023-10-06T06:51:45.249760631Z","level":"DEBUG","msg":"Destination request","url":"http://ipld-eth-server:8081/"}
# ...
```

* Check the ponder - ipld-eth-server payment channel status:

```bash
docker exec payments-nitro-rpc-client-1 npm exec -c "nitro-rpc-client get-payment-channel $PONDER_UPSTREAM_PAYMENT_CHANNEL -h go-nitro -p 4005"

# Expected output ('PaidSoFar' is non zero):
# {
#   ID: '0x1178ac0f2a43e54a122216fa6afdd30333b590e49e50317a1f9274a591da0f96',
#   Status: 'Open',
#   Balance: {
#     AssetAddress: '0x0000000000000000000000000000000000000000',
#     Payee: '0xaaa6628ec44a8a742987ef3a114ddfe2d4f7adce',
#     Payer: '0x67d5b55604d1af90074fcb69b8c51838fff84f8d',
#     PaidSoFar: 215000n,
#     RemainingFunds: 999785000n
#   }
# }
```

* In another terminal, run the ponder app in watcher mode:
```bash
docker exec -it payments-ponder-app-watcher-1 bash -c "DEBUG=laconic:payments pnpm start"

# Expected output:
# 11:23:22.057 DEBUG app Started using config file: ponder.config.ts
# 08:02:12.548 INFO payment Nitro node setup with address 0x111A00868581f73AB42FEEF67D235Ca09ca1E8db
# laconic:payments Starting voucher subscription... +0ms
# 08:02:17.417 INFO payment Creating ledger channel with nitro node 0x67D5b55604d1aF90074FcB69b8C51838FFF84f8d ...
# 08:02:37.135 INFO payment Creating payment channel with nitro node 0x67D5b55604d1aF90074FcB69b8C51838FFF84f8d ...
# 08:02:37.313 INFO payment Using payment channel 0x4b8e67f6a6fcfe114fdd60b85f963344ece4c77d4eea3825688c74b45ff5509b
# ...
# 11:23:22.436 INFO server Started responding as healthy
```

* Check the terminal in which the indexer-mode ponder app is running. Logs of payments for `eth_getLogs` queries can be seen:
```bash
# ...
# 08:02:37.763 DEBUG realtime Finished processing new head block 89 (network=fixturenet)
# laconic:payments Received a payment voucher of 50 from 0x111A00868581f73AB42FEEF67D235Ca09ca1E8db +444ms
# laconic:payments Serving a paid query for 0x111A00868581f73AB42FEEF67D235Ca09ca1E8db +1ms
# 08:02:37.804 DEBUG payment Verified payment for GQL queries getLogEvents
# laconic:payments Received a payment voucher of 50 from 0x111A00868581f73AB42FEEF67D235Ca09ca1E8db +45ms
# laconic:payments Serving a paid query for 0x111A00868581f73AB42FEEF67D235Ca09ca1E8db +0ms
# 08:02:37.849 DEBUG payment Verified payment for GQL queries getLogEvents
```

## Clean Up

* In the MobyMask app, perform `VIRTUAL DEFUND` and `DIRECT DEFUND` (in that order) to close the payment channel created with the watcher

* Run the following in the browser console to delete the Nitro node's data:

```bash
await clearNodeStorage()
```

* Run the following in the browser console to clear data in local storage:
```bash
localStorage.clear()
```

* On a fresh restart, clear the activity tab data in MetaMask for the accounts used above
@ -3,22 +3,22 @@ name: fixturenet-payments
description: "Stack to demonstrate payments between various services"
repos:
  # fixturenet repos
  - github.com/cerc-io/go-ethereum
  - github.com/cerc-io/lighthouse
  - github.com/cerc-io/ipld-eth-db
  - github.com/cerc-io/ipld-eth-server
  - git.vdb.to/cerc-io/go-ethereum@v1.11.6-statediff-v5
  - git.vdb.to/cerc-io/lighthouse
  - git.vdb.to/cerc-io/ipld-eth-db@v5
  - git.vdb.to/cerc-io/ipld-eth-server@v1.11.6-statediff-v5
  # nitro repos
  - github.com/cerc-io/ts-nitro@v0.1.12
  - github.com/cerc-io/go-nitro@v0.1.0-ts-port-0.1.4 # TODO: Update after fixes
  - github.com/cerc-io/ts-nitro@v0.1.13
  - github.com/cerc-io/go-nitro@v0.1.1-ts-port-0.1.5
  # mobymask watcher repos
  - github.com/cerc-io/watcher-ts@v0.2.61
  - github.com/cerc-io/mobymask-v2-watcher-ts@v3 # TODO: Update after fixes
  - github.com/cerc-io/watcher-ts@v0.2.63
  - github.com/cerc-io/mobymask-v2-watcher-ts@v0.2.2
  - github.com/cerc-io/MobyMask@v0.1.3
  # mobymask app repos
  - github.com/cerc-io/mobymask-snap
  - github.com/cerc-io/mobymask-ui@v0.2.0
  - github.com/cerc-io/mobymask-ui@v0.2.1
  # ponder repo
  - github.com/cerc-io/ponder@laconic
  - github.com/cerc-io/ponder@laconic-esm
containers:
  # fixturenet images
  - cerc/go-ethereum
@ -31,6 +31,7 @@ containers:
  - cerc/ipld-eth-server
  - cerc/nitro-contracts
  - cerc/go-nitro
  - cerc/nitro-rpc-client
  # mobymask watcher images
  - cerc/watcher-ts
  - cerc/watcher-mobymask-v3
@ -12,7 +12,7 @@ See `stacks/fixturenet-eth/README.md` for more information.
* cerc/tx-spammer

## Deploy the stack
Note: since some Go dependencies are currently private, `CERC_GO_AUTH_TOKEN` must be set to a valid Gitea access token before running the `build-containers` command.
Note: if there are any private Go dependencies, `CERC_GO_AUTH_TOKEN` must be set to a valid Gitea access token before running the `build-containers` command.
```
$ laconic-so --stack fixturenet-plugeth-tx setup-repositories
$ laconic-so --stack fixturenet-plugeth-tx build-containers
@ -4,10 +4,10 @@ description: "plugeth Ethereum Fixturenet w/ tx-spammer"
repos:
  - git.vdb.to/cerc-io/plugeth@statediff
  - git.vdb.to/cerc-io/plugeth-statediff
  - github.com/cerc-io/lighthouse
  - github.com/cerc-io/ipld-eth-db@v5
  - github.com/cerc-io/ipld-eth-server@v5
  - github.com/cerc-io/tx-spammer
  - git.vdb.to/cerc-io/lighthouse
  - git.vdb.to/cerc-io/ipld-eth-db@v5
  - git.vdb.to/cerc-io/ipld-eth-server@v5
  - git.vdb.to/cerc-io/tx-spammer
  - github.com/dboreham/foundry
containers:
  - cerc/plugeth-statediff
@ -2,8 +2,8 @@ version: "1.0"
name: fixturenet-pocket
description: "A single node pocket chain that can serve relays from the geth-1 node in eth-fixturenet"
repos:
  - github.com/cerc-io/go-ethereum
  - github.com/cerc-io/lighthouse
  - git.vdb.to/cerc-io/go-ethereum@v1.11.6-statediff-v5
  - git.vdb.to/cerc-io/lighthouse
  - github.com/pokt-network/pocket-core
  - github.com/pokt-network/pocket-core-deployments # contains the dockerfile
containers:
@ -2,8 +2,8 @@ version: "1.2"
name: mainnet-eth
description: "Ethereum Mainnet"
repos:
  - github.com/cerc-io/go-ethereum
  - github.com/cerc-io/lighthouse
  - git.vdb.to/cerc-io/go-ethereum@v1.11.6-statediff-v5
  - git.vdb.to/cerc-io/lighthouse
  - github.com/dboreham/foundry
  - git.vdb.to/cerc-io/keycloak-reg-api
  - git.vdb.to/cerc-io/keycloak-reg-ui
@ -27,10 +27,7 @@ import sys
import tomli
import re

default_spec_file_content = """config:
    node_moniker: my-node-name
    chain_id: my-chain-id
"""
default_spec_file_content = ""


class SetupPhase(Enum):
@ -1,8 +1,8 @@
version: "1.0"
name: mobymask-v2
repos:
  - github.com/cerc-io/go-ethereum
  - github.com/cerc-io/lighthouse
  - git.vdb.to/cerc-io/go-ethereum@v1.11.6-statediff-v5
  - git.vdb.to/cerc-io/lighthouse
  - github.com/dboreham/foundry
  - github.com/ethereum-optimism/optimism@v1.0.4
  - github.com/ethereum-optimism/op-geth@v1.101105.2
@ -2,11 +2,11 @@ version: "1.0"
description: "MobyMask v3 stack"
name: mobymask-v3
repos:
  - github.com/cerc-io/ts-nitro@v0.1.12
  - github.com/cerc-io/watcher-ts@v0.2.57
  - github.com/cerc-io/mobymask-v2-watcher-ts@v3 # TODO: Update after fixes
  - github.com/cerc-io/ts-nitro@v0.1.13
  - github.com/cerc-io/watcher-ts@v0.2.63
  - github.com/cerc-io/mobymask-v2-watcher-ts@v0.2.2
  - github.com/cerc-io/MobyMask@v0.1.3
  - github.com/cerc-io/mobymask-ui@v0.2.0
  - github.com/cerc-io/mobymask-ui@v0.2.1
containers:
  - cerc/nitro-contracts
  - cerc/watcher-ts
@ -23,7 +23,7 @@ laconic-so --stack mobymask-v3 build-containers --exclude cerc/mobymask-ui
Create and update an env file to be used in the next step ([defaults](../../config/watcher-mobymask-v3/mobymask-params.env)):

```bash
# External ETH RPC endpoint (L2 Optimism geth)
# External ETH RPC endpoint for contract(s) deployment
CERC_ETH_RPC_ENDPOINT=

# External ETH RPC endpoint used for queries in the watcher
@ -32,6 +32,9 @@ Create and update an env file to be used in the next step ([defaults](../../conf
# External ETH RPC endpoint used for mutations in the watcher
CERC_ETH_RPC_MUTATION_ENDPOINT=

# External ETH endpoint used by watcher's Nitro node
CERC_NITRO_CHAIN_URL=

# Specify an account PK for contract deployment
CERC_PRIVATE_KEY_DEPLOYER=
@ -2,7 +2,7 @@ version: "1.1"
name: package-registry
description: "Local Package Registry"
repos:
- github.com/cerc-io/hosting
- git.vdb.to/cerc-io/hosting
- gitea.com/gitea/act_runner
containers:
- cerc/act-runner
@ -20,7 +20,7 @@ from app.deploy_util import VolumeMapping, run_container_command
from pathlib import Path

default_spec_file_content = """config:
config_variable: test-value
test-variable-1: test-value-1
"""

@ -20,13 +20,12 @@ import copy
import os
import sys
from dataclasses import dataclass
from decouple import config
from importlib import resources
import subprocess
from python_on_whales import DockerClient, DockerException
import click
from pathlib import Path
from app.util import include_exclude_check, get_parsed_stack_config, global_options2
from app.util import include_exclude_check, get_parsed_stack_config, global_options2, get_dev_root_path
from app.deploy_types import ClusterContext, DeployCommandContext
from app.deployment_create import create as deployment_create
from app.deployment_create import init as deployment_init

@ -42,9 +41,16 @@ from app.deployment_create import setup as deployment_setup
def command(ctx, include, exclude, env_file, cluster):
'''deploy a stack'''

# Although in theory for some subcommands (e.g. deploy create) the stack can be inferred,
# Click doesn't allow us to know that here, so we make providing the stack mandatory
stack = global_options2(ctx).stack
if not stack:
print("Error: --stack option is required")
sys.exit(1)

if ctx.parent.obj.debug:
print(f"ctx.parent.obj: {ctx.parent.obj}")
ctx.obj = create_deploy_context(global_options2(ctx), global_options2(ctx).stack, include, exclude, cluster, env_file)
ctx.obj = create_deploy_context(global_options2(ctx), stack, include, exclude, cluster, env_file)
# Subcommand is executed now, by the magic of click
@ -235,17 +241,15 @@ def _make_runtime_env(ctx):
# stack has to be either PathLike pointing to a stack yml file, or a string with the name of a known stack
def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):

if ctx.local_stack:
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
else:
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
dev_root_path = get_dev_root_path(ctx)

# TODO: huge hack, fix this
# If the caller passed a path for the stack file, then we know that we can get the compose files
# from the same directory
deployment = False
if isinstance(stack, os.PathLike):
compose_dir = stack.parent.joinpath("compose")
deployment = True
else:
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
compose_dir = Path(__file__).absolute().parent.joinpath("data", "compose")

@ -295,6 +299,16 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
if include_exclude_check(pod_name, include, exclude):
if pod_repository is None or pod_repository == "internal":
compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_path}.yml")
else:
if deployment:
compose_file_name = os.path.join(compose_dir, "docker-compose.yml")
pod_pre_start_command = pod["pre_start_command"]
pod_post_start_command = pod["post_start_command"]
script_dir = compose_dir.parent.joinpath("pods", pod_name, "scripts")
if pod_pre_start_command is not None:
pre_start_commands.append(os.path.join(script_dir, pod_pre_start_command))
if pod_post_start_command is not None:
post_start_commands.append(os.path.join(script_dir, pod_post_start_command))
else:
pod_root_dir = os.path.join(dev_root_path, pod_repository.split("/")[-1], pod["path"])
compose_file_name = os.path.join(pod_root_dir, "docker-compose.yml")
@ -16,14 +16,14 @@
import os
from typing import List
from app.deploy_types import DeployCommandContext, VolumeMapping
from app.util import get_parsed_stack_config, get_yaml, get_compose_file_dir
from app.util import get_parsed_stack_config, get_yaml, get_compose_file_dir, get_pod_list


def _container_image_from_service(stack: str, service: str):
# Parse the compose files looking for the image name of the specified service
image_name = None
parsed_stack = get_parsed_stack_config(stack)
pods = parsed_stack["pods"]
pods = get_pod_list(parsed_stack)
yaml = get_yaml()
for pod in pods:
pod_file_path = os.path.join(get_compose_file_dir(), f"docker-compose-{pod}.yml")
@ -25,6 +25,16 @@ from app.deploy import exec_operation, logs_operation, create_deploy_context
class DeploymentContext:
dir: Path

def get_stack_file(self):
return self.dir.joinpath("stack.yml")

def get_env_file(self):
return self.dir.joinpath("config.env")

# TODO: implement me
def get_cluster_name(self):
return None


@click.group()
@click.option("--dir", required=True, help="path to deployment directory")

@ -49,10 +59,10 @@ def command(ctx, dir):


def make_deploy_context(ctx):
# Get the stack config file name
stack_file_path = ctx.obj.dir.joinpath("stack.yml")
# TODO: add cluster name and env file here
return create_deploy_context(ctx.parent.parent.obj, stack_file_path, None, None, None, None)
stack_file_path = ctx.obj.get_stack_file()
env_file = ctx.obj.get_env_file()
cluster_name = ctx.obj.get_cluster_name()
return create_deploy_context(ctx.parent.parent.obj, stack_file_path, None, None, cluster_name, env_file)


@command.command()
@ -17,12 +17,13 @@ import click
from importlib import util
import os
from pathlib import Path
from typing import List
import random
from shutil import copyfile, copytree
from shutil import copy, copyfile, copytree
import sys
from app.util import get_stack_file_path, get_parsed_deployment_spec, get_parsed_stack_config, global_options, get_yaml
from app.util import get_compose_file_dir
from app.deploy_types import DeploymentContext, LaconicStackSetupCommand
from app.util import (get_stack_file_path, get_parsed_deployment_spec, get_parsed_stack_config, global_options, get_yaml,
get_pod_list, get_pod_file_path, pod_has_scripts, get_pod_script_paths, get_plugin_code_path)
from app.deploy_types import DeploymentContext, DeployCommandContext, LaconicStackSetupCommand


def _make_default_deployment_dir():
@ -32,10 +33,10 @@ def _make_default_deployment_dir():
def _get_ports(stack):
ports = {}
parsed_stack = get_parsed_stack_config(stack)
pods = parsed_stack["pods"]
pods = get_pod_list(parsed_stack)
yaml = get_yaml()
for pod in pods:
pod_file_path = os.path.join(get_compose_file_dir(), f"docker-compose-{pod}.yml")
pod_file_path = get_pod_file_path(parsed_stack, pod)
parsed_pod_file = yaml.load(open(pod_file_path, "r"))
if "services" in parsed_pod_file:
for svc_name, svc in parsed_pod_file["services"].items():

@ -49,10 +50,10 @@ def _get_named_volumes(stack):
# Parse the compose files looking for named volumes
named_volumes = []
parsed_stack = get_parsed_stack_config(stack)
pods = parsed_stack["pods"]
pods = get_pod_list(parsed_stack)
yaml = get_yaml()
for pod in pods:
pod_file_path = os.path.join(get_compose_file_dir(), f"docker-compose-{pod}.yml")
pod_file_path = get_pod_file_path(parsed_stack, pod)
parsed_pod_file = yaml.load(open(pod_file_path, "r"))
if "volumes" in parsed_pod_file:
volumes = parsed_pod_file["volumes"]
@ -105,15 +106,26 @@ def _fixup_pod_file(pod, spec, compose_dir):
pod["services"][container_name]["ports"] = container_ports


def _commands_plugin_path(ctx: DeployCommandContext):
plugin_path = get_plugin_code_path(ctx.stack)
return plugin_path.joinpath("deploy", "commands.py")


# See: https://stackoverflow.com/a/54625079/1701505
def _has_method(o, name):
return callable(getattr(o, name, None))


def call_stack_deploy_init(deploy_command_context):
# Link with the python file in the stack
# Call a function in it
# If no function found, return None
python_file_path = get_stack_file_path(deploy_command_context.stack).parent.joinpath("deploy", "commands.py")
python_file_path = _commands_plugin_path(deploy_command_context)
if python_file_path.exists():
spec = util.spec_from_file_location("commands", python_file_path)
imported_stack = util.module_from_spec(spec)
spec.loader.exec_module(imported_stack)
if _has_method(imported_stack, "init"):
return imported_stack.init(deploy_command_context)
else:
return None

@ -124,11 +136,13 @@ def call_stack_deploy_setup(deploy_command_context, parameters: LaconicStackSetu
# Link with the python file in the stack
# Call a function in it
# If no function found, return None
python_file_path = get_stack_file_path(deploy_command_context.stack).parent.joinpath("deploy", "commands.py")
python_file_path = _commands_plugin_path(deploy_command_context)
print(f"Path: {python_file_path}")
if python_file_path.exists():
spec = util.spec_from_file_location("commands", python_file_path)
imported_stack = util.module_from_spec(spec)
spec.loader.exec_module(imported_stack)
if _has_method(imported_stack, "setup"):
return imported_stack.setup(deploy_command_context, parameters, extra_args)
else:
return None

@ -139,11 +153,12 @@ def call_stack_deploy_create(deployment_context, extra_args):
# Link with the python file in the stack
# Call a function in it
# If no function found, return None
python_file_path = get_stack_file_path(deployment_context.command_context.stack).parent.joinpath("deploy", "commands.py")
python_file_path = _commands_plugin_path(deployment_context.command_context)
if python_file_path.exists():
spec = util.spec_from_file_location("commands", python_file_path)
imported_stack = util.module_from_spec(spec)
spec.loader.exec_module(imported_stack)
if _has_method(imported_stack, "create"):
return imported_stack.create(deployment_context, extra_args)
else:
return None
@ -204,22 +219,48 @@ def _get_mapped_ports(stack: str, map_recipe: str):
return ports


def _parse_config_variables(variable_values: str):
result = None
if variable_values:
value_pairs = variable_values.split(",")
if len(value_pairs):
result_values = {}
for value_pair in value_pairs:
variable_value_pair = value_pair.split("=")
if len(variable_value_pair) != 2:
print(f"ERROR: config argument is not valid: {variable_values}")
sys.exit(1)
variable_name = variable_value_pair[0]
variable_value = variable_value_pair[1]
result_values[variable_name] = variable_value
result = {"config": result_values}
return result


@click.command()
@click.option("--config", help="Provide config variables for the deployment")
@click.option("--output", required=True, help="Write yaml spec file here")
@click.option("--map-ports-to-host", required=False,
help="Map ports to the host as one of: any-variable-random (default), "
"localhost-same, any-same, localhost-fixed-random, any-fixed-random")
@click.pass_context
def init(ctx, output, map_ports_to_host):
def init(ctx, config, output, map_ports_to_host):
yaml = get_yaml()
stack = global_options(ctx).stack
verbose = global_options(ctx).verbose
debug = global_options(ctx).debug
default_spec_file_content = call_stack_deploy_init(ctx.obj)
spec_file_content = {"stack": stack}
if default_spec_file_content:
spec_file_content.update(default_spec_file_content)
if verbose:
print(f"Creating spec file for stack: {stack}")
config_variables = _parse_config_variables(config)
if config_variables:
# Implement merge, since update() overwrites
orig_config = spec_file_content["config"]
new_config = config_variables["config"]
merged_config = {**new_config, **orig_config}
spec_file_content.update({"config": merged_config})
if debug:
print(f"Creating spec file for stack: {stack} with content: {spec_file_content}")

ports = _get_mapped_ports(stack, map_ports_to_host)
spec_file_content["ports"] = ports
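The new `--config` option takes comma-separated `NAME=value` pairs; `_parse_config_variables` turns that string into a `config` mapping which `init` merges into the generated spec, with values already present in the stack's default spec winning on conflicts. A standalone sketch of that round trip (re-implemented here for illustration only; the stack name and values come from the test stack shown earlier):

```python
# Sketch of the --config parsing and merge behaviour above (not the real module).
def parse_config_variables(variable_values: str):
    # "NAME1=value1,NAME2=value2" -> {"config": {"NAME1": "value1", "NAME2": "value2"}}
    if not variable_values:
        return None
    result_values = {}
    for value_pair in variable_values.split(","):
        name_and_value = value_pair.split("=")
        if len(name_and_value) != 2:
            raise ValueError(f"config argument is not valid: {variable_values}")
        result_values[name_and_value[0]] = name_and_value[1]
    return {"config": result_values}


spec_file_content = {"stack": "test", "config": {"config_variable": "test-value"}}
cli_config = parse_config_variables("CERC_TEST_PARAM_1=PASSED")
# Mirrors merged_config = {**new_config, **orig_config}: existing spec values take precedence.
spec_file_content["config"] = {**cli_config["config"], **spec_file_content["config"]}
print(spec_file_content)
# {'stack': 'test', 'config': {'CERC_TEST_PARAM_1': 'PASSED', 'config_variable': 'test-value'}}
```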
@ -235,6 +276,23 @@ def init(ctx, output, map_ports_to_host):
yaml.dump(spec_file_content, output_file)


def _write_config_file(spec_file: Path, config_env_file: Path):
spec_content = get_parsed_deployment_spec(spec_file)
# Note: we want to write an empty file even if we have no config variables
with open(config_env_file, "w") as output_file:
if "config" in spec_content and spec_content["config"]:
config_vars = spec_content["config"]
if config_vars:
for variable_name, variable_value in config_vars.items():
output_file.write(f"{variable_name}={variable_value}\n")


def _copy_files_to_directory(file_paths: List[Path], directory: Path):
for path in file_paths:
# Using copy to preserve the execute bit
copy(path, os.path.join(directory, os.path.basename(path)))


@click.command()
@click.option("--spec-file", required=True, help="Spec file to use to create this deployment")
@click.option("--deployment-dir", help="Create deployment files in this directory")
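`_write_config_file` then flattens that same `config` section into a plain `NAME=value` env file (`config.env`) for compose to consume; an empty file is still written when the spec carries no config. A minimal sketch of the resulting file contents (values reuse the test stack above):

```python
# Sketch: spec "config" section -> config.env lines, as _write_config_file does.
spec_config = {"CERC_TEST_PARAM_1": "PASSED", "config_variable": "test-value"}
with open("config.env", "w") as output_file:
    for variable_name, variable_value in spec_config.items():
        output_file.write(f"{variable_name}={variable_value}\n")
# config.env now reads:
#   CERC_TEST_PARAM_1=PASSED
#   config_variable=test-value
```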
@ -259,16 +317,22 @@ def create(ctx, spec_file, deployment_dir, network_dir, initial_peers):
# Copy spec file and the stack file into the deployment dir
copyfile(spec_file, os.path.join(deployment_dir, os.path.basename(spec_file)))
copyfile(stack_file, os.path.join(deployment_dir, os.path.basename(stack_file)))
# Copy any config variables from the spec file into an env file suitable for compose
_write_config_file(spec_file, os.path.join(deployment_dir, "config.env"))
# Copy the pod files into the deployment dir, fixing up content
pods = parsed_stack['pods']
pods = get_pod_list(parsed_stack)
destination_compose_dir = os.path.join(deployment_dir, "compose")
os.mkdir(destination_compose_dir)
destination_pods_dir = os.path.join(deployment_dir, "pods")
os.mkdir(destination_pods_dir)
data_dir = Path(__file__).absolute().parent.joinpath("data")
yaml = get_yaml()
for pod in pods:
pod_file_path = os.path.join(get_compose_file_dir(), f"docker-compose-{pod}.yml")
pod_file_path = get_pod_file_path(parsed_stack, pod)
parsed_pod_file = yaml.load(open(pod_file_path, "r"))
extra_config_dirs = _find_extra_config_dirs(parsed_pod_file, pod)
destination_pod_dir = os.path.join(destination_pods_dir, pod)
os.mkdir(destination_pod_dir)
if global_options(ctx).debug:
print(f"extra config dirs: {extra_config_dirs}")
_fixup_pod_file(parsed_pod_file, parsed_spec, destination_compose_dir)

@ -284,6 +348,12 @@ def create(ctx, spec_file, deployment_dir, network_dir, initial_peers):
# If the same config dir appears in multiple pods, it may already have been copied
if not os.path.exists(destination_config_dir):
copytree(source_config_dir, destination_config_dir)
# Copy the script files for the pod, if any
if pod_has_scripts(parsed_stack, pod):
destination_script_dir = os.path.join(destination_pod_dir, "scripts")
os.mkdir(destination_script_dir)
script_paths = get_pod_script_paths(parsed_stack, pod)
_copy_files_to_directory(script_paths, destination_script_dir)
# Delegate to the stack's Python code
# The deploy create command doesn't require a --stack argument so we need to insert the
# stack member here.
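Putting the `create` changes together, a deployment directory ends up with roughly the following layout (a sketch based only on the paths in the code above; `<pod>` is a placeholder and the directory and spec names are taken from the test script later in this diff):

```python
# Sketch of the deployment directory produced by `deploy create` (paths from the code above).
layout = [
    "test-deployment-dir/stack.yml",                 # copied stack file
    "test-deployment-dir/test-deployment-spec.yml",  # copied spec file
    "test-deployment-dir/config.env",                # written by _write_config_file
    "test-deployment-dir/compose/",                  # fixed-up pod compose files
    "test-deployment-dir/pods/<pod>/config/",        # copied extra config dirs
    "test-deployment-dir/pods/<pod>/scripts/",       # pod scripts, when present
]
print("\n".join(layout))
```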
app/util.py
@ -13,6 +13,7 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>.

from decouple import config
import os.path
import sys
import ruamel.yaml

@ -37,6 +38,16 @@ def get_stack_file_path(stack):
return stack_file_path


def get_dev_root_path(ctx):
if ctx and ctx.local_stack:
# TODO: This code probably doesn't work
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
else:
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
return dev_root_path


# Caller can pass either the name of a stack, or a path to a stack file
def get_parsed_stack_config(stack):
stack_file_path = stack if isinstance(stack, os.PathLike) else get_stack_file_path(stack)
@ -56,6 +67,68 @@ def get_parsed_stack_config(stack):
sys.exit(1)


def get_pod_list(parsed_stack):
# Handle both old and new format
pods = parsed_stack["pods"]
if type(pods[0]) is str:
result = pods
else:
result = []
for pod in pods:
result.append(pod["name"])
return result


def get_plugin_code_path(stack):
parsed_stack = get_parsed_stack_config(stack)
pods = parsed_stack["pods"]
# TODO: Hack
pod = pods[0]
if type(pod) is str:
result = get_stack_file_path(stack).parent
else:
pod_root_dir = os.path.join(get_dev_root_path(None), pod["repository"].split("/")[-1], pod["path"])
result = Path(os.path.join(pod_root_dir, "stack"))
return result


def get_pod_file_path(parsed_stack, pod_name: str):
pods = parsed_stack["pods"]
if type(pods[0]) is str:
result = os.path.join(get_compose_file_dir(), f"docker-compose-{pod_name}.yml")
else:
for pod in pods:
if pod["name"] == pod_name:
pod_root_dir = os.path.join(get_dev_root_path(None), pod["repository"].split("/")[-1], pod["path"])
result = os.path.join(pod_root_dir, "docker-compose.yml")
return result


def get_pod_script_paths(parsed_stack, pod_name: str):
pods = parsed_stack["pods"]
result = []
if not type(pods[0]) is str:
for pod in pods:
if pod["name"] == pod_name:
pod_root_dir = os.path.join(get_dev_root_path(None), pod["repository"].split("/")[-1], pod["path"])
if "pre_start_command" in pod:
result.append(os.path.join(pod_root_dir, pod["pre_start_command"]))
if "post_start_command" in pod:
result.append(os.path.join(pod_root_dir, pod["post_start_command"]))
return result


def pod_has_scripts(parsed_stack, pod_name: str):
pods = parsed_stack["pods"]
if type(pods[0]) is str:
result = False
else:
for pod in pods:
if pod["name"] == pod_name:
result = "pre_start_command" in pod or "post_start_command" in pod
return result


def get_compose_file_dir():
# TODO: refactor to use common code with deploy command
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
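These helpers are what let a stack reference pods in two shapes: the original plain list of pod names (compose files shipped inside app/data/compose) and a newer dict form that points at an external repository checked out under the dev root. A hedged sketch of the two shapes (the new-style repository and paths below are made up for illustration):

```python
# Old-style stack: pods are bare names; compose files live in app/data/compose.
old_style = {"pods": ["fixturenet-eth", "tx-spammer"]}

# New-style stack: each pod names the repository and path that hold its own
# docker-compose.yml plus optional pre/post start scripts (hypothetical values).
new_style = {
    "pods": [
        {
            "name": "test-pod",
            "repository": "git.vdb.to/cerc-io/example-pod-repo",
            "path": "stack/pod",
            "pre_start_command": "pre_start.sh",
        }
    ]
}


def pod_names(parsed_stack):
    # Same behaviour as get_pod_list(): return pod names for either format.
    pods = parsed_stack["pods"]
    return pods if isinstance(pods[0], str) else [p["name"] for p in pods]


print(pod_names(old_style))  # ['fixturenet-eth', 'tx-spammer']
print(pod_names(new_style))  # ['test-pod']
```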
@ -77,7 +77,7 @@ $TEST_TARGET_SO --stack test deploy down --delete-volumes
# Basic test of creating a deployment
test_deployment_dir=$CERC_REPO_BASE_DIR/test-deployment-dir
test_deployment_spec=$CERC_REPO_BASE_DIR/test-deployment-spec.yml
$TEST_TARGET_SO --stack test deploy init --output $test_deployment_spec
$TEST_TARGET_SO --stack test deploy init --output $test_deployment_spec --config CERC_TEST_PARAM_1=PASSED
# Check the file now exists
if [ ! -f "$test_deployment_spec" ]; then
echo "deploy init test: spec file not present"

@ -85,7 +85,7 @@ if [ ! -f "$test_deployment_spec" ]; then
exit 1
fi
echo "deploy init test: passed"
$TEST_TARGET_SO deploy create --spec-file $test_deployment_spec --deployment-dir $test_deployment_dir
$TEST_TARGET_SO --stack test deploy create --spec-file $test_deployment_spec --deployment-dir $test_deployment_dir
# Check the deployment dir exists
if [ ! -d "$test_deployment_dir" ]; then
echo "deploy create test: deployment directory not present"
@ -110,13 +110,20 @@ echo "deploy create output file test: passed"
# Try to start the deployment
$TEST_TARGET_SO deployment --dir $test_deployment_dir start
# Check logs command works
log_output_2=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
if [[ "$log_output_2" == *"Filesystem is fresh"* ]]; then
log_output_3=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
if [[ "$log_output_3" == *"Filesystem is fresh"* ]]; then
echo "deployment logs test: passed"
else
echo "deployment logs test: FAILED"
exit 1
fi
# Check the config variable CERC_TEST_PARAM_1 was passed correctly
if [[ "$log_output_3" == *"Test-param-1: PASSED"* ]]; then
echo "deployment config test: passed"
else
echo "deployment config test: FAILED"
exit 1
fi
# Stop and clean up
$TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes
echo "Test passed"
@ -37,8 +37,4 @@ $TEST_TARGET_SO --stack test deploy-system down
# Run same test but not using the stack definition
# Test building a stack container
$TEST_TARGET_SO build-containers --include cerc/test-container
# Deploy the test container
$TEST_TARGET_SO deploy-system --include test up
# Clean up
$TEST_TARGET_SO deploy-system --include test down
echo "Test passed"