Merge branch 'main' into dboreham/mainnet-laconic-setup

commit fc52dacb60
Author: David Boreham
Date: 2023-08-14 15:51:30 -06:00

67 changed files with 1381 additions and 285 deletions

@@ -0,0 +1,49 @@
version: '3.2'
services:
# Deploys the core (UniswapV3Factory) contract
sushiswap-v3-core:
image: cerc/sushiswap-v3-core:local
restart: on-failure
env_file:
# Defaults
- ../config/contract-sushiswap/deployment-params.env
environment:
# Overrides
CERC_ETH_RPC_ENDPOINT: ${ETH_RPC_ENDPOINT}
CERC_CHAIN_ID: ${CHAIN_ID}
CERC_ACCOUNT_PRIVATE_KEY: ${ACCOUNT_PRIVATE_KEY}
CERC_DEPLOY: ${DEPLOY}
volumes:
- ../config/network/wait-for-it.sh:/app/wait-for-it.sh
- ../config/contract-sushiswap/deploy-core-contracts.sh:/app/deploy-core-contracts.sh
- sushiswap_core_deployment:/app/deployments/docker
command: ["bash", "-c", "/app/deploy-core-contracts.sh && tail -f"]
extra_hosts:
- "host.docker.internal:host-gateway"
# Deploys the periphery (NFPM, token, etc.) contracts
sushiswap-v3-periphery:
image: cerc/sushiswap-v3-periphery:local
restart: on-failure
env_file:
# Defaults
- ../config/contract-sushiswap/deployment-params.env
environment:
# Overrides
CERC_ETH_RPC_ENDPOINT: ${ETH_RPC_ENDPOINT}
CERC_CHAIN_ID: ${CHAIN_ID}
CERC_ACCOUNT_PRIVATE_KEY: ${ACCOUNT_PRIVATE_KEY}
CERC_DEPLOY: ${DEPLOY}
volumes:
- ../config/network/wait-for-it.sh:/app/wait-for-it.sh
- ../config/contract-sushiswap/deploy-periphery-contracts.sh:/app/deploy-periphery-contracts.sh
- sushiswap_core_deployment:/app/core-deployments/docker
- sushiswap_periphery_deployment:/app/deployments/docker
command: ["bash", "-c", "/app/deploy-periphery-contracts.sh && tail -f"]
extra_hosts:
- "host.docker.internal:host-gateway"
volumes:
sushiswap_core_deployment:
sushiswap_periphery_deployment:

@@ -21,7 +21,7 @@ services:
cap_add:
- SYS_PTRACE
environment:
CERC_REMOTE_DEBUG: "true"
CERC_REMOTE_DEBUG: ${CERC_REMOTE_DEBUG:-true}
CERC_RUN_STATEDIFF: ${CERC_RUN_STATEDIFF:-detect}
CERC_STATEDIFF_DB_NODE_ID: 1
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}

@@ -0,0 +1,69 @@
services:
graph-node:
image: cerc/graph-node:local
depends_on:
db:
condition: service_healthy
ipfs:
condition: service_healthy
lotus-node-1:
condition: service_healthy
extra_hosts:
- host.docker.internal:host-gateway
environment:
ipfs: ipfs:5001
postgres_host: db
postgres_port: 5432
postgres_user: graph-node
postgres_pass: password
postgres_db: graph-node
# TODO: Get endpoint from env
ethereum: 'lotus-fixturenet:http://lotus-node-1:1234/rpc/v1'
GRAPH_LOG: info
ports:
- "8000"
- "8001"
- "8020"
- "8030"
healthcheck:
test: ["CMD", "nc", "-vz", "localhost", "8020"]
interval: 30s
timeout: 10s
retries: 10
start_period: 3s
ipfs:
image: ipfs/kubo:master-2023-02-20-714a968
volumes:
- ipfs-import:/import
- ipfs-data:/data/ipfs
ports:
- "8080"
- "4001"
- "5001"
db:
image: postgres:14-alpine
volumes:
- db-data:/var/lib/postgresql/data
environment:
POSTGRES_USER: "graph-node"
POSTGRES_DB: "graph-node"
POSTGRES_PASSWORD: "password"
POSTGRES_INITDB_ARGS: "-E UTF8 --locale=C"
command:
[
"postgres",
"-cshared_preload_libraries=pg_stat_statements"
]
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "5432"]
interval: 30s
timeout: 10s
retries: 10
start_period: 3s
ports:
- "5432"
volumes:
ipfs-import:
ipfs-data:
db-data:

@@ -8,17 +8,10 @@ services:
image: cerc/lotus:local
volumes:
- ../config/fixturenet-lotus/setup-miner.sh:/docker-entrypoint-scripts.d/setup-miner.sh
- ../config/fixturenet-lotus/genesis/devgen.car:/devgen.car
- $HOME/stack-orchestrator/app/data/config/fixturenet-lotus/genesis/.genesis-sectors:/root/.genesis-sectors
- lotus-shared:/root/.lotus-shared
healthcheck:
# test: ["CMD-SHELL", "grep 'started ChainNotify channel' /var/log/lotus.log"]
# test: ["CMD-SHELL", "[ -f /root/.lotus-shared/miner.addr ]"]
test: ["CMD-SHELL", "[ -d /root/.lotus-miner-local-net ]"]
interval: 10s
timeout: 10s
retries: 10
start_period: 60s
- ../config/fixturenet-lotus/fund-account.sh:/fund-account.sh
- lotus_miner_params:/var/tmp/filecoin-proof-parameters
- lotus_shared:/root/.lotus-shared
- lotus_miner_data:/root/data
entrypoint: ["sh", "/docker-entrypoint-scripts.d/setup-miner.sh"]
ports:
- "1234"
@@ -30,14 +23,23 @@ services:
hostname: lotus-node-1
env_file:
- ../config/fixturenet-lotus/lotus-env.env
environment:
# Use 0.0.0.0 so that calls can be made from outside the container
- LOTUS_API_LISTENADDRESS=/ip4/0.0.0.0/tcp/1234/http
image: cerc/lotus:local
volumes:
- ../config/fixturenet-lotus/setup-node.sh:/docker-entrypoint-scripts.d/setup-node.sh
- ../config/fixturenet-lotus/genesis/devgen.car:/devgen.car
- lotus-shared:/root/.lotus-shared
- lotus_node_1_params:/var/tmp/filecoin-proof-parameters
- lotus_shared:/root/.lotus-shared
- lotus_node_1_data:/root/data
healthcheck:
test: ["CMD", "nc", "-vz", "localhost", "1234"]
interval: 30s
timeout: 10s
retries: 60
start_period: 3s
depends_on:
lotus-miner:
condition: service_healthy
- lotus-miner
entrypoint: ["sh", "/docker-entrypoint-scripts.d/setup-node.sh"]
ports:
- "1234"
@@ -49,14 +51,23 @@ services:
hostname: lotus-node-2
env_file:
- ../config/fixturenet-lotus/lotus-env.env
environment:
# Use 0.0.0.0 so that calls can be made from outside the container
- LOTUS_API_LISTENADDRESS=/ip4/0.0.0.0/tcp/1234/http
image: cerc/lotus:local
volumes:
- ../config/fixturenet-lotus/setup-node.sh:/docker-entrypoint-scripts.d/setup-node.sh
- ../config/fixturenet-lotus/genesis/devgen.car:/devgen.car
- lotus-shared:/root/.lotus-shared
- lotus_node_2_params:/var/tmp/filecoin-proof-parameters
- lotus_shared:/root/.lotus-shared
- lotus_node_2_data:/root/data
healthcheck:
test: ["CMD", "nc", "-vz", "localhost", "1234"]
interval: 30s
timeout: 10s
retries: 60
start_period: 3s
depends_on:
lotus-miner:
condition: service_healthy
- lotus-miner
entrypoint: ["sh", "/docker-entrypoint-scripts.d/setup-node.sh"]
ports:
- "1234"
@@ -65,4 +76,10 @@ services:
- "1777"
volumes:
lotus-shared:
lotus_miner_params:
lotus_node_1_params:
lotus_node_2_params:
lotus_shared:
lotus_miner_data:
lotus_node_1_data:
lotus_node_2_data:

@@ -19,7 +19,7 @@ services:
cap_add:
- SYS_PTRACE
environment:
CERC_REMOTE_DEBUG: "true"
CERC_REMOTE_DEBUG: ${CERC_REMOTE_DEBUG:-true}
CERC_RUN_STATEDIFF: "detect"
CERC_STATEDIFF_DB_NODE_ID: 1
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}

@@ -25,8 +25,8 @@ services:
PROM_HTTP: "true"
PROM_HTTP_ADDR: "0.0.0.0"
PROM_HTTP_PORT: "8090"
LOGRUS_LEVEL: "debug"
CERC_REMOTE_DEBUG: "true"
LOG_LEVEL: "debug"
CERC_REMOTE_DEBUG: ${CERC_REMOTE_DEBUG:-true}
volumes:
- type: bind
source: ../config/ipld-eth-server/chain.json

@@ -0,0 +1,26 @@
version: '3.2'
services:
# Deploys the sushiswap v3 subgraph
sushiswap-subgraph-v3:
image: cerc/sushiswap-subgraphs:local
restart: on-failure
depends_on:
graph-node:
condition: service_healthy
environment:
- APP=v3
- NETWORK=lotus-fixturenet
command: ["bash", "-c", "./run-v3.sh"]
working_dir: /app/subgraphs/v3
volumes:
- ../config/sushiswap-subgraph-v3/lotus-fixturenet.js.template:/app/config/lotus-fixturenet.js.template
- ../config/sushiswap-subgraph-v3/run-v3.sh:/app/subgraphs/v3/run-v3.sh
- sushiswap_core_deployment:/app/subgraphs/v3/core-deployments/docker
- sushiswap_periphery_deployment:/app/subgraphs/v3/deployments/docker
extra_hosts:
- "host.docker.internal:host-gateway"
volumes:
sushiswap_core_deployment:
sushiswap_periphery_deployment:

@@ -0,0 +1,187 @@
version: '3.2'
services:
sushiswap-watcher-db:
restart: unless-stopped
image: postgres:14-alpine
environment:
- POSTGRES_USER=vdbm
- POSTGRES_MULTIPLE_DATABASES=erc20-watcher,sushi-watcher,sushi-info-watcher,erc20-watcher-job-queue,sushi-watcher-job-queue,sushi-info-watcher-job-queue
- POSTGRES_EXTENSION=erc20-watcher-job-queue:pgcrypto,sushi-watcher-job-queue:pgcrypto,sushi-info-watcher-job-queue:pgcrypto
- POSTGRES_PASSWORD=password
command: ["postgres", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "work_mem=2GB"]
volumes:
- ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh
- ../config/postgresql/create-pg-stat-statements.sql:/docker-entrypoint-initdb.d/create-pg-stat-statements.sql
- sushiswap_watcher_db_data:/var/lib/postgresql/data
ports:
- "0.0.0.0:15435:5432"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "5432"]
interval: 20s
timeout: 5s
retries: 15
start_period: 10s
shm_size: '8GB'
erc20-watcher-server:
restart: unless-stopped
depends_on:
sushiswap-watcher-db:
condition: service_healthy
image: cerc/watcher-sushiswap:local
working_dir: /app/packages/erc20-watcher
environment:
- DEBUG=vulcanize:*
command: ["node", "--enable-source-maps", "dist/server.js"]
volumes:
- ../config/watcher-sushiswap/erc20-watcher.toml:/app/packages/erc20-watcher/environments/local.toml
ports:
- "0.0.0.0:3005:3001"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "3001"]
interval: 20s
timeout: 5s
retries: 15
start_period: 5s
extra_hosts:
- "host.docker.internal:host-gateway"
sushi-watcher-job-runner:
restart: unless-stopped
depends_on:
sushiswap-watcher-db:
condition: service_healthy
lotus-node-1:
condition: service_healthy
image: cerc/watcher-sushiswap:local
working_dir: /app/packages/uni-watcher
environment:
- DEBUG=vulcanize:*
command: ["node", "--enable-source-maps", "dist/job-runner.js"]
volumes:
- ../config/watcher-sushiswap/sushi-watcher.toml:/app/packages/uni-watcher/environments/local.toml
- ../config/watcher-sushiswap/sushi-watcher-test.toml:/app/packages/uni-watcher/environments/test.toml
ports:
- "0.0.0.0:9004:9000"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "9000"]
interval: 20s
timeout: 5s
retries: 15
start_period: 5s
extra_hosts:
- "host.docker.internal:host-gateway"
sushi-watcher-server:
restart: unless-stopped
depends_on:
sushiswap-watcher-db:
condition: service_healthy
sushi-watcher-job-runner:
condition: service_healthy
image: cerc/watcher-sushiswap:local
env_file:
- ../config/watcher-sushiswap/lotus-params.env
environment:
- DEBUG=vulcanize:*
working_dir: /app/packages/uni-watcher
command: ["node", "--enable-source-maps", "dist/server.js"]
volumes:
- ../config/watcher-sushiswap/sushi-watcher.toml:/app/packages/uni-watcher/environments/local.toml
- ../config/watcher-sushiswap/sushi-watcher-test.toml:/app/packages/uni-watcher/environments/test.toml
ports:
- "0.0.0.0:3003:3003"
- "0.0.0.0:9005:9001"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "3003"]
interval: 20s
timeout: 5s
retries: 15
start_period: 5s
extra_hosts:
- "host.docker.internal:host-gateway"
sushi-info-watcher-job-runner:
restart: unless-stopped
depends_on:
sushiswap-watcher-db:
condition: service_healthy
erc20-watcher-server:
condition: service_healthy
lotus-node-1:
condition: service_healthy
sushi-watcher-server:
condition: service_healthy
image: cerc/watcher-sushiswap:local
working_dir: /app/packages/uni-info-watcher
environment:
- DEBUG=vulcanize:*
command: ["node", "--enable-source-maps", "dist/job-runner.js"]
volumes:
- ../config/watcher-sushiswap/sushi-info-watcher.toml:/app/packages/uni-info-watcher/environments/local.toml
- ../config/watcher-sushiswap/sushi-info-watcher-test.toml:/app/packages/uni-info-watcher/environments/test.toml
ports:
- "0.0.0.0:9006:9002"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "9002"]
interval: 20s
timeout: 5s
retries: 15
start_period: 5s
extra_hosts:
- "host.docker.internal:host-gateway"
sushi-info-watcher-server:
restart: unless-stopped
depends_on:
sushiswap-watcher-db:
condition: service_healthy
erc20-watcher-server:
condition: service_healthy
sushi-watcher-server:
condition: service_healthy
sushi-info-watcher-job-runner:
condition: service_healthy
image: cerc/watcher-sushiswap:local
env_file:
- ../config/watcher-sushiswap/lotus-params.env
working_dir: /app/packages/uni-info-watcher
command: ["node", "--enable-source-maps", "dist/server.js"]
volumes:
- ../config/watcher-sushiswap/sushi-info-watcher.toml:/app/packages/uni-info-watcher/environments/local.toml
- ../config/watcher-sushiswap/sushi-info-watcher-test.toml:/app/packages/uni-info-watcher/environments/test.toml
ports:
- "0.0.0.0:3004:3004"
- "0.0.0.0:9007:9003"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "3004"]
interval: 20s
timeout: 5s
retries: 15
start_period: 5s
extra_hosts:
- "host.docker.internal:host-gateway"
sushiswap-v3-info:
depends_on:
sushi-info-watcher-server:
condition: service_healthy
image: cerc/uniswap-v3-info:local
ports:
- "0.0.0.0:3006:3000"
# Deploys the core (UniswapV3Factory) contract
sushiswap-v3-core:
image: cerc/sushiswap-v3-core:local
env_file:
- ../config/watcher-sushiswap/lotus-params.env
# Deploys the periphery (NFPM, token, etc.) contracts
sushiswap-v3-periphery:
image: cerc/sushiswap-v3-periphery:local
env_file:
- ../config/watcher-sushiswap/lotus-params.env
volumes:
sushiswap_watcher_db_data:

@@ -0,0 +1,34 @@
#!/bin/bash
set -e
# Chain config
export ETH_RPC_ENDPOINT="${CERC_ETH_RPC_ENDPOINT:-${CERC_DEFAULT_ETH_RPC_ENDPOINT}}"
export CHAIN_ID="${CERC_CHAIN_ID:-${CERC_DEFAULT_CHAIN_ID}}"
export ACCOUNT_PRIVATE_KEY="${CERC_ACCOUNT_PRIVATE_KEY:-${CERC_DEFAULT_ACCOUNT_PRIVATE_KEY}}"
# Option
DEPLOY="${CERC_DEPLOY:-${CERC_DEFAULT_DEPLOY}}"
# Create a .env file
echo "ETH_RPC_ENDPOINT=$ETH_RPC_ENDPOINT" > .env
echo "CHAIN_ID=$CHAIN_ID" >> .env
echo "ACCOUNT_PRIVATE_KEY=$ACCOUNT_PRIVATE_KEY" >> .env
echo "Using RPC endpoint ${ETH_RPC_ENDPOINT}"
# Wait for the RPC endpoint to be up
endpoint=${ETH_RPC_ENDPOINT#http://}
endpoint=${endpoint#https://}
RPC_HOST=$(echo "$endpoint" | awk -F'[:/]' '{print $1}')
RPC_PORT=$(echo "$endpoint" | awk -F'[:/]' '{print $2}')
./wait-for-it.sh -h "${RPC_HOST}" -p "${RPC_PORT}" -s -t 0
if [ "$DEPLOY" = true ] && [ ! -e "/app/deployments/docker/UniswapV3Factory.json" ]; then
echo "Performing core contract deployments..."
pnpm hardhat --network docker deploy --tags UniswapV3Factory
else
echo "Skipping contract deployments"
fi
echo "Done"

@@ -0,0 +1,46 @@
#!/bin/bash
set -e
# Chain config
ETH_RPC_ENDPOINT="${CERC_ETH_RPC_ENDPOINT:-${CERC_DEFAULT_ETH_RPC_ENDPOINT}}"
CHAIN_ID="${CERC_CHAIN_ID:-${CERC_DEFAULT_CHAIN_ID}}"
ACCOUNT_PRIVATE_KEY="${CERC_ACCOUNT_PRIVATE_KEY:-${CERC_DEFAULT_ACCOUNT_PRIVATE_KEY}}"
# Option
DEPLOY="${CERC_DEPLOY:-${CERC_DEFAULT_DEPLOY}}"
# Create a .env file
echo "ETH_RPC_ENDPOINT=$ETH_RPC_ENDPOINT" > .env
echo "CHAIN_ID=$CHAIN_ID" >> .env
echo "ACCOUNT_PRIVATE_KEY=$ACCOUNT_PRIVATE_KEY" >> .env
echo "Using RPC endpoint $ETH_RPC_ENDPOINT"
# Wait for the RPC endpoint to be up
endpoint=${ETH_RPC_ENDPOINT#http://}
endpoint=${endpoint#https://}
RPC_HOST=$(echo "$endpoint" | awk -F'[:/]' '{print $1}')
RPC_PORT=$(echo "$endpoint" | awk -F'[:/]' '{print $2}')
./wait-for-it.sh -h "${RPC_HOST}" -p "${RPC_PORT}" -s -t 0
if [ "$DEPLOY" = true ] && [ ! -e "/app/deployments/docker/NonfungiblePositionManager.json" ]; then
# Loop until the factory deployment is detected
echo "Waiting for core deployments to occur"
while [ ! -f /app/core-deployments/docker/UniswapV3Factory.json ]; do
sleep 5
done
echo "Reading factory address from core deployments"
FACTORY_ADDRESS=$(jq -r '.address' /app/core-deployments/docker/UniswapV3Factory.json)
echo "Using UniswapV3Factory at $FACTORY_ADDRESS"
echo "FACTORY_ADDRESS=$FACTORY_ADDRESS" >> .env
echo "Performing periphery contract deployments..."
yarn hardhat --network docker deploy --tags NonfungiblePositionManager
else
echo "Skipping contract deployments"
fi
echo "Done"

@@ -0,0 +1,11 @@
# Chain config
CERC_DEFAULT_ETH_RPC_ENDPOINT="http://lotus-node-1:1234/rpc/v1"
CERC_DEFAULT_CHAIN_ID=31415926
# From app/data/config/fixturenet-lotus/fund-account.sh
CERC_DEFAULT_ACCOUNT_PRIVATE_KEY="0xc05fd3613bcd62a4f25e5eba1f464d0b76d74c3f771a7c2f13e26ad6439444b3"
# Options
CERC_DEFAULT_DEPLOY=true
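These are only fallback values: the compose file above passes `CERC_ETH_RPC_ENDPOINT: ${ETH_RPC_ENDPOINT}` and friends through as overrides, so a deployment can replace them from the host shell without editing this file. A sketch (the values shown simply repeat the defaults above):
```bash
# Set before `deploy up`; forwarded by the compose file as CERC_* overrides
# to the contract-deployment containers
export ETH_RPC_ENDPOINT="http://lotus-node-1:1234/rpc/v1"
export CHAIN_ID=31415926
export DEPLOY=true
```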

@@ -18,7 +18,8 @@ CERC_STATEDIFF_DB_NAME="cerc_testing"
CERC_STATEDIFF_DB_USER="vdbm"
CERC_STATEDIFF_DB_PASSWORD="password"
CERC_STATEDIFF_DB_GOOSE_MIN_VER=${CERC_STATEDIFF_DB_GOOSE_MIN_VER:-18}
CERC_STATEDIFF_DB_LOG_STATEMENTS="false"
CERC_STATEDIFF_DB_LOG_STATEMENTS="${CERC_STATEDIFF_DB_LOG_STATEMENTS:-false}"
CERC_STATEDIFF_WORKERS=2
CERC_GETH_VMODULE="statediff/*=5,rpc/*=5"
CERC_GETH_VERBOSITY=${CERC_GETH_VERBOSITY:-3}

@@ -0,0 +1,20 @@
#!/bin/bash
# ETH account with pk c05fd3613bcd62a4f25e5eba1f464d0b76d74c3f771a7c2f13e26ad6439444b3
ETH_ADDRESS=0xD375B03bd3A2434A9f675bEC4Ccd68aC5e67C743
AMOUNT=1000
# Pre-fund stat
PREFUND_STAT_OUTPUT=$(lotus evm stat $ETH_ADDRESS)
FILECOIN_ADDRESS=$(echo "$PREFUND_STAT_OUTPUT" | grep -oP 'Filecoin address:\s+\K\S+')
echo Filecoin address: "$FILECOIN_ADDRESS"
echo Sending balance to "$FILECOIN_ADDRESS"
lotus send --from $(lotus wallet default) "$FILECOIN_ADDRESS" $AMOUNT
# Post-fund stat
echo lotus evm stat $ETH_ADDRESS
lotus evm stat $ETH_ADDRESS
echo "Account with ETH address $ETH_ADDRESS funded"

@@ -1 +0,0 @@
(deleted file contained binary content, not shown)

@@ -1 +0,0 @@
(deleted file contained binary content, not shown)

@@ -1,71 +0,0 @@
{
"t01000": {
"ID": "t01000",
"Owner": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q",
"Worker": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q",
"PeerId": "12D3KooWG5q6pWJVdPBhDBv9AjWVbUh4xxTAZ7xvgZSjczWuD2Z9",
"MarketBalance": "0",
"PowerBalance": "0",
"SectorSize": 2048,
"Sectors": [
{
"CommR": {
"/": "bagboea4b5abcboxypcewlkmrat2myu4vthk3ii2pcomak7nhqmdbb6sxlolp2wdf"
},
"CommD": {
"/": "baga6ea4seaqn3jfixthmdgksv4vhfeuyvr6upw6tvaqbmzmsyxnzosm4pwgnmlq"
},
"SectorID": 0,
"Deal": {
"PieceCID": {
"/": "baga6ea4seaqn3jfixthmdgksv4vhfeuyvr6upw6tvaqbmzmsyxnzosm4pwgnmlq"
},
"PieceSize": 2048,
"VerifiedDeal": false,
"Client": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q",
"Provider": "t01000",
"Label": "0",
"StartEpoch": 0,
"EndEpoch": 9001,
"StoragePricePerEpoch": "0",
"ProviderCollateral": "0",
"ClientCollateral": "0"
},
"DealClientKey": {
"Type": "bls",
"PrivateKey": "tFvSRiSg2G3Ssgg0PSYy23XyjaIMXpsmdyG2B7UFLT4="
},
"ProofType": 5
},
{
"CommR": {
"/": "bagboea4b5abcb6krzypqcczhcnbeyjcqkeo6omfergm336o3kitugh3jgjog2yqq"
},
"CommD": {
"/": "baga6ea4seaqhondpb2373hjasjplxvbjzi5n5mm4fbbhjxp5ptnbq4cibapkeii"
},
"SectorID": 1,
"Deal": {
"PieceCID": {
"/": "baga6ea4seaqhondpb2373hjasjplxvbjzi5n5mm4fbbhjxp5ptnbq4cibapkeii"
},
"PieceSize": 2048,
"VerifiedDeal": false,
"Client": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q",
"Provider": "t01000",
"Label": "1",
"StartEpoch": 0,
"EndEpoch": 9001,
"StoragePricePerEpoch": "0",
"ProviderCollateral": "0",
"ClientCollateral": "0"
},
"DealClientKey": {
"Type": "bls",
"PrivateKey": "tFvSRiSg2G3Ssgg0PSYy23XyjaIMXpsmdyG2B7UFLT4="
},
"ProofType": 5
}
]
}
}

@@ -1 +0,0 @@
7b2254797065223a22626c73222c22507269766174654b6579223a227446765352695367324733537367673050535979323358796a61494d5870736d64794732423755464c54343d227d

@@ -1,11 +0,0 @@
{
"ID": "f355523e-69d0-4984-bd0e-9588487c6231",
"Weight": 0,
"CanSeal": false,
"CanStore": false,
"MaxStorage": 0,
"Groups": null,
"AllowTo": null,
"AllowTypes": null,
"DenyTypes": null
}

@@ -1,108 +0,0 @@
{
"NetworkVersion": 18,
"Accounts": [
{
"Type": "account",
"Balance": "50000000000000000000000000",
"Meta": {
"Owner": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q"
}
}
],
"Miners": [
{
"ID": "t01000",
"Owner": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q",
"Worker": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q",
"PeerId": "12D3KooWG5q6pWJVdPBhDBv9AjWVbUh4xxTAZ7xvgZSjczWuD2Z9",
"MarketBalance": "0",
"PowerBalance": "0",
"SectorSize": 2048,
"Sectors": [
{
"CommR": {
"/": "bagboea4b5abcboxypcewlkmrat2myu4vthk3ii2pcomak7nhqmdbb6sxlolp2wdf"
},
"CommD": {
"/": "baga6ea4seaqn3jfixthmdgksv4vhfeuyvr6upw6tvaqbmzmsyxnzosm4pwgnmlq"
},
"SectorID": 0,
"Deal": {
"PieceCID": {
"/": "baga6ea4seaqn3jfixthmdgksv4vhfeuyvr6upw6tvaqbmzmsyxnzosm4pwgnmlq"
},
"PieceSize": 2048,
"VerifiedDeal": false,
"Client": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q",
"Provider": "t01000",
"Label": "0",
"StartEpoch": 0,
"EndEpoch": 9001,
"StoragePricePerEpoch": "0",
"ProviderCollateral": "0",
"ClientCollateral": "0"
},
"DealClientKey": {
"Type": "bls",
"PrivateKey": "tFvSRiSg2G3Ssgg0PSYy23XyjaIMXpsmdyG2B7UFLT4="
},
"ProofType": 5
},
{
"CommR": {
"/": "bagboea4b5abcb6krzypqcczhcnbeyjcqkeo6omfergm336o3kitugh3jgjog2yqq"
},
"CommD": {
"/": "baga6ea4seaqhondpb2373hjasjplxvbjzi5n5mm4fbbhjxp5ptnbq4cibapkeii"
},
"SectorID": 1,
"Deal": {
"PieceCID": {
"/": "baga6ea4seaqhondpb2373hjasjplxvbjzi5n5mm4fbbhjxp5ptnbq4cibapkeii"
},
"PieceSize": 2048,
"VerifiedDeal": false,
"Client": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q",
"Provider": "t01000",
"Label": "1",
"StartEpoch": 0,
"EndEpoch": 9001,
"StoragePricePerEpoch": "0",
"ProviderCollateral": "0",
"ClientCollateral": "0"
},
"DealClientKey": {
"Type": "bls",
"PrivateKey": "tFvSRiSg2G3Ssgg0PSYy23XyjaIMXpsmdyG2B7UFLT4="
},
"ProofType": 5
}
]
}
],
"NetworkName": "localnet-6d52dae5-ff29-4bac-a45d-f84e6c07564c",
"VerifregRootKey": {
"Type": "multisig",
"Balance": "0",
"Meta": {
"Signers": [
"t1ceb34gnsc6qk5dt6n7xg6ycwzasjhbxm3iylkiy"
],
"Threshold": 1,
"VestingDuration": 0,
"VestingStart": 0
}
},
"RemainderAccount": {
"Type": "multisig",
"Balance": "0",
"Meta": {
"Signers": [
"t1ceb34gnsc6qk5dt6n7xg6ycwzasjhbxm3iylkiy"
],
"Threshold": 1,
"VestingDuration": 0,
"VestingStart": 0
}
}
}

@@ -1,5 +1,6 @@
LOTUS_PATH=~/.lotus-local-net
LOTUS_MINER_PATH=~/.lotus-miner-local-net
LOTUS_PATH=/root/data/.lotus-local-net
LOTUS_MINER_PATH=/root/data/.lotus-miner-local-net
LOTUS_SKIP_GENESIS_CHECK=_yes_
LOTUS_FEVM_ENABLEETHRPC=true
CGO_CFLAGS_ALLOW="-D__BLST_PORTABLE__"
CGO_CFLAGS="-D__BLST_PORTABLE__"

@@ -2,19 +2,29 @@
lotus --version
# # remove old bootnode peer info if present
# [ -f /root/.lotus-shared/miner.addr ] && rm /root/.lotus-shared/miner.addr
# remove old bootnode peer info if present
if [ -f /root/.lotus-shared/miner.addr ]; then
rm /root/.lotus-shared/miner.addr
fi
##TODO: generate genesis files inside container instead of bundling in config dir
##something like commands below should work, other scripts/compose will have to be updated to corresponding directories
# lotus fetch-params 2048
# lotus-seed pre-seal --sector-size 2KiB --num-sectors 2
# lotus-seed genesis new localnet.json
# lotus-seed genesis add-miner localnet.json ~/.genesis-sectors/pre-seal-t01000.json
# Check if filecoin-proof-parameters exist; avoid fetching if they do
if [ -z "$(find "/var/tmp/filecoin-proof-parameters" -maxdepth 1 -type f)" ]; then
echo "Proof params not found, fetching..."
lotus fetch-params 2048
else
echo "Existing proof params found"
fi
# if genesis is not already setup
if [ ! -f /root/data/localnet.json ]; then
lotus-seed --sector-dir /root/data/.genesis-sectors pre-seal --sector-size 2KiB --num-sectors 2
lotus-seed --sector-dir /root/data/.genesis-sectors genesis new /root/data/localnet.json
lotus-seed --sector-dir /root/data/.genesis-sectors genesis add-miner /root/data/localnet.json /root/data/.genesis-sectors/pre-seal-t01000.json
fi
# start daemon
nohup lotus daemon --genesis=/devgen.car --profile=bootstrapper --bootstrap=false > /var/log/lotus.log 2>&1 &
# /root/.lotus-shared/devgen.car path
nohup lotus daemon --lotus-make-genesis=/root/.lotus-shared/devgen.car --profile=bootstrapper --genesis-template=/root/data/localnet.json --bootstrap=false > /var/log/lotus.log 2>&1 &
# Loop until the daemon is started
echo "Waiting for daemon to start..."
@@ -23,16 +33,23 @@ while ! grep -q "started ChainNotify channel" /var/log/lotus.log ; do
done
echo "Daemon started."
# publish bootnode peer info to shared volume
lotus net listen | awk 'NR==1{print}' > /root/.lotus-shared/miner.addr
# copy genesis file to shared volume
cp /devgen.car /root/.lotus-shared
# if miner not already initialized
if [ ! -d /root/.lotus-miner-local-net ]; then
if [ ! -d $LOTUS_MINER_PATH ]; then
# initialize miner
lotus wallet import --as-default ~/.genesis-sectors/pre-seal-t01000.key
lotus-miner init --genesis-miner --actor=t01000 --sector-size=2KiB --pre-sealed-sectors=~/.genesis-sectors --pre-sealed-metadata=~/.genesis-sectors/pre-seal-t01000.json --nosync
lotus wallet import --as-default /root/data/.genesis-sectors/pre-seal-t01000.key
# fund a known account for usage
/fund-account.sh
lotus-miner init --genesis-miner --actor=t01000 --sector-size=2KiB --pre-sealed-sectors=/root/data/.genesis-sectors --pre-sealed-metadata=/root/data/.genesis-sectors/pre-seal-t01000.json --nosync
fi
# publish bootnode peer info to shared volume
lotus net listen | grep "$(ip addr | grep inet | grep -v '127.0.0.1' | sort | head -1 | awk '{print $2}' | cut -d '/' -f1)" | head -1 > /root/.lotus-shared/miner.addr
# start miner
nohup lotus-miner run --nosync &

@@ -2,23 +2,25 @@
lotus --version
##TODO: paths can use values from lotus-env.env file
# Loop until the daemon is started
echo "Waiting for miner to share peering info..."
while [ ! -f /root/.lotus-shared/miner.addr ]; do
sleep 5
done
echo "Resuming..."
# if not already initialized
if [ ! -f /root/.lotus-local-net/config.toml ]; then
# init node config
mkdir $HOME/.lotus-local-net
lotus config default > $HOME/.lotus-local-net/config.toml
mkdir -p $LOTUS_PATH
lotus config default > $LOTUS_PATH/config.toml
# add bootstrap peer info if available
if [ -f /root/.lotus-shared/miner.addr ]; then
MINER_ADDR=\"$(cat /root/.lotus-shared/miner.addr)\"
# add bootstrap peer id to config file
sed -i "/^\[Libp2p\]/a \ \ BootstrapPeers = [$MINER_ADDR]" $HOME/.lotus-local-net/config.toml
sed -i "/^\[Libp2p\]/a \ \ BootstrapPeers = [$MINER_ADDR]" $LOTUS_PATH/config.toml
else
echo "Bootstrap peer info not found, unable to configure. Manual peering will be required."
fi
fi
# start node
lotus daemon --genesis=/devgen.car
lotus daemon --genesis=/root/.lotus-shared/devgen.car

@@ -0,0 +1,20 @@
module.exports = {
network: 'lotus-fixturenet',
v3: {
factory: {
address: 'FACTORY_ADDRESS',
startBlock: FACTORY_BLOCK
},
positionManager: {
address: 'NFPM_ADDRESS',
startBlock: NFPM_BLOCK
},
native: { address: 'NATIVE_ADDRESS' },
whitelistedTokenAddresses: [
'NATIVE_ADDRESS',
],
stableTokenAddresses: [
],
minimumEthLocked: 1.5
}
}

@@ -0,0 +1,39 @@
#!/bin/bash
set -e
# Loop until the NFPM deployment is detected
echo "Waiting for sushiswap-periphery deployments to occur"
while [ ! -f ./deployments/docker/NonfungiblePositionManager.json ]; do
sleep 5
done
echo "Reading contract addresses and block numbers from deployments"
FACTORY_ADDRESS=$(jq -r '.address' ./core-deployments/docker/UniswapV3Factory.json)
FACTORY_BLOCK=$(jq -r '.receipt.blockNumber' ./core-deployments/docker/UniswapV3Factory.json)
NATIVE_ADDRESS=$(jq -r '.address' ./deployments/docker/WFIL.json)
NFPM_ADDRESS=$(jq -r '.address' ./deployments/docker/NonfungiblePositionManager.json)
NFPM_BLOCK=$(jq -r '.receipt.blockNumber' ./deployments/docker/NonfungiblePositionManager.json)
# Read the JavaScript file content
file_content=$(</app/config/lotus-fixturenet.js.template)
# Replace uppercase words with environment variables
echo "Reading values in lotus-fixturenet config"
replaced_content=$(echo "$file_content" | sed -e "s/FACTORY_ADDRESS/$FACTORY_ADDRESS/g" \
-e "s/FACTORY_BLOCK/$FACTORY_BLOCK/g" \
-e "s/NFPM_ADDRESS/$NFPM_ADDRESS/g" \
-e "s/NFPM_BLOCK/$NFPM_BLOCK/g" \
-e "s/NATIVE_ADDRESS/$NATIVE_ADDRESS/g")
# Write the replaced content back to the JavaScript file
echo "$replaced_content" > /app/config/lotus-fixturenet.js
echo "Building subgraph and deploying to graph-node..."
pnpm run generate
pnpm run build
pnpm exec graph create --node http://graph-node:8020/ sushiswap/v3-lotus
pnpm exec graph deploy --node http://graph-node:8020/ --ipfs http://ipfs:5001 --version-label 0.1.0 sushiswap/v3-lotus
echo "Done"

@@ -0,0 +1,39 @@
[server]
host = "0.0.0.0"
port = 3001
mode = "eth_call"
kind = "lazy"
[metrics]
host = "127.0.0.1"
port = 9000
[metrics.gql]
port = 9001
[database]
type = "postgres"
host = "sushiswap-watcher-db"
port = 5432
database = "erc20-watcher"
username = "vdbm"
password = "password"
synchronize = true
logging = false
maxQueryExecutionTime = 100
[upstream]
[upstream.ethServer]
rpcProviderEndpoint = "http://lotus-node-1:1234/rpc/v1"
rpcClient = true
[upstream.cache]
name = "requests"
enabled = false
deleteOnStart = false
[jobQueue]
dbConnectionString = "postgres://vdbm:password@sushiswap-watcher-db:5432/erc20-watcher-job-queue"
maxCompletionLagInSecs = 300
jobDelayInMilliSecs = 100
eventsInBatch = 50
blockDelayInMilliSecs = 2000

@@ -0,0 +1,6 @@
# Lotus node config
ETH_RPC_ENDPOINT="http://lotus-node-1:1234/rpc/v1"
CHAIN_ID=31415926
# From app/data/config/fixturenet-lotus/fund-account.sh
ACCOUNT_PRIVATE_KEY="0xc05fd3613bcd62a4f25e5eba1f464d0b76d74c3f771a7c2f13e26ad6439444b3"

@@ -0,0 +1,45 @@
[server]
host = "0.0.0.0"
port = 3004
# Use mode demo when running watcher locally.
# Mode demo whitelists all tokens so that entity values get updated.
mode = "demo"
[database]
type = "postgres"
host = "sushiswap-watcher-db"
port = 5432
database = "sushi-info-watcher"
username = "vdbm"
password = "password"
synchronize = true
logging = false
maxQueryExecutionTime = 100
[upstream]
[upstream.ethServer]
rpcProviderEndpoint = "http://lotus-node-1:1234/rpc/v1"
rpcClient = true
[upstream.cache]
name = "requests"
enabled = false
deleteOnStart = false
[upstream.uniWatcher]
gqlEndpoint = "http://sushi-watcher-server:3003/graphql"
gqlSubscriptionEndpoint = "ws://sushi-watcher-server:3003/graphql"
[upstream.tokenWatcher]
gqlEndpoint = "http://erc20-watcher-server:3001/graphql"
gqlSubscriptionEndpoint = "ws://erc20-watcher-server:3001/graphql"
[jobQueue]
dbConnectionString = "postgres://vdbm:password@sushiswap-watcher-db:5432/sushi-info-watcher-job-queue"
maxCompletionLagInSecs = 300
jobDelayInMilliSecs = 1000
eventsInBatch = 50
subgraphEventsOrder = true
blockDelayInMilliSecs = 2000
prefetchBlocksInMem = false
prefetchBlockCount = 10

@@ -0,0 +1,90 @@
[server]
host = "0.0.0.0"
port = 3004
mode = "demo"
kind = "active"
# Checkpointing state.
checkpointing = true
# Checkpoint interval in number of blocks.
checkpointInterval = 50000
# Enable state creation
enableState = false
# Max block range for which to return events in eventsInRange GQL query.
# Use -1 for skipping check on block range.
maxEventsBlockRange = 1000
# Interval in number of blocks at which to clear entities cache.
clearEntitiesCacheInterval = 1000
# Boolean to skip updating entity fields required in state creation and not required in the frontend.
skipStateFieldsUpdate = false
# Boolean to load GQL query nested entity relations sequentially.
loadRelationsSequential = false
# Max GQL API requests to process simultaneously (defaults to 1).
maxSimultaneousRequests = 1
# GQL cache settings
[server.gqlCache]
enabled = true
# Max in-memory cache size (in bytes) (default 8 MB)
# maxCacheSize
# GQL cache-control max-age settings (in seconds)
maxAge = 15
timeTravelMaxAge = 86400 # 1 day
[metrics]
host = "0.0.0.0"
port = 9002
[metrics.gql]
port = 9003
[database]
type = "postgres"
host = "sushiswap-watcher-db"
port = 5432
database = "sushi-info-watcher"
username = "vdbm"
password = "password"
synchronize = true
logging = false
maxQueryExecutionTime = 100
[database.extra]
# maximum number of clients the pool should contain
max = 20
[upstream]
[upstream.ethServer]
rpcProviderEndpoint = "http://lotus-node-1:1234/rpc/v1"
rpcClient = true
[upstream.cache]
name = "requests"
enabled = false
deleteOnStart = false
[upstream.uniWatcher]
gqlEndpoint = "http://sushi-watcher-server:3003/graphql"
gqlSubscriptionEndpoint = "ws://sushi-watcher-server:3003/graphql"
[upstream.tokenWatcher]
gqlEndpoint = "http://erc20-watcher-server:3001/graphql"
gqlSubscriptionEndpoint = "ws://erc20-watcher-server:3001/graphql"
[jobQueue]
dbConnectionString = "postgres://vdbm:password@sushiswap-watcher-db:5432/sushi-info-watcher-job-queue"
maxCompletionLagInSecs = 300
jobDelayInMilliSecs = 1000
eventsInBatch = 50
subgraphEventsOrder = true
blockDelayInMilliSecs = 2000
prefetchBlocksInMem = false
prefetchBlockCount = 10

@@ -0,0 +1,34 @@
[server]
host = "0.0.0.0"
port = 3003
[database]
type = "postgres"
host = "sushiswap-watcher-db"
port = 5432
database = "sushi-watcher"
username = "vdbm"
password = "password"
synchronize = true
logging = false
maxQueryExecutionTime = 100
[upstream]
[upstream.ethServer]
rpcProviderEndpoint = "http://lotus-node-1:1234/rpc/v1"
rpcClient = true
[upstream.cache]
name = "requests"
enabled = false
deleteOnStart = false
[jobQueue]
dbConnectionString = "postgres://vdbm:password@sushiswap-watcher-db:5432/sushi-watcher-job-queue"
maxCompletionLagInSecs = 300
jobDelayInMilliSecs = 0
eventsInBatch = 50
lazyUpdateBlockProgress = true
blockDelayInMilliSecs = 2000
prefetchBlocksInMem = false
prefetchBlockCount = 10

@@ -0,0 +1,41 @@
[server]
host = "0.0.0.0"
port = 3003
kind = "active"
[metrics]
host = "0.0.0.0"
port = 9000
[metrics.gql]
port = 9001
[database]
type = "postgres"
host = "sushiswap-watcher-db"
port = 5432
database = "sushi-watcher"
username = "vdbm"
password = "password"
synchronize = true
logging = false
maxQueryExecutionTime = 100
[upstream]
[upstream.ethServer]
rpcProviderEndpoint = "http://lotus-node-1:1234/rpc/v1"
rpcClient = true
[upstream.cache]
name = "requests"
enabled = false
deleteOnStart = false
[jobQueue]
dbConnectionString = "postgres://vdbm:password@sushiswap-watcher-db:5432/sushi-watcher-job-queue"
maxCompletionLagInSecs = 300
jobDelayInMilliSecs = 0
eventsInBatch = 50
lazyUpdateBlockProgress = true
blockDelayInMilliSecs = 2000
prefetchBlocksInMem = false
prefetchBlockCount = 10

@@ -0,0 +1,4 @@
#!/usr/bin/env bash
# Build a local version of the graphprotocol/graph-node image (one reason among others: the upstream image is not built for arm)
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
docker build -t cerc/graph-node:local -f ${CERC_REPO_BASE_DIR}/graph-node/docker/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/graph-node

@@ -1,7 +1,7 @@
ARG TAG_SUFFIX="-modern"
FROM sigp/lighthouse:v4.3.0${TAG_SUFFIX}
RUN apt-get update; apt-get install bash netcat curl less jq -y;
RUN apt-get update; apt-get install bash netcat curl less jq wget -y;
WORKDIR /root/
ADD start-lighthouse.sh .

@@ -1,5 +1,5 @@
#####################################
FROM golang:1.19.7-buster AS lotus-builder
FROM golang:1.19.12-bullseye AS lotus-builder
MAINTAINER Lotus Development Team
RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev
@@ -59,7 +59,7 @@ COPY --from=lotus-builder /lib/*/libgcc_s.so.1 /lib/
COPY --from=lotus-builder /lib/*/libutil.so.1 /lib/
COPY --from=lotus-builder /usr/lib/*/libltdl.so.7 /lib/
COPY --from=lotus-builder /usr/lib/*/libnuma.so.1 /lib/
COPY --from=lotus-builder /usr/lib/*/libhwloc.so.5 /lib/
COPY --from=lotus-builder /usr/lib/*/libhwloc.so.* /lib/
COPY --from=lotus-builder /usr/lib/*/libOpenCL.so.1 /lib/
RUN useradd -r -u 532 -U fc \
@@ -98,6 +98,9 @@ CMD ["-help"]
#####################################
FROM lotus-base AS lotus-all-in-one
# Install netcat for healthcheck
RUN apt-get update && apt-get install -y netcat && apt-get install -y iproute2
ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
ENV LOTUS_MINER_PATH /var/lib/lotus-miner
ENV LOTUS_PATH /var/lib/lotus

@@ -3,8 +3,8 @@
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
# Per lotus docs, 'releases' branch always contains latest stable release
git -C ${CERC_REPO_BASE_DIR}/lotus checkout releases
# Use a release version tag to match the modified Dockerfile replaced in next step
git -C ${CERC_REPO_BASE_DIR}/lotus checkout v1.23.3
# Replace repo's Dockerfile with modified one
cp ${SCRIPT_DIR}/Dockerfile ${CERC_REPO_BASE_DIR}/lotus/Dockerfile

@@ -0,0 +1,11 @@
FROM node:18.15.0-alpine3.16
RUN apk --update --no-cache add git alpine-sdk bash jq
RUN curl -L https://unpkg.com/@pnpm/self-installer | node
WORKDIR /app
COPY . .
RUN echo "Installing dependencies..." && \
pnpm install

@@ -0,0 +1,7 @@
#!/usr/bin/env bash
# Build cerc/sushiswap-subgraphs
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
docker build -t cerc/sushiswap-subgraphs:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/subgraphs

@@ -0,0 +1,14 @@
FROM node:18.15.0-alpine3.16
RUN apk --update --no-cache add git python3 alpine-sdk bash jq
RUN curl -L https://unpkg.com/@pnpm/self-installer | node
WORKDIR /app
COPY . .
RUN echo "Installing dependencies..." && \
pnpm install
# Keep container running for commands to be executed
CMD ["tail", "-f"]

@@ -0,0 +1,7 @@
#!/usr/bin/env bash
# Build cerc/sushiswap-v3-core
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
docker build -t cerc/sushiswap-v3-core:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/sushiswap-v3-core

@@ -0,0 +1,13 @@
FROM node:18.15.0-alpine3.16
RUN apk --update --no-cache add git python3 alpine-sdk bash jq
WORKDIR /app
COPY . .
RUN echo "Installing dependencies..." && \
yarn install
# Keep container running for commands to be executed
CMD ["tail", "-f"]

@@ -0,0 +1,7 @@
#!/usr/bin/env bash
# Build cerc/sushiswap-v3-periphery
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
docker build -t cerc/sushiswap-v3-periphery:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/sushiswap-v3-periphery

@@ -0,0 +1,10 @@
FROM node:18.15.0-alpine3.16
RUN apk --update --no-cache add git python3 alpine-sdk bash
WORKDIR /app
COPY . .
RUN echo "Building uniswap-watcher-ts" && \
yarn && yarn build && yarn build:contracts

@@ -0,0 +1,9 @@
#!/usr/bin/env bash
# Build cerc/watcher-sushiswap
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
# See: https://stackoverflow.com/a/246128/1701505
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
docker build -t cerc/watcher-sushiswap:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/uniswap-watcher-ts

@@ -44,3 +44,8 @@ cerc/lotus
cerc/go-opera
cerc/lasso
cerc/reth
cerc/sushiswap-v3-core
cerc/sushiswap-v3-periphery
cerc/watcher-sushiswap
cerc/graph-node
cerc/sushiswap-subgraphs

@@ -30,3 +30,5 @@ fixturenet-lotus
mainnet-go-opera
lasso
reth
watcher-sushiswap
contract-sushiswap

@@ -39,3 +39,7 @@ github.com/cerc-io/lasso
github.com/paradigmxyz/reth
git.vdb.to/cerc-io/plugeth
git.vdb.to/cerc-io/plugeth-statediff
github.com/cerc-io/sushiswap-v3-core
github.com/cerc-io/sushiswap-v3-periphery
github.com/graphprotocol/graph-node
github.com/sushiswap/subgraphs

@@ -55,7 +55,7 @@ This should create the required docker images in the local image registry.
## Clean up
Stop all the services running in background run:
Stop all the services running in background:
```bash
laconic-so --stack azimuth deploy-system down

@@ -1,4 +1,4 @@
# fixturenet-eth
# fixturenet-eth-loaded
A "loaded" version of fixturenet-eth, with all the bells and whistles enabled.

@@ -0,0 +1,3 @@
# Graph-Node Fixturenet
Experimental

@@ -0,0 +1,12 @@
version: "1.0"
name: fixturenet-graph-node
description: "A graph-node fixturenet"
repos:
- github.com/filecoin-project/lotus
- github.com/graphprotocol/graph-node
containers:
- cerc/lotus
- cerc/graph-node
pods:
- fixturenet-lotus
- fixturenet-graph-node

@@ -12,17 +12,36 @@ $ laconic-so --stack fixturenet-lotus build-containers
```
## 3. Deploy the stack
```
$ laconic-so --stack fixturenet-lotus deploy up
$ laconic-so --stack fixturenet-lotus deploy --cluster lotus up
```
Note: When running for the first time (or after clean up), the services will take some time to start properly as the Lotus nodes download the proof params (which are persisted to volumes).
Correct operation should be verified by checking the container logs with:
```
$ laconic-so --stack fixturenet-lotus deploy logs lotus-miner
$ laconic-so --stack fixturenet-lotus deploy logs lotus-node-1
$ laconic-so --stack fixturenet-lotus deploy logs lotus-node-2
$ laconic-so --stack fixturenet-lotus deploy --cluster lotus logs lotus-miner
$ laconic-so --stack fixturenet-lotus deploy --cluster lotus logs lotus-node-1
$ laconic-so --stack fixturenet-lotus deploy --cluster lotus logs lotus-node-2
```
or by checking the chain status on each node:
```
$ laconic-so --stack fixturenet-lotus deploy exec lotus-miner "lotus status"
$ laconic-so --stack fixturenet-lotus deploy exec lotus-node-1 "lotus status"
$ laconic-so --stack fixturenet-lotus deploy exec lotus-node-2 "lotus status"
$ laconic-so --stack fixturenet-lotus deploy --cluster lotus exec lotus-miner "lotus status"
$ laconic-so --stack fixturenet-lotus deploy --cluster lotus exec lotus-node-1 "lotus status"
$ laconic-so --stack fixturenet-lotus deploy --cluster lotus exec lotus-node-2 "lotus status"
```
## 4. Clean up
Stop all the services running in background:
```
$ laconic-so --stack fixturenet-lotus deploy --cluster lotus down
```
Clear volumes created by this stack:
```
# List all relevant volumes
$ docker volume ls -q --filter "name=lotus"
# Remove all the listed volumes
$ docker volume rm $(docker volume ls -q --filter "name=lotus")
```

@@ -2,7 +2,7 @@ version: "1.2"
name: fixturenet-plugeth-tx
description: "plugeth Ethereum Fixturenet w/ tx-spammer"
repos:
- git.vdb.to/cerc-io/plugeth@statediff-wip
- git.vdb.to/cerc-io/plugeth@statediff
- git.vdb.to/cerc-io/plugeth-statediff
- github.com/cerc-io/lighthouse
- github.com/cerc-io/ipld-eth-db@v5

@@ -70,12 +70,41 @@ To permanently *delete* the stack's data volumes run:
$ laconic-so deployment --dir mainnet-eth-deployment stop --delete-data-volumes
```
After deleting the volumes, any subsequent re-start will begin chain sync from cold.
## Ports
It is usually necessary to expose certain container ports on one or more of the host's addresses to allow incoming connections.
Any ports defined in the Docker compose file are exposed by default with random port assignments, but the values can be
customized by editing the "spec" file generated by `laconic-so deploy init`.
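For example, a spec file can be generated with (the `--output` option name is inferred from the `init(ctx, output)` command code later in this commit):
```
$ laconic-so --stack mainnet-eth deploy init --output mainnet-eth-spec.yml
```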
In the spec file below, ports `8545` and `5052` have been assigned to a specific address/port combination on the host, while
port `40000` has been left with a random assignment:
```
$ cat mainnet-eth-spec.yml
stack: mainnet-eth
ports:
mainnet-eth-geth-1:
- '10.10.10.10:8545:8545'
- '40000'
mainnet-eth-lighthouse-1:
- '10.10.10.10:5052:5052'
volumes:
mainnet_eth_config_data: ./data/mainnet_eth_config_data
mainnet_eth_geth_1_data: ./data/mainnet_eth_geth_1_data
mainnet_eth_lighthouse_1_data: ./data/mainnet_eth_lighthouse_1_data
```
## Data volumes
Container data volumes are bind-mounted to specified paths in the host filesystem.
The default setup (generated by `laconic-so deploy init`) places the volumes in the `./data` subdirectory of the deployment directory:
```
$ cat mainnet-eth-spec.yml
stack: mainnet-eth
ports:
mainnet-eth-geth-1:
- '10.10.10.10:8545:8545'
- '40000'
mainnet-eth-lighthouse-1:
- '10.10.10.10:5052:5052'
volumes:
mainnet_eth_config_data: ./data/mainnet_eth_config_data
mainnet_eth_geth_1_data: ./data/mainnet_eth_geth_1_data

@@ -93,7 +93,7 @@ Follow the [demo](./demo.md) to try out the MobyMask app with L2 chain
## Clean up
Stop all the services running in background run:
Stop all the services running in background:
```bash
laconic-so --stack mobymask-v2 deploy --cluster mobymask_v2 down 30

@@ -0,0 +1,111 @@
# SushiSwap Graph
## Setup
Clone required repositories:
```bash
laconic-so --stack sushiswap-subgraph setup-repositories
```
Build the container images:
```bash
laconic-so --stack sushiswap-subgraph build-containers
```
## Deploy
Deploy the stack:
```bash
laconic-so --stack sushiswap-subgraph deploy --cluster sushigraph up
```
After all services have started, wait and check that the subgraph has been deployed to graph-node:
```bash
laconic-so --stack sushiswap-subgraph deploy --cluster sushigraph logs -f sushiswap-subgraph-v3
# Expected end output
# ...
# sushigraph-sushiswap-subgraph-v3-1 | - Deploying to Graph node http://graph-node:8020/
# sushigraph-sushiswap-subgraph-v3-1 | Deployed to http://graph-node:8000/subgraphs/name/sushiswap/v3-lotus/graphql
# sushigraph-sushiswap-subgraph-v3-1 |
# sushigraph-sushiswap-subgraph-v3-1 | Subgraph endpoints:
# sushigraph-sushiswap-subgraph-v3-1 | Queries (HTTP): http://graph-node:8000/subgraphs/name/sushiswap/v3-lotus
# sushigraph-sushiswap-subgraph-v3-1 |
# sushigraph-sushiswap-subgraph-v3-1 | Done
```
## Run
To check graph-node logs:
```bash
laconic-so --stack sushiswap-subgraph deploy --cluster sushigraph logs -f graph-node
```
To deploy tokens run:
```bash
docker exec -it sushigraph-sushiswap-v3-periphery-1 yarn hardhat --network docker deploy --tags TestERC20
```
This can be run multiple times to deploy ERC20 tokens.
Take note of the deployed token addresses for later use.
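For example, following the `export VAR=<VALUE>` placeholder convention used in this stack's smoke tests (the variable names match the `pool:create` command below):
```bash
export TOKEN1_ADDRESS=<TOKEN1_ADDRESS>
export TOKEN2_ADDRESS=<TOKEN2_ADDRESS>
```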
Get the contract address of the deployed factory:
```bash
docker exec -it sushigraph-sushiswap-v3-core-1 jq -r '.address' /app/deployments/docker/UniswapV3Factory.json
```
Set it to the environment variable `FACTORY_ADDRESS` for later use.
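For example, by capturing the output of the `jq` command above (the `-t` flag is dropped so command substitution gets clean output; this one-liner is a suggestion, not part of the stack docs):
```bash
export FACTORY_ADDRESS=$(docker exec sushigraph-sushiswap-v3-core-1 jq -r '.address' /app/deployments/docker/UniswapV3Factory.json)
```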
To create a pool:
```bash
docker exec -it sushigraph-sushiswap-v3-core-1 pnpm run pool:create:docker --factory $FACTORY_ADDRESS --token0 $TOKEN1_ADDRESS --token1 $TOKEN2_ADDRESS --fee 500
```
Set the created pool address to the environment variable `POOL_ADDRESS` for later use.
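For example (the pool address is printed by the `pool:create` command above; `<POOL_ADDRESS>` is a placeholder to fill in):
```bash
export POOL_ADDRESS=<POOL_ADDRESS>
```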
To initialize the pool:
```bash
docker exec -it sushigraph-sushiswap-v3-core-1 pnpm run pool:initialize:docker --sqrt-price 4295128939 --pool $POOL_ADDRESS
```
Set the recipient address to the contract deployer:
```bash
export RECIPIENT=0xD375B03bd3A2434A9f675bEC4Ccd68aC5e67C743
```
Trigger pool mint event:
```bash
docker exec -it sushigraph-sushiswap-v3-core-1 pnpm run pool:mint:docker --pool $POOL_ADDRESS --recipient $RECIPIENT --amount 10
```
Trigger pool burn event:
```bash
docker exec -it sushigraph-sushiswap-v3-core-1 pnpm run pool:burn:docker --pool $POOL_ADDRESS --amount 10
```
## Clean up
Stop all the services running in background:
```bash
laconic-so --stack sushiswap-subgraph deploy --cluster sushigraph down
```
Clear volumes created by this stack:
```bash
# List all relevant volumes
docker volume ls -q --filter "name=sushigraph"
# Remove all the listed volumes
docker volume rm $(docker volume ls -q --filter "name=sushigraph")
# WARNING: removing the volumes that hold the Lotus proof params means
# they will be downloaded again on the next start
# To remove only the volumes that do not contain Lotus params:
docker volume rm $(docker volume ls -q --filter "name=sushigraph" | grep -v "params$")
```

@@ -0,0 +1,28 @@
version: "1.0"
name: sushiswap-subgraph
description: "An end-to-end SushiSwap Subgraph stack"
repos:
## fixturenet-lotus repo
- github.com/filecoin-project/lotus
## graph-node repo
- github.com/graphprotocol/graph-node
## sushiswap repos
- github.com/cerc-io/sushiswap-v3-core@watcher-ts
- github.com/cerc-io/sushiswap-v3-periphery@watcher-ts
## subgraph repo
- github.com/sushiswap/subgraphs
containers:
## fixturenet-lotus image
- cerc/lotus
## fixturenet-graph-node image
- cerc/graph-node
## sushiswap contract deployment images
- cerc/sushiswap-v3-core
- cerc/sushiswap-v3-periphery
## sushiswap subgraphs image
- cerc/sushiswap-subgraphs
pods:
- fixturenet-lotus
- fixturenet-graph-node
- contract-sushiswap
- sushiswap-subgraph-v3

@@ -0,0 +1,47 @@
# SushiSwap
## Setup
Clone required repositories:
```bash
laconic-so --stack sushiswap setup-repositories --git-ssh
```
Build the container images:
```bash
laconic-so --stack sushiswap build-containers
```
## Deploy
Deploy the stack:
```bash
laconic-so --stack sushiswap deploy --cluster sushiswap up
```
Note: When running for the first time (or after clean up), the services will take some time to start as the Lotus nodes in the fixturenet download the proof params.
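One way to watch the download is to follow the miner logs and filter for the "proof params" messages emitted by `setup-miner.sh` (shown earlier in this commit; the service name assumes the `sushiswap` cluster used above):
```bash
laconic-so --stack sushiswap deploy --cluster sushiswap logs -f lotus-miner | grep -i "proof params"
```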
## Tests
Follow [smoke-tests.md](./smoke-tests.md) to run smoke tests
## Clean up
Stop all the services running in background:
```bash
laconic-so --stack sushiswap deploy --cluster sushiswap down
```
Clear volumes created by this stack:
```bash
# List all relevant volumes
docker volume ls -q --filter "name=sushiswap"
# Remove all the listed volumes
docker volume rm $(docker volume ls -q --filter "name=sushiswap")
```

@@ -0,0 +1,71 @@
# SushiSwap Watcher Smoke Tests
## sushi-watcher
Deploy required contracts and set the addresses to variables:
```bash
# Deploy UniswapV3Factory
docker exec -it sushiswap-sushiswap-v3-core-1 pnpm hardhat --network docker deploy --tags UniswapV3Factory
# Set the returned address to a variable
export FACTORY_ADDRESS=<FACTORY_ADDRESS>
# Deploy TestUniswapV3Callee
docker exec -it sushiswap-sushiswap-v3-core-1 pnpm hardhat --network docker deploy --tags TestUniswapV3Callee
# Set the returned address to a variable
export UNISWAP_CALLEE_ADDRESS=<UNISWAP_CALLEE_ADDRESS>
# Deploy NFPM contract
docker exec -it sushiswap-sushiswap-v3-periphery-1 bash -c "export FACTORY_ADDRESS=$FACTORY_ADDRESS && yarn hardhat --network docker deploy --tags NonfungiblePositionManager"
# Set the returned address to a variable
export POSITION_MANAGER_ADDRESS=<POSITION_MANAGER_ADDRESS>
# Deploy two test tokens
docker exec -it sushiswap-sushiswap-v3-periphery-1 yarn hardhat --network docker deploy --tags TestERC20
docker exec -it sushiswap-sushiswap-v3-periphery-1 yarn hardhat --network docker deploy --tags TestERC20
# Set the returned addresses to variables
export TOKEN0_ADDRESS=<TOKEN0_ADDRESS>
export TOKEN1_ADDRESS=<TOKEN1_ADDRESS>
```
Watch the contracts:
```bash
# Watch factory contract
docker exec -it sushiswap-sushi-watcher-server-1 bash -c "yarn watch:contract --address $FACTORY_ADDRESS --kind factory --startingBlock 100 --checkpoint false"
docker exec -it sushiswap-sushi-info-watcher-server-1 bash -c "yarn watch:contract --address $FACTORY_ADDRESS --kind factory --startingBlock 100 --checkpoint false"
# Watch NFPM contract
docker exec -it sushiswap-sushi-watcher-server-1 bash -c "yarn watch:contract --address $POSITION_MANAGER_ADDRESS --kind nfpm --startingBlock 100 --checkpoint false"
docker exec -it sushiswap-sushi-info-watcher-server-1 bash -c "yarn watch:contract --address $POSITION_MANAGER_ADDRESS --kind nfpm --startingBlock 100 --checkpoint false"
```
Run the smoke test:
```bash
docker exec -it sushiswap-sushi-watcher-server-1 bash -c "export TOKEN0_ADDRESS=$TOKEN0_ADDRESS && export TOKEN1_ADDRESS=$TOKEN1_ADDRESS && export UNISWAP_CALLEE_ADDRESS=$UNISWAP_CALLEE_ADDRESS && yarn smoke-test"
```
## sushi-info-watcher
Deploy required contracts and set the addresses to variables:
```bash
# Deploy two test tokens
docker exec -it sushiswap-sushiswap-v3-periphery-1 yarn hardhat --network docker deploy --tags TestERC20
docker exec -it sushiswap-sushiswap-v3-periphery-1 yarn hardhat --network docker deploy --tags TestERC20
# Set the returned addresses to variables
export TOKEN0_ADDRESS=<TOKEN0_ADDRESS>
export TOKEN1_ADDRESS=<TOKEN1_ADDRESS>
```
Run the smoke test:
```bash
docker exec -it sushiswap-sushi-info-watcher-server-1 bash -c "export TOKEN0_ADDRESS=$TOKEN0_ADDRESS && export TOKEN1_ADDRESS=$TOKEN1_ADDRESS && export UNISWAP_CALLEE_ADDRESS=$UNISWAP_CALLEE_ADDRESS && yarn smoke-test"
```

@@ -0,0 +1,22 @@
version: "1.0"
name: sushiswap
description: "End-to-end SushiSwap watcher stack"
repos:
## fixturenet-lotus repo
- github.com/filecoin-project/lotus
## sushiswap repos
- github.com/cerc-io/sushiswap-v3-core@watcher-ts
- github.com/cerc-io/sushiswap-v3-periphery@watcher-ts
- github.com/vulcanize/uniswap-watcher-ts@sushiswap
- github.com/vulcanize/uniswap-v3-info
containers:
## fixturenet-lotus image
- cerc/lotus
## sushiswap images
- cerc/sushiswap-v3-core
- cerc/sushiswap-v3-periphery
- cerc/watcher-sushiswap
- cerc/uniswap-v3-info
pods:
- fixturenet-lotus
- watcher-sushiswap

@@ -28,6 +28,20 @@ from app.deploy_types import DeploymentContext, LaconicStackSetupCommand
def _make_default_deployment_dir():
return "deployment-001"
def _get_ports(stack):
ports = {}
parsed_stack = get_parsed_stack_config(stack)
pods = parsed_stack["pods"]
yaml = get_yaml()
for pod in pods:
pod_file_path = os.path.join(get_compose_file_dir(), f"docker-compose-{pod}.yml")
parsed_pod_file = yaml.load(open(pod_file_path, "r"))
if "services" in parsed_pod_file:
for svc_name, svc in parsed_pod_file["services"].items():
if "ports" in svc:
# Ports can appear as strings or numbers. We normalize them as strings.
ports[svc_name] = [ str(x) for x in svc["ports"] ]
return ports
def _get_named_volumes(stack):
# Parse the compose files looking for named volumes
@@ -81,6 +95,12 @@ def _fixup_pod_file(pod, spec, compose_dir):
}
}
pod["volumes"][volume] = new_volume_spec
# Fix up ports
if "ports" in spec:
spec_ports = spec["ports"]
for container_name, container_ports in spec_ports.items():
if container_name in pod["services"]:
pod["services"][container_name]["ports"] = container_ports
def call_stack_deploy_init(deploy_command_context):
@@ -149,12 +169,18 @@ def init(ctx, output):
spec_file_content.update(default_spec_file_content)
if verbose:
print(f"Creating spec file for stack: {stack}")
ports = _get_ports(stack)
if ports:
spec_file_content["ports"] = ports
named_volumes = _get_named_volumes(stack)
if named_volumes:
volume_descriptors = {}
for named_volume in named_volumes:
volume_descriptors[named_volume] = f"./data/{named_volume}"
spec_file_content["volumes"] = volume_descriptors
with open(output, "w") as output_file:
yaml.dump(spec_file_content, output_file)

@@ -52,7 +52,7 @@ laconic-so version
1. Get the repositories
```
laconic-so --stack fixturenet-laconic-loaded setup-repositories --include cerc-io/laconicd,cerc-io/laconic-sdk,cerc-io/laconic-registry-cli,cerc-io/laconic-console
laconic-so --stack fixturenet-laconic-loaded setup-repositories --include github.com/cerc-io/laconicd,github.com/cerc-io/laconic-sdk,github.com/cerc-io/laconic-registry-cli,github.com/cerc-io/laconic-console
```
2. Set this environment variable to the Laconic self-hosted Gitea instance:
@@ -108,6 +108,37 @@ laconic-5cd0a80c1442c3044c8b295d26426bae-laconicd-1 | 9:30PM INF indexed
laconic-so --stack fixturenet-laconic-loaded deploy exec cli "laconic cns status"
```
```
{
"version": "0.3.0",
"node": {
"id": "4216af2ac9f68bda33a38803fc1b5c9559312c1d",
"network": "laconic_9000-1",
"moniker": "localtestnet"
},
"sync": {
"latest_block_hash": "1BDF4CB9AE2390DA65BCF997C83133C18014FCDDCAE03708488F0B56FCEEA429",
"latest_block_height": "5",
"latest_block_time": "2023-08-09 16:00:30.386903172 +0000 UTC",
"catching_up": false
},
"validator": {
"address": "651FBC700B747C76E90ACFC18CC9508C3D0905B9",
"voting_power": "1000000000000000"
},
"validators": [
{
"address": "651FBC700B747C76E90ACFC18CC9508C3D0905B9",
"voting_power": "1000000000000000",
"proposer_priority": "0"
}
],
"num_peers": "0",
"peers": [],
"disk_usage": "292.0K"
}
```
## Configure Digital Ocean firewall
Let's open some ports.