Merge branch 'main' into telackey/ports

# Conflicts:
#	app/deployment_create.py
Thomas E Lackey 2023-08-11 15:27:23 -05:00
commit 2de98a1ad9
30 changed files with 808 additions and 60 deletions

View File

@ -0,0 +1,45 @@
services:
  graph-node:
    image: cerc/graph-node:local
    environment:
      ipfs: ipfs:5001
      postgres_host: db
      postgres_port: 5432
      postgres_user: graph-node
      postgres_pass: password
      postgres_db: graph-node
    ports:
      - "8000"
      - "8001"
      - "8020"
      - "8030"
  ipfs:
    image: ipfs/kubo:master-2023-02-20-714a968
    volumes:
      - ipfs-import:/import
      - ipfs-data:/data/ipfs
    ports:
      - "8080"
      - "4001"
      - "5001"
  db:
    image: postgres:14-alpine
    volumes:
      - db-data:/var/lib/postgresql/data
    environment:
      POSTGRES_USER: "graph-node"
      POSTGRES_DB: "graph-node"
      POSTGRES_PASSWORD: "password"
    healthcheck:
      test: ["CMD", "nc", "-v", "localhost", "5432"]
      interval: 30s
      timeout: 10s
      retries: 10
      start_period: 3s
    ports:
      - "5432"
volumes:
  ipfs-import:
  ipfs-data:
  db-data:

View File

@ -30,6 +30,12 @@ services:
      - ../config/fixturenet-lotus/setup-node.sh:/docker-entrypoint-scripts.d/setup-node.sh
      - lotus_node_1_params:/var/tmp/filecoin-proof-parameters
      - lotus-shared:/root/.lotus-shared
    healthcheck:
      test: ["CMD", "nc", "-vz", "localhost", "1234"]
      interval: 30s
      timeout: 10s
      retries: 10
      start_period: 3s
    depends_on:
      - lotus-miner
    entrypoint: ["sh", "/docker-entrypoint-scripts.d/setup-node.sh"]
@ -51,6 +57,12 @@ services:
      - ../config/fixturenet-lotus/setup-node.sh:/docker-entrypoint-scripts.d/setup-node.sh
      - lotus_node_2_params:/var/tmp/filecoin-proof-parameters
      - lotus-shared:/root/.lotus-shared
    healthcheck:
      test: ["CMD", "nc", "-vz", "localhost", "1234"]
      interval: 30s
      timeout: 10s
      retries: 10
      start_period: 3s
    depends_on:
      - lotus-miner
    entrypoint: ["sh", "/docker-entrypoint-scripts.d/setup-node.sh"]

View File

@ -0,0 +1,187 @@
version: '3.2'
services:
  sushiswap-watcher-db:
    restart: unless-stopped
    image: postgres:14-alpine
    environment:
      - POSTGRES_USER=vdbm
      - POSTGRES_MULTIPLE_DATABASES=erc20-watcher,sushi-watcher,sushi-info-watcher,erc20-watcher-job-queue,sushi-watcher-job-queue,sushi-info-watcher-job-queue
      - POSTGRES_EXTENSION=erc20-watcher-job-queue:pgcrypto,sushi-watcher-job-queue:pgcrypto,sushi-info-watcher-job-queue:pgcrypto
      - POSTGRES_PASSWORD=password
    command: ["postgres", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "work_mem=2GB"]
    volumes:
      - ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh
      - ../config/postgresql/create-pg-stat-statements.sql:/docker-entrypoint-initdb.d/create-pg-stat-statements.sql
      - sushiswap_watcher_db_data:/var/lib/postgresql/data
    ports:
      - "0.0.0.0:15435:5432"
    healthcheck:
      test: ["CMD", "nc", "-v", "localhost", "5432"]
      interval: 20s
      timeout: 5s
      retries: 15
      start_period: 10s
    shm_size: '8GB'
  erc20-watcher-server:
    restart: unless-stopped
    depends_on:
      sushiswap-watcher-db:
        condition: service_healthy
    image: cerc/watcher-sushiswap:local
    working_dir: /app/packages/erc20-watcher
    environment:
      - DEBUG=vulcanize:*
    command: ["node", "--enable-source-maps", "dist/server.js"]
    volumes:
      - ../config/watcher-sushiswap/erc20-watcher.toml:/app/packages/erc20-watcher/environments/local.toml
    ports:
      - "0.0.0.0:3005:3001"
    healthcheck:
      test: ["CMD", "nc", "-v", "localhost", "3001"]
      interval: 20s
      timeout: 5s
      retries: 15
      start_period: 5s
    extra_hosts:
      - "host.docker.internal:host-gateway"
  sushi-watcher-job-runner:
    restart: unless-stopped
    depends_on:
      sushiswap-watcher-db:
        condition: service_healthy
      lotus-node-1:
        condition: service_healthy
    image: cerc/watcher-sushiswap:local
    working_dir: /app/packages/uni-watcher
    environment:
      - DEBUG=vulcanize:*
    command: ["node", "--enable-source-maps", "dist/job-runner.js"]
    volumes:
      - ../config/watcher-sushiswap/sushi-watcher.toml:/app/packages/uni-watcher/environments/local.toml
      - ../config/watcher-sushiswap/sushi-watcher-test.toml:/app/packages/uni-watcher/environments/test.toml
    ports:
      - "0.0.0.0:9004:9000"
    healthcheck:
      test: ["CMD", "nc", "-v", "localhost", "9000"]
      interval: 20s
      timeout: 5s
      retries: 15
      start_period: 5s
    extra_hosts:
      - "host.docker.internal:host-gateway"
  sushi-watcher-server:
    restart: unless-stopped
    depends_on:
      sushiswap-watcher-db:
        condition: service_healthy
      sushi-watcher-job-runner:
        condition: service_healthy
    image: cerc/watcher-sushiswap:local
    env_file:
      - ../config/watcher-sushiswap/lotus-params.env
    environment:
      - DEBUG=vulcanize:*
    working_dir: /app/packages/uni-watcher
    command: ["node", "--enable-source-maps", "dist/server.js"]
    volumes:
      - ../config/watcher-sushiswap/sushi-watcher.toml:/app/packages/uni-watcher/environments/local.toml
      - ../config/watcher-sushiswap/sushi-watcher-test.toml:/app/packages/uni-watcher/environments/test.toml
    ports:
      - "0.0.0.0:3003:3003"
      - "0.0.0.0:9005:9001"
    healthcheck:
      test: ["CMD", "nc", "-v", "localhost", "3003"]
      interval: 20s
      timeout: 5s
      retries: 15
      start_period: 5s
    extra_hosts:
      - "host.docker.internal:host-gateway"
  sushi-info-watcher-job-runner:
    restart: unless-stopped
    depends_on:
      sushiswap-watcher-db:
        condition: service_healthy
      erc20-watcher-server:
        condition: service_healthy
      lotus-node-1:
        condition: service_healthy
      sushi-watcher-server:
        condition: service_healthy
    image: cerc/watcher-sushiswap:local
    working_dir: /app/packages/uni-info-watcher
    environment:
      - DEBUG=vulcanize:*
    command: ["node", "--enable-source-maps", "dist/job-runner.js"]
    volumes:
      - ../config/watcher-sushiswap/sushi-info-watcher.toml:/app/packages/uni-info-watcher/environments/local.toml
      - ../config/watcher-sushiswap/sushi-info-watcher-test.toml:/app/packages/uni-info-watcher/environments/test.toml
    ports:
      - "0.0.0.0:9006:9002"
    healthcheck:
      test: ["CMD", "nc", "-v", "localhost", "9002"]
      interval: 20s
      timeout: 5s
      retries: 15
      start_period: 5s
    extra_hosts:
      - "host.docker.internal:host-gateway"
  sushi-info-watcher-server:
    restart: unless-stopped
    depends_on:
      sushiswap-watcher-db:
        condition: service_healthy
      erc20-watcher-server:
        condition: service_healthy
      sushi-watcher-server:
        condition: service_healthy
      sushi-info-watcher-job-runner:
        condition: service_healthy
    image: cerc/watcher-sushiswap:local
    env_file:
      - ../config/watcher-sushiswap/lotus-params.env
    working_dir: /app/packages/uni-info-watcher
    command: ["node", "--enable-source-maps", "dist/server.js"]
    volumes:
      - ../config/watcher-sushiswap/sushi-info-watcher.toml:/app/packages/uni-info-watcher/environments/local.toml
      - ../config/watcher-sushiswap/sushi-info-watcher-test.toml:/app/packages/uni-info-watcher/environments/test.toml
    ports:
      - "0.0.0.0:3004:3004"
      - "0.0.0.0:9007:9003"
    healthcheck:
      test: ["CMD", "nc", "-v", "localhost", "3004"]
      interval: 20s
      timeout: 5s
      retries: 15
      start_period: 5s
    extra_hosts:
      - "host.docker.internal:host-gateway"
  sushiswap-v3-info:
    depends_on:
      sushi-info-watcher-server:
        condition: service_healthy
    image: cerc/uniswap-v3-info:local
    ports:
      - "0.0.0.0:3006:3000"
  # Deploys the core (UniswapV3Factory) contract
  sushiswap-v3-core:
    image: cerc/sushiswap-v3-core:local
    env_file:
      - ../config/watcher-sushiswap/lotus-params.env
  # Deploys the periphery (NFPM, token, etc.) contracts
  sushiswap-v3-periphery:
    image: cerc/sushiswap-v3-periphery:local
    env_file:
      - ../config/watcher-sushiswap/lotus-params.env
volumes:
  sushiswap_watcher_db_data:

View File

@ -33,7 +33,8 @@ echo "Daemon started."
cp /devgen.car /root/.lotus-shared
# publish bootnode peer info to shared volume
lotus net listen | awk 'NR==2{print}' > /root/.lotus-shared/miner.addr
# TODO: Improve exporting public address to shared volume
lotus net listen | awk 'NR==4{print}' > /root/.lotus-shared/miner.addr
# if miner not already initialized
if [ ! -d $LOTUS_MINER_PATH ]; then

View File

@ -0,0 +1,39 @@
[server]
host = "0.0.0.0"
port = 3001
mode = "eth_call"
kind = "lazy"
[metrics]
host = "127.0.0.1"
port = 9000
[metrics.gql]
port = 9001
[database]
type = "postgres"
host = "sushiswap-watcher-db"
port = 5432
database = "erc20-watcher"
username = "vdbm"
password = "password"
synchronize = true
logging = false
maxQueryExecutionTime = 100
[upstream]
[upstream.ethServer]
rpcProviderEndpoint = "http://lotus-node-1:1234/rpc/v1"
rpcClient = true
[upstream.cache]
name = "requests"
enabled = false
deleteOnStart = false
[jobQueue]
dbConnectionString = "postgres://vdbm:password@sushiswap-watcher-db:5432/erc20-watcher-job-queue"
maxCompletionLagInSecs = 300
jobDelayInMilliSecs = 100
eventsInBatch = 50
blockDelayInMilliSecs = 2000

View File

@ -0,0 +1,6 @@
# Lotus node config
ETH_RPC_ENDPOINT="http://lotus-node-1:1234/rpc/v1"
CHAIN_ID=31415926
# From app/data/config/fixturenet-lotus/fund-account.sh
ACCOUNT_PRIVATE_KEY="0xc05fd3613bcd62a4f25e5eba1f464d0b76d74c3f771a7c2f13e26ad6439444b3"

View File

@ -0,0 +1,45 @@
[server]
host = "0.0.0.0"
port = 3004
# Use mode demo when running watcher locally.
# Mode demo whitelists all tokens so that entity values get updated.
mode = "demo"
[database]
type = "postgres"
host = "sushiswap-watcher-db"
port = 5432
database = "sushi-info-watcher"
username = "vdbm"
password = "password"
synchronize = true
logging = false
maxQueryExecutionTime = 100
[upstream]
[upstream.ethServer]
rpcProviderEndpoint = "http://lotus-node-1:1234/rpc/v1"
rpcClient = true
[upstream.cache]
name = "requests"
enabled = false
deleteOnStart = false
[upstream.uniWatcher]
gqlEndpoint = "http://sushi-watcher-server:3003/graphql"
gqlSubscriptionEndpoint = "ws://sushi-watcher-server:3003/graphql"
[upstream.tokenWatcher]
gqlEndpoint = "http://erc20-watcher-server:3001/graphql"
gqlSubscriptionEndpoint = "ws://erc20-watcher-server:3001/graphql"
[jobQueue]
dbConnectionString = "postgres://vdbm:password@sushiswap-watcher-db:5432/sushi-info-watcher-job-queue"
maxCompletionLagInSecs = 300
jobDelayInMilliSecs = 1000
eventsInBatch = 50
subgraphEventsOrder = true
blockDelayInMilliSecs = 2000
prefetchBlocksInMem = false
prefetchBlockCount = 10

View File

@ -0,0 +1,90 @@
[server]
host = "0.0.0.0"
port = 3004
mode = "demo"
kind = "active"
# Checkpointing state.
checkpointing = true
# Checkpoint interval in number of blocks.
checkpointInterval = 50000
# Enable state creation
enableState = false
# Max block range for which to return events in eventsInRange GQL query.
# Use -1 for skipping check on block range.
maxEventsBlockRange = 1000
# Interval in number of blocks at which to clear entities cache.
clearEntitiesCacheInterval = 1000
# Boolean to skip updating entity fields required in state creation and not required in the frontend.
skipStateFieldsUpdate = false
# Boolean to load GQL query nested entity relations sequentially.
loadRelationsSequential = false
# Max GQL API requests to process simultaneously (defaults to 1).
maxSimultaneousRequests = 1
# GQL cache settings
[server.gqlCache]
enabled = true
# Max in-memory cache size (in bytes) (default 8 MB)
# maxCacheSize
# GQL cache-control max-age settings (in seconds)
maxAge = 15
timeTravelMaxAge = 86400 # 1 day
[metrics]
host = "0.0.0.0"
port = 9002
[metrics.gql]
port = 9003
[database]
type = "postgres"
host = "sushiswap-watcher-db"
port = 5432
database = "sushi-info-watcher"
username = "vdbm"
password = "password"
synchronize = true
logging = false
maxQueryExecutionTime = 100
[database.extra]
# maximum number of clients the pool should contain
max = 20
[upstream]
[upstream.ethServer]
rpcProviderEndpoint = "http://lotus-node-1:1234/rpc/v1"
rpcClient = true
[upstream.cache]
name = "requests"
enabled = false
deleteOnStart = false
[upstream.uniWatcher]
gqlEndpoint = "http://sushi-watcher-server:3003/graphql"
gqlSubscriptionEndpoint = "ws://sushi-watcher-server:3003/graphql"
[upstream.tokenWatcher]
gqlEndpoint = "http://erc20-watcher-server:3001/graphql"
gqlSubscriptionEndpoint = "ws://erc20-watcher-server:3001/graphql"
[jobQueue]
dbConnectionString = "postgres://vdbm:password@sushiswap-watcher-db:5432/sushi-info-watcher-job-queue"
maxCompletionLagInSecs = 300
jobDelayInMilliSecs = 1000
eventsInBatch = 50
subgraphEventsOrder = true
blockDelayInMilliSecs = 2000
prefetchBlocksInMem = false
prefetchBlockCount = 10

View File

@ -0,0 +1,34 @@
[server]
host = "0.0.0.0"
port = 3003
[database]
type = "postgres"
host = "sushiswap-watcher-db"
port = 5432
database = "sushi-watcher"
username = "vdbm"
password = "password"
synchronize = true
logging = false
maxQueryExecutionTime = 100
[upstream]
[upstream.ethServer]
rpcProviderEndpoint = "http://lotus-node-1:1234/rpc/v1"
rpcClient = true
[upstream.cache]
name = "requests"
enabled = false
deleteOnStart = false
[jobQueue]
dbConnectionString = "postgres://vdbm:password@sushiswap-watcher-db:5432/sushi-watcher-job-queue"
maxCompletionLagInSecs = 300
jobDelayInMilliSecs = 0
eventsInBatch = 50
lazyUpdateBlockProgress = true
blockDelayInMilliSecs = 2000
prefetchBlocksInMem = false
prefetchBlockCount = 10

View File

@ -0,0 +1,41 @@
[server]
host = "0.0.0.0"
port = 3003
kind = "active"
[metrics]
host = "0.0.0.0"
port = 9000
[metrics.gql]
port = 9001
[database]
type = "postgres"
host = "sushiswap-watcher-db"
port = 5432
database = "sushi-watcher"
username = "vdbm"
password = "password"
synchronize = true
logging = false
maxQueryExecutionTime = 100
[upstream]
[upstream.ethServer]
rpcProviderEndpoint = "http://lotus-node-1:1234/rpc/v1"
rpcClient = true
[upstream.cache]
name = "requests"
enabled = false
deleteOnStart = false
[jobQueue]
dbConnectionString = "postgres://vdbm:password@sushiswap-watcher-db:5432/sushi-watcher-job-queue"
maxCompletionLagInSecs = 300
jobDelayInMilliSecs = 0
eventsInBatch = 50
lazyUpdateBlockProgress = true
blockDelayInMilliSecs = 2000
prefetchBlocksInMem = false
prefetchBlockCount = 10

View File

@ -0,0 +1,4 @@
#!/usr/bin/env bash
# Build a local version of the graphprotocol/graph-node image (one reason being that the upstream image is not built for arm)
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
docker build -t cerc/graph-node:local -f ${CERC_REPO_BASE_DIR}/graph-node/docker/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/graph-node

View File

@ -1,5 +1,5 @@
#####################################
FROM golang:1.19.7-buster AS lotus-builder
FROM golang:1.19.12-bullseye AS lotus-builder
MAINTAINER Lotus Development Team
RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev
@ -59,7 +59,7 @@ COPY --from=lotus-builder /lib/*/libgcc_s.so.1 /lib/
COPY --from=lotus-builder /lib/*/libutil.so.1 /lib/
COPY --from=lotus-builder /usr/lib/*/libltdl.so.7 /lib/
COPY --from=lotus-builder /usr/lib/*/libnuma.so.1 /lib/
COPY --from=lotus-builder /usr/lib/*/libhwloc.so.5 /lib/
COPY --from=lotus-builder /usr/lib/*/libhwloc.so.* /lib/
COPY --from=lotus-builder /usr/lib/*/libOpenCL.so.1 /lib/
RUN useradd -r -u 532 -U fc \
@ -98,6 +98,9 @@ CMD ["-help"]
#####################################
FROM lotus-base AS lotus-all-in-one
# Install netcat for healthcheck
RUN apt-get update && apt-get install -y netcat
ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
ENV LOTUS_MINER_PATH /var/lib/lotus-miner
ENV LOTUS_PATH /var/lib/lotus

View File

@ -3,8 +3,8 @@
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
# Per lotus docs, 'releases' branch always contains latest stable release
git -C ${CERC_REPO_BASE_DIR}/lotus checkout releases
# Use a release version tag to match the modified Dockerfile replaced in next step
git -C ${CERC_REPO_BASE_DIR}/lotus checkout v1.23.3
# Replace repo's Dockerfile with modified one
cp ${SCRIPT_DIR}/Dockerfile ${CERC_REPO_BASE_DIR}/lotus/Dockerfile

View File

@ -0,0 +1,14 @@
FROM node:18.15.0-alpine3.16
RUN apk --update --no-cache add git python3 alpine-sdk bash
RUN curl -L https://unpkg.com/@pnpm/self-installer | node
WORKDIR /app
COPY . .
RUN echo "Installing dependencies..." && \
pnpm install
# Keep container running for commands to be executed
CMD ["tail", "-f"]

View File

@ -0,0 +1,7 @@
#!/usr/bin/env bash
# Build cerc/sushiswap-v3-core
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
docker build -t cerc/sushiswap-v3-core:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/sushiswap-v3-core

View File

@ -0,0 +1,13 @@
FROM node:18.15.0-alpine3.16
RUN apk --update --no-cache add git python3 alpine-sdk bash
WORKDIR /app
COPY . .
RUN echo "Installing dependencies..." && \
yarn install
# Keep container running for commands to be executed
CMD ["tail", "-f"]

View File

@ -0,0 +1,7 @@
#!/usr/bin/env bash
# Build cerc/sushiswap-v3-periphery
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
docker build -t cerc/sushiswap-v3-periphery:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/sushiswap-v3-periphery

View File

@ -0,0 +1,10 @@
FROM node:18.15.0-alpine3.16
RUN apk --update --no-cache add git python3 alpine-sdk bash
WORKDIR /app
COPY . .
RUN echo "Building uniswap-watcher-ts" && \
yarn && yarn build && yarn build:contracts

View File

@ -0,0 +1,9 @@
#!/usr/bin/env bash
# Build cerc/watcher-sushiswap
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
# See: https://stackoverflow.com/a/246128/1701505
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
docker build -t cerc/watcher-sushiswap:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/uniswap-watcher-ts

View File

@ -44,3 +44,7 @@ cerc/lotus
cerc/go-opera
cerc/lasso
cerc/reth
cerc/sushiswap-v3-core
cerc/sushiswap-v3-periphery
cerc/watcher-sushiswap
cerc/graph-node

View File

@ -30,3 +30,4 @@ fixturenet-lotus
mainnet-go-opera
lasso
reth
watcher-sushiswap

View File

@ -39,3 +39,6 @@ github.com/cerc-io/lasso
github.com/paradigmxyz/reth
git.vdb.to/cerc-io/plugeth
git.vdb.to/cerc-io/plugeth-statediff
github.com/cerc-io/sushiswap-v3-core
github.com/cerc-io/sushiswap-v3-periphery
github.com/graphprotocol/graph-node

View File

@ -0,0 +1,3 @@
# Graph-Node Fixturenet
Experimental

View File

@ -0,0 +1,10 @@
version: "1.0"
name: fixturenet-graph-node
description: "A graph-node fixturenet"
repos:
- github.com/graphprotocol/graph-node
containers:
- cerc/graph-node
pods:
- fixturenet-graph-node

View File

@ -16,13 +16,13 @@ $ laconic-so --stack fixturenet-lotus deploy --cluster lotus up
```
Correct operation should be verified by checking the container logs with:
```
$ laconic-so --stack fixturenet-lotus deploy logs lotus-miner
$ laconic-so --stack fixturenet-lotus deploy logs lotus-node-1
$ laconic-so --stack fixturenet-lotus deploy logs lotus-node-2
$ laconic-so --stack fixturenet-lotus deploy --cluster lotus logs lotus-miner
$ laconic-so --stack fixturenet-lotus deploy --cluster lotus logs lotus-node-1
$ laconic-so --stack fixturenet-lotus deploy --cluster lotus logs lotus-node-2
```
or by checking the chain status on each node:
```
$ laconic-so --stack fixturenet-lotus deploy exec lotus-miner "lotus status"
$ laconic-so --stack fixturenet-lotus deploy exec lotus-node-1 "lotus status"
$ laconic-so --stack fixturenet-lotus deploy exec lotus-node-2 "lotus status"
$ laconic-so --stack fixturenet-lotus deploy --cluster lotus exec lotus-miner "lotus status"
$ laconic-so --stack fixturenet-lotus deploy --cluster lotus exec lotus-node-1 "lotus status"
$ laconic-so --stack fixturenet-lotus deploy --cluster lotus exec lotus-node-2 "lotus status"
```

View File

@ -70,12 +70,41 @@ To permanently *delete* the stack's data volumes run:
$ laconic-so deployment --dir mainnet-eth-deployment stop --delete-data-volumes
```
After deleting the volumes, any subsequent re-start will begin chain sync from cold.
## Ports
It is usually necessary to expose certain container ports on one or more of the host's addresses to allow incoming connections.
Any ports defined in the Docker Compose file are exposed by default with random port assignments, but the values can be
customized by editing the "spec" file generated by `laconic-so deploy init`.
In this example, ports `8545` and `5052` have each been assigned to a specific address/port combination on the host, while
port `40000` has been left with a random assignment:
```
$ cat mainnet-eth-spec.yml
stack: mainnet-eth
ports:
mainnet-eth-geth-1:
- '10.10.10.10:8545:8545'
- '40000'
mainnet-eth-lighthouse-1:
- '10.10.10.10:5052:5052'
volumes:
mainnet_eth_config_data: ./data/mainnet_eth_config_data
mainnet_eth_geth_1_data: ./data/mainnet_eth_geth_1_data
mainnet_eth_lighthouse_1_data: ./data/mainnet_eth_lighthouse_1_data
```
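Because the spec's per-service port lists are copied verbatim into the generated compose file (see the updated `_fixup_pod_file` later in this commit), the usual Docker Compose port-publishing forms can be used. A minimal sketch, with a hypothetical service name:
```
ports:
  example-service-1:
    - '3000'                   # container port only; the host port is assigned randomly
    - '8080:80'                # fixed host port on all host addresses
    - '10.10.10.10:8545:8545'  # fixed host address and host port
```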
## Data volumes
Container data volumes are bind-mounted to specified paths in the host filesystem.
The default setup (generated by `laconic-so deploy init`) places the volumes in the `./data` subdirectory of the deployment directory:
```
$ cat mainnet-eth-spec.yml
stack: mainnet-eth
ports:
mainnet-eth-geth-1:
- '10.10.10.10:8545:8545'
- '40000'
mainnet-eth-lighthouse-1:
- '10.10.10.10:5052:5052'
volumes:
mainnet_eth_config_data: ./data/mainnet_eth_config_data
mainnet_eth_geth_1_data: ./data/mainnet_eth_geth_1_data

View File

@ -0,0 +1,45 @@
# SushiSwap
## Setup
Clone required repositories:
```bash
laconic-so --stack sushiswap setup-repositories --git-ssh
```
Build the container images:
```bash
laconic-so --stack sushiswap build-containers
```
## Deploy
Deploy the stack:
```bash
laconic-so --stack sushiswap deploy --cluster sushiswap up
```
## Tests
Follow [smoke-tests.md](./smoke-tests.md) to run the smoke tests.
## Clean up
To stop all the services running in the background, run:
```bash
laconic-so --stack sushiswap deploy --cluster sushiswap down
```
To clear the volumes created by this stack, run:
```bash
# List all relevant volumes
docker volume ls -q --filter "name=sushiswap"
# Remove all the listed volumes
docker volume rm $(docker volume ls -q --filter "name=sushiswap")
```

View File

@ -0,0 +1,71 @@
# SushiSwap Watcher Smoke Tests
## sushi-watcher
Deploy the required contracts and assign the returned addresses to variables:
```bash
# Deploy UniswapV3Factory
docker exec -it sushiswap-sushiswap-v3-core-1 pnpm hardhat --network docker deploy --tags UniswapV3Factory
# Set the returned address to a variable
export FACTORY_ADDRESS=<FACTORY_ADDRESS>
# Deploy TestUniswapV3Callee
docker exec -it sushiswap-sushiswap-v3-core-1 pnpm hardhat --network docker deploy --tags TestUniswapV3Callee
# Set the returned address to a variable
export UNISWAP_CALLEE_ADDRESS=<UNISWAP_CALLEE_ADDRESS>
# Deploy NFPM contract
docker exec -it sushiswap-sushiswap-v3-periphery-1 bash -c "export FACTORY_ADDRESS=$FACTORY_ADDRESS && yarn hardhat --network docker deploy --tags NonfungiblePositionManager"
# Set the returned address to a variable
export POSITION_MANAGER_ADDRESS=<POSITION_MANAGER_ADDRESS>
# Deploy two test tokens
docker exec -it sushiswap-sushiswap-v3-periphery-1 yarn hardhat --network docker deploy --tags TestERC20
docker exec -it sushiswap-sushiswap-v3-periphery-1 yarn hardhat --network docker deploy --tags TestERC20
# Set the returned addresses to variables
export TOKEN0_ADDRESS=<TOKEN0_ADDRESS>
export TOKEN1_ADDRESS=<TOKEN1_ADDRESS>
```
Watch the contracts:
```bash
# Watch factory contract
docker exec -it sushiswap-sushi-watcher-server-1 bash -c "yarn watch:contract --address $FACTORY_ADDRESS --kind factory --startingBlock 100 --checkpoint false"
docker exec -it sushiswap-sushi-info-watcher-server-1 bash -c "yarn watch:contract --address $FACTORY_ADDRESS --kind factory --startingBlock 100 --checkpoint false"
# Watch NFPM contract
docker exec -it sushiswap-sushi-watcher-server-1 bash -c "yarn watch:contract --address $POSITION_MANAGER_ADDRESS --kind nfpm --startingBlock 100 --checkpoint false"
docker exec -it sushiswap-sushi-info-watcher-server-1 bash -c "yarn watch:contract --address $POSITION_MANAGER_ADDRESS --kind nfpm --startingBlock 100 --checkpoint false"
```
Run the smoke test:
```bash
docker exec -it sushiswap-sushi-watcher-server-1 bash -c "export TOKEN0_ADDRESS=$TOKEN0_ADDRESS && export TOKEN1_ADDRESS=$TOKEN1_ADDRESS && export UNISWAP_CALLEE_ADDRESS=$UNISWAP_CALLEE_ADDRESS && yarn smoke-test"
```
## sushi-info-watcher
Deploy the required contracts and assign the returned addresses to variables:
```bash
# Deploy two test tokens
docker exec -it sushiswap-sushiswap-v3-periphery-1 yarn hardhat --network docker deploy --tags TestERC20
docker exec -it sushiswap-sushiswap-v3-periphery-1 yarn hardhat --network docker deploy --tags TestERC20
# Set the returned addresses to variables
export TOKEN0_ADDRESS=<TOKEN0_ADDRESS>
export TOKEN1_ADDRESS=<TOKEN1_ADDRESS>
```
Run the smoke test:
```bash
docker exec -it sushiswap-sushi-info-watcher-server-1 bash -c "export TOKEN0_ADDRESS=$TOKEN0_ADDRESS && export TOKEN1_ADDRESS=$TOKEN1_ADDRESS && export UNISWAP_CALLEE_ADDRESS=$UNISWAP_CALLEE_ADDRESS && yarn smoke-test"
```

View File

@ -0,0 +1,22 @@
version: "1.0"
name: sushiswap
description: "End-to-end SushiSwap watcher stack"
repos:
## fixturenet-lotus repo
- github.com/filecoin-project/lotus
## sushiswap repos
- github.com/cerc-io/sushiswap-v3-core@watcher-ts
- github.com/cerc-io/sushiswap-v3-periphery@watcher-ts
- github.com/vulcanize/uniswap-watcher-ts@sushiswap
- github.com/vulcanize/uniswap-v3-info
containers:
## fixturenet-lotus image
- cerc/lotus
## sushiswap images
- cerc/sushiswap-v3-core
- cerc/sushiswap-v3-periphery
- cerc/watcher-sushiswap
- cerc/uniswap-v3-info
pods:
- fixturenet-lotus
- watcher-sushiswap

View File

@ -38,16 +38,13 @@ def _get_ports(stack):
if "services" in parsed_pod_file:
for svc_name, svc in parsed_pod_file["services"].items():
if "ports" in svc:
normalized = [ str(x) for x in svc["ports"] ]
if pod in ports:
ports[pod][svc_name] = normalized
else:
ports[pod] = { svc_name: normalized }
# Ports can appear as strings or numbers. We normalize them as strings.
ports[svc_name] = [ str(x) for x in svc["ports"] ]
return ports
def _get_named_volumes(stack):
# Parse the compose files looking for named volumes
named_volumes = {}
named_volumes = []
parsed_stack = get_parsed_stack_config(stack)
pods = parsed_stack["pods"]
yaml = get_yaml()
@ -56,9 +53,10 @@ def _get_named_volumes(stack):
parsed_pod_file = yaml.load(open(pod_file_path, "r"))
if "volumes" in parsed_pod_file:
volumes = parsed_pod_file["volumes"]
# Volume definition looks like:
# 'laconicd-data': None
named_volumes[pod] = list(volumes.keys())
for volume in volumes.keys():
# Volume definition looks like:
# 'laconicd-data': None
named_volumes.append(volume)
return named_volumes
@ -77,30 +75,31 @@ def _create_bind_dir_if_relative(volume, path_string, compose_dir):
# See: https://stackoverflow.com/questions/45699189/editing-docker-compose-yml-with-pyyaml
def _fixup_pod_file(pod_name, pod, spec, compose_dir):
if pod_name in spec["pods"]:
# Fix up volumes
if "volumes" in spec["pods"][pod_name]:
spec_volumes = spec["pods"][pod_name]["volumes"]
if "volumes" in pod:
pod_volumes = pod["volumes"]
for volume in pod_volumes.keys():
if volume in spec_volumes:
volume_spec = spec_volumes[volume]
volume_spec_fixedup = volume_spec if Path(volume_spec).is_absolute() else f".{volume_spec}"
_create_bind_dir_if_relative(volume, volume_spec, compose_dir)
new_volume_spec = {"driver": "local",
"driver_opts": {
"type": "none",
"device": volume_spec_fixedup,
"o": "bind"
}
}
pod["volumes"][volume] = new_volume_spec
# Fix up ports
if "ports" in spec["pods"][pod_name]:
for container_name, container_ports in spec["pods"][pod_name]["ports"].items():
pod["services"][container_name]["ports"] = container_ports
def _fixup_pod_file(pod, spec, compose_dir):
# Fix up volumes
if "volumes" in spec:
spec_volumes = spec["volumes"]
if "volumes" in pod:
pod_volumes = pod["volumes"]
for volume in pod_volumes.keys():
if volume in spec_volumes:
volume_spec = spec_volumes[volume]
volume_spec_fixedup = volume_spec if Path(volume_spec).is_absolute() else f".{volume_spec}"
_create_bind_dir_if_relative(volume, volume_spec, compose_dir)
new_volume_spec = {"driver": "local",
"driver_opts": {
"type": "none",
"device": volume_spec_fixedup,
"o": "bind"
}
}
pod["volumes"][volume] = new_volume_spec
# Fix up ports
if "ports" in spec:
spec_ports = spec["ports"]
for container_name, container_ports in spec_ports.items():
if container_name in pod["services"]:
pod["services"][container_name]["ports"] = container_ports
def call_stack_deploy_init(deploy_command_context):
@ -170,23 +169,17 @@ def init(ctx, output):
if verbose:
print(f"Creating spec file for stack: {stack}")
parsed_stack = get_parsed_stack_config(stack)
pods = dict([ (p, {}) for p in parsed_stack["pods"]])
ports = _get_ports(stack)
if ports:
spec_file_content["ports"] = ports
named_volumes_by_pod = _get_named_volumes(stack)
if named_volumes_by_pod:
for pod_name in named_volumes_by_pod:
volume_descriptors = {}
for named_volume in named_volumes_by_pod[pod_name]:
volume_descriptors[named_volume] = f"./data/{named_volume}"
pods[pod_name]["volumes"] = volume_descriptors
named_volumes = _get_named_volumes(stack)
if named_volumes:
volume_descriptors = {}
for named_volume in named_volumes:
volume_descriptors[named_volume] = f"./data/{named_volume}"
spec_file_content["volumes"] = volume_descriptors
ports_by_pod = _get_ports(stack)
if ports_by_pod:
for pod_name in ports_by_pod:
pods[pod_name]["ports"] = ports_by_pod[pod_name]
spec_file_content["pods"] = pods
with open(output, "w") as output_file:
yaml.dump(spec_file_content, output_file)
@ -224,7 +217,7 @@ def create(ctx, spec_file, deployment_dir):
extra_config_dirs = _find_extra_config_dirs(parsed_pod_file, pod)
if global_options(ctx).debug:
print(f"extra config dirs: {extra_config_dirs}")
_fixup_pod_file(pod, parsed_pod_file, parsed_spec, destination_compose_dir)
_fixup_pod_file(parsed_pod_file, parsed_spec, destination_compose_dir)
with open(os.path.join(destination_compose_dir, os.path.basename(pod_file_path)), "w") as output_file:
yaml.dump(parsed_pod_file, output_file)
# Copy the config files for the pod, if any