diff --git a/app/data/compose/docker-compose-fixturenet-lotus.yml b/app/data/compose/docker-compose-fixturenet-lotus.yml new file mode 100644 index 00000000..a9056153 --- /dev/null +++ b/app/data/compose/docker-compose-fixturenet-lotus.yml @@ -0,0 +1,83 @@ +version: "3.8" +services: + + lotus-miner: + hostname: lotus-miner + env_file: + - ../config/fixturenet-lotus/lotus-env.env + image: cerc/lotus:local + volumes: + - ../config/fixturenet-lotus/setup-miner.sh:/docker-entrypoint-scripts.d/setup-miner.sh + - ../config/fixturenet-lotus/genesis/devgen.car:/devgen.car + - $HOME/stack-orchestrator/app/data/config/fixturenet-lotus/genesis/.genesis-sectors:/root/.genesis-sectors +# - lotus-local-net-shared:/root/.lotus-local-net + # healthcheck: + # test: ["CMD-SHELL", "grep 'started ChainNotify channel' /var/log/lotus.log"] + # interval: 10s + # timeout: 10s + # retries: 5 + # start_period: 30s +# depends_on: +# - fixturenet-lotus-bootnode + entrypoint: ["sh", "/docker-entrypoint-scripts.d/setup-miner.sh"] + ports: + - "1234" + - "2345" + - "3456" + - "1777" + + lotus-node-1: + hostname: lotus-node-1 + env_file: + - ../config/fixturenet-lotus/lotus-env.env + image: cerc/lotus:local + volumes: + - ../config/fixturenet-lotus/setup-node.sh:/docker-entrypoint-scripts.d/setup-node.sh + - ../config/fixturenet-lotus/genesis/devgen.car:/devgen.car +# - ../config/fixturenet-lotus/genesis/.genesis-sectors:/root/.genesis-sectors +# - ./myscripts/pre-seal-t01000.key:/root/.genesis-sectors/pre-seal-t01000.key +# - ./myscripts/pre-seal-t01000.json:/root/.genesis-sectors/pre-seal-t01000.json +# - lotus-local-net-shared:/root/.lotus-local-net +# healthcheck: +# test: ["CMD", "nc", "-v", "localhost", "8545"] +# interval: 30s +# timeout: 10s +# retries: 10 +# start_period: 3s +# depends_on: +# - fixturenet-lotus-1 + entrypoint: ["sh", "/docker-entrypoint-scripts.d/setup-node.sh"] +# entrypoint: ["lotus", "--version"] + ports: + - "1234" + - "2345" + - "3456" + - "1777" + + lotus-node-2: + 
hostname: lotus-node-2 + env_file: + - ../config/fixturenet-lotus/lotus-env.env + image: cerc/lotus:local + volumes: + - ../config/fixturenet-lotus/setup-node.sh:/docker-entrypoint-scripts.d/setup-node.sh + - ../config/fixturenet-lotus/genesis/devgen.car:/devgen.car +# - ../config/fixturenet-lotus/genesis/.genesis-sectors:/root/.genesis-sectors +# - ./myscripts/pre-seal-t01000.key:/root/.genesis-sectors/pre-seal-t01000.key +# - ./myscripts/pre-seal-t01000.json:/root/.genesis-sectors/pre-seal-t01000.json +# - lotus-local-net-shared:/root/.lotus-local-net +# healthcheck: +# test: ["CMD", "nc", "-v", "localhost", "8545"] +# interval: 30s +# timeout: 10s +# retries: 10 +# start_period: 3s +# depends_on: +# - fixturenet-lotus-1 + entrypoint: ["sh", "/docker-entrypoint-scripts.d/setup-node.sh"] +# entrypoint: ["lotus", "--version"] + ports: + - "1234" + - "2345" + - "3456" + - "1777" diff --git a/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/cache/s-t01000-0/p_aux b/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/cache/s-t01000-0/p_aux new file mode 100644 index 00000000..81030cb4 --- /dev/null +++ b/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/cache/s-t01000-0/p_aux @@ -0,0 +1 @@ +}+V{iй\kq  ?Af~쩙Ltbyqč?aӚJ \ No newline at end of file diff --git a/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/cache/s-t01000-0/sc-02-data-tree-r-last.dat b/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/cache/s-t01000-0/sc-02-data-tree-r-last.dat new file mode 100644 index 00000000..5af05cae --- /dev/null +++ b/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/cache/s-t01000-0/sc-02-data-tree-r-last.dat @@ -0,0 +1 @@ +f~쩙Ltbyqč?aӚJ \ No newline at end of file diff --git a/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/cache/s-t01000-0/t_aux b/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/cache/s-t01000-0/t_aux new file mode 100644 index 00000000..09302363 Binary files /dev/null and 
b/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/cache/s-t01000-0/t_aux differ diff --git a/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/cache/s-t01000-1/p_aux b/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/cache/s-t01000-1/p_aux new file mode 100644 index 00000000..5ed0bcb8 Binary files /dev/null and b/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/cache/s-t01000-1/p_aux differ diff --git a/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/cache/s-t01000-1/sc-02-data-tree-r-last.dat b/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/cache/s-t01000-1/sc-02-data-tree-r-last.dat new file mode 100644 index 00000000..6165cf10 Binary files /dev/null and b/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/cache/s-t01000-1/sc-02-data-tree-r-last.dat differ diff --git a/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/cache/s-t01000-1/t_aux b/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/cache/s-t01000-1/t_aux new file mode 100644 index 00000000..1a9f9afc Binary files /dev/null and b/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/cache/s-t01000-1/t_aux differ diff --git a/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/pre-seal-t01000.json b/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/pre-seal-t01000.json new file mode 100644 index 00000000..3a05d4b1 --- /dev/null +++ b/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/pre-seal-t01000.json @@ -0,0 +1,71 @@ +{ + "t01000": { + "ID": "t01000", + "Owner": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q", + "Worker": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q", + "PeerId": "12D3KooWG5q6pWJVdPBhDBv9AjWVbUh4xxTAZ7xvgZSjczWuD2Z9", + "MarketBalance": "0", + "PowerBalance": "0", + "SectorSize": 2048, + "Sectors": [ + { + "CommR": { + "/": "bagboea4b5abcboxypcewlkmrat2myu4vthk3ii2pcomak7nhqmdbb6sxlolp2wdf" + }, + 
"CommD": { + "/": "baga6ea4seaqn3jfixthmdgksv4vhfeuyvr6upw6tvaqbmzmsyxnzosm4pwgnmlq" + }, + "SectorID": 0, + "Deal": { + "PieceCID": { + "/": "baga6ea4seaqn3jfixthmdgksv4vhfeuyvr6upw6tvaqbmzmsyxnzosm4pwgnmlq" + }, + "PieceSize": 2048, + "VerifiedDeal": false, + "Client": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q", + "Provider": "t01000", + "Label": "0", + "StartEpoch": 0, + "EndEpoch": 9001, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "DealClientKey": { + "Type": "bls", + "PrivateKey": "tFvSRiSg2G3Ssgg0PSYy23XyjaIMXpsmdyG2B7UFLT4=" + }, + "ProofType": 5 + }, + { + "CommR": { + "/": "bagboea4b5abcb6krzypqcczhcnbeyjcqkeo6omfergm336o3kitugh3jgjog2yqq" + }, + "CommD": { + "/": "baga6ea4seaqhondpb2373hjasjplxvbjzi5n5mm4fbbhjxp5ptnbq4cibapkeii" + }, + "SectorID": 1, + "Deal": { + "PieceCID": { + "/": "baga6ea4seaqhondpb2373hjasjplxvbjzi5n5mm4fbbhjxp5ptnbq4cibapkeii" + }, + "PieceSize": 2048, + "VerifiedDeal": false, + "Client": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q", + "Provider": "t01000", + "Label": "1", + "StartEpoch": 0, + "EndEpoch": 9001, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "DealClientKey": { + "Type": "bls", + "PrivateKey": "tFvSRiSg2G3Ssgg0PSYy23XyjaIMXpsmdyG2B7UFLT4=" + }, + "ProofType": 5 + } + ] + } +} \ No newline at end of file diff --git a/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/pre-seal-t01000.key b/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/pre-seal-t01000.key new file mode 100644 index 00000000..53e58628 --- /dev/null +++ b/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/pre-seal-t01000.key @@ -0,0 +1 @@ +7b2254797065223a22626c73222c22507269766174654b6579223a227446765352695367324733537367673050535979323358796a61494d5870736d64794732423755464c54343d227d \ No newline at end of file diff --git 
a/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/sealed/s-t01000-0 b/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/sealed/s-t01000-0 new file mode 100644 index 00000000..56a0f872 Binary files /dev/null and b/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/sealed/s-t01000-0 differ diff --git a/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/sealed/s-t01000-1 b/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/sealed/s-t01000-1 new file mode 100644 index 00000000..dc3afe0f Binary files /dev/null and b/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/sealed/s-t01000-1 differ diff --git a/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/sectorstore.json b/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/sectorstore.json new file mode 100644 index 00000000..a49be73f --- /dev/null +++ b/app/data/config/fixturenet-lotus/genesis/.genesis-sectors/sectorstore.json @@ -0,0 +1,11 @@ +{ + "ID": "f355523e-69d0-4984-bd0e-9588487c6231", + "Weight": 0, + "CanSeal": false, + "CanStore": false, + "MaxStorage": 0, + "Groups": null, + "AllowTo": null, + "AllowTypes": null, + "DenyTypes": null +} \ No newline at end of file diff --git a/app/data/config/fixturenet-lotus/genesis/devgen.car b/app/data/config/fixturenet-lotus/genesis/devgen.car new file mode 100644 index 00000000..a22eaed4 Binary files /dev/null and b/app/data/config/fixturenet-lotus/genesis/devgen.car differ diff --git a/app/data/config/fixturenet-lotus/genesis/localnet.json b/app/data/config/fixturenet-lotus/genesis/localnet.json new file mode 100644 index 00000000..06d3123e --- /dev/null +++ b/app/data/config/fixturenet-lotus/genesis/localnet.json @@ -0,0 +1,108 @@ +{ + "NetworkVersion": 18, + "Accounts": [ + { + "Type": "account", + "Balance": "50000000000000000000000000", + "Meta": { + "Owner": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q" + } + } + ], + "Miners": [ + { + "ID": "t01000", + "Owner": 
"t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q", + "Worker": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q", + "PeerId": "12D3KooWG5q6pWJVdPBhDBv9AjWVbUh4xxTAZ7xvgZSjczWuD2Z9", + "MarketBalance": "0", + "PowerBalance": "0", + "SectorSize": 2048, + "Sectors": [ + { + "CommR": { + "/": "bagboea4b5abcboxypcewlkmrat2myu4vthk3ii2pcomak7nhqmdbb6sxlolp2wdf" + }, + "CommD": { + "/": "baga6ea4seaqn3jfixthmdgksv4vhfeuyvr6upw6tvaqbmzmsyxnzosm4pwgnmlq" + }, + "SectorID": 0, + "Deal": { + "PieceCID": { + "/": "baga6ea4seaqn3jfixthmdgksv4vhfeuyvr6upw6tvaqbmzmsyxnzosm4pwgnmlq" + }, + "PieceSize": 2048, + "VerifiedDeal": false, + "Client": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q", + "Provider": "t01000", + "Label": "0", + "StartEpoch": 0, + "EndEpoch": 9001, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "DealClientKey": { + "Type": "bls", + "PrivateKey": "tFvSRiSg2G3Ssgg0PSYy23XyjaIMXpsmdyG2B7UFLT4=" + }, + "ProofType": 5 + }, + { + "CommR": { + "/": "bagboea4b5abcb6krzypqcczhcnbeyjcqkeo6omfergm336o3kitugh3jgjog2yqq" + }, + "CommD": { + "/": "baga6ea4seaqhondpb2373hjasjplxvbjzi5n5mm4fbbhjxp5ptnbq4cibapkeii" + }, + "SectorID": 1, + "Deal": { + "PieceCID": { + "/": "baga6ea4seaqhondpb2373hjasjplxvbjzi5n5mm4fbbhjxp5ptnbq4cibapkeii" + }, + "PieceSize": 2048, + "VerifiedDeal": false, + "Client": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q", + "Provider": "t01000", + "Label": "1", + "StartEpoch": 0, + "EndEpoch": 9001, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "DealClientKey": { + "Type": "bls", + "PrivateKey": "tFvSRiSg2G3Ssgg0PSYy23XyjaIMXpsmdyG2B7UFLT4=" + }, + "ProofType": 5 + } + ] + } + ], + "NetworkName": "localnet-6d52dae5-ff29-4bac-a45d-f84e6c07564c", + "VerifregRootKey": { + "Type": "multisig", + "Balance": 
"0", + "Meta": { + "Signers": [ + "t1ceb34gnsc6qk5dt6n7xg6ycwzasjhbxm3iylkiy" + ], + "Threshold": 1, + "VestingDuration": 0, + "VestingStart": 0 + } + }, + "RemainderAccount": { + "Type": "multisig", + "Balance": "0", + "Meta": { + "Signers": [ + "t1ceb34gnsc6qk5dt6n7xg6ycwzasjhbxm3iylkiy" + ], + "Threshold": 1, + "VestingDuration": 0, + "VestingStart": 0 + } + } +} \ No newline at end of file diff --git a/app/data/config/fixturenet-lotus/lotus-env.env b/app/data/config/fixturenet-lotus/lotus-env.env new file mode 100644 index 00000000..582b8944 --- /dev/null +++ b/app/data/config/fixturenet-lotus/lotus-env.env @@ -0,0 +1,5 @@ +LOTUS_PATH=~/.lotus-local-net +LOTUS_MINER_PATH=~/.lotus-miner-local-net +LOTUS_SKIP_GENESIS_CHECK=_yes_ +CGO_CFLAGS_ALLOW="-D__BLST_PORTABLE__" +CGO_CFLAGS="-D__BLST_PORTABLE__" diff --git a/app/data/config/fixturenet-lotus/setup-miner.sh b/app/data/config/fixturenet-lotus/setup-miner.sh new file mode 100644 index 00000000..802ed8f5 --- /dev/null +++ b/app/data/config/fixturenet-lotus/setup-miner.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +lotus --version +#lotus daemon --genesis=/devgen.car --profile=bootstrapper --bootstrap=false > /var/log/lotus.log 2>&1 +lotus daemon --genesis=/devgen.car --bootstrap=false diff --git a/app/data/config/fixturenet-lotus/setup-node.sh b/app/data/config/fixturenet-lotus/setup-node.sh new file mode 100644 index 00000000..fe9f17d2 --- /dev/null +++ b/app/data/config/fixturenet-lotus/setup-node.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +lotus --version +lotus daemon --genesis=/devgen.car diff --git a/app/data/container-build/cerc-lotus/Dockerfile b/app/data/container-build/cerc-lotus/Dockerfile new file mode 100644 index 00000000..963de704 --- /dev/null +++ b/app/data/container-build/cerc-lotus/Dockerfile @@ -0,0 +1,138 @@ +##################################### +FROM golang:1.19.7-buster AS lotus-builder +MAINTAINER Lotus Development Team + +RUN apt-get update && apt-get install -y ca-certificates build-essential clang 
ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev + +ENV XDG_CACHE_HOME="/tmp" + +### taken from https://github.com/rust-lang/docker-rust/blob/master/1.63.0/buster/Dockerfile +ENV RUSTUP_HOME=/usr/local/rustup \ + CARGO_HOME=/usr/local/cargo \ + PATH=/usr/local/cargo/bin:$PATH \ + RUST_VERSION=1.63.0 + +RUN set -eux; \ + dpkgArch="$(dpkg --print-architecture)"; \ + case "${dpkgArch##*-}" in \ + amd64) rustArch='x86_64-unknown-linux-gnu'; rustupSha256='5cc9ffd1026e82e7fb2eec2121ad71f4b0f044e88bca39207b3f6b769aaa799c' ;; \ + arm64) rustArch='aarch64-unknown-linux-gnu'; rustupSha256='e189948e396d47254103a49c987e7fb0e5dd8e34b200aa4481ecc4b8e41fb929' ;; \ + *) echo >&2 "unsupported architecture: ${dpkgArch}"; exit 1 ;; \ + esac; \ + url="https://static.rust-lang.org/rustup/archive/1.25.1/${rustArch}/rustup-init"; \ + wget "$url"; \ + echo "${rustupSha256} *rustup-init" | sha256sum -c -; \ + chmod +x rustup-init; \ + ./rustup-init -y --no-modify-path --profile minimal --default-toolchain $RUST_VERSION --default-host ${rustArch}; \ + rm rustup-init; \ + chmod -R a+w $RUSTUP_HOME $CARGO_HOME; \ + rustup --version; \ + cargo --version; \ + rustc --version; + +COPY ./ /opt/filecoin +WORKDIR /opt/filecoin + +#RUN scripts/docker-git-state-check.sh + +### make configurable filecoin-ffi build +ARG FFI_BUILD_FROM_SOURCE=0 +ENV FFI_BUILD_FROM_SOURCE=${FFI_BUILD_FROM_SOURCE} + +RUN make clean deps + +ARG RUSTFLAGS="" +ARG GOFLAGS="" + +#RUN make buildall +RUN make 2k + +##################################### +FROM ubuntu:20.04 AS lotus-base +MAINTAINER Lotus Development Team + +# Base resources +COPY --from=lotus-builder /etc/ssl/certs /etc/ssl/certs +COPY --from=lotus-builder /lib/*/libdl.so.2 /lib/ +COPY --from=lotus-builder /lib/*/librt.so.1 /lib/ +COPY --from=lotus-builder /lib/*/libgcc_s.so.1 /lib/ +COPY --from=lotus-builder /lib/*/libutil.so.1 /lib/ +COPY --from=lotus-builder /usr/lib/*/libltdl.so.7 /lib/ +COPY --from=lotus-builder /usr/lib/*/libnuma.so.1 /lib/ +COPY 
--from=lotus-builder /usr/lib/*/libhwloc.so.5 /lib/ +COPY --from=lotus-builder /usr/lib/*/libOpenCL.so.1 /lib/ + +RUN useradd -r -u 532 -U fc \ + && mkdir -p /etc/OpenCL/vendors \ + && echo "libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd + +##################################### +FROM lotus-base AS lotus +MAINTAINER Lotus Development Team + +COPY --from=lotus-builder /opt/filecoin/lotus /usr/local/bin/ +COPY --from=lotus-builder /opt/filecoin/lotus-shed /usr/local/bin/ +#COPY scripts/docker-lotus-entrypoint.sh / +#COPY myscripts/setup-node.sh /docker-entrypoint-scripts.d/setup-node.sh + +ARG DOCKER_LOTUS_IMPORT_SNAPSHOT https://snapshots.mainnet.filops.net/minimal/latest +ENV DOCKER_LOTUS_IMPORT_SNAPSHOT ${DOCKER_LOTUS_IMPORT_SNAPSHOT} +ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters +ENV LOTUS_PATH /var/lib/lotus +ENV DOCKER_LOTUS_IMPORT_WALLET "" + +RUN mkdir /var/lib/lotus /var/tmp/filecoin-proof-parameters +RUN chown fc: /var/lib/lotus /var/tmp/filecoin-proof-parameters + +VOLUME /var/lib/lotus +VOLUME /var/tmp/filecoin-proof-parameters + +USER fc + +EXPOSE 1234 + +ENTRYPOINT ["/docker-lotus-entrypoint.sh"] + +CMD ["-help"] + +##################################### +FROM lotus-base AS lotus-all-in-one + +ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters +ENV LOTUS_MINER_PATH /var/lib/lotus-miner +ENV LOTUS_PATH /var/lib/lotus +ENV LOTUS_WORKER_PATH /var/lib/lotus-worker +ENV WALLET_PATH /var/lib/lotus-wallet + +COPY --from=lotus-builder /opt/filecoin/lotus /usr/local/bin/ +COPY --from=lotus-builder /opt/filecoin/lotus-seed /usr/local/bin/ +COPY --from=lotus-builder /opt/filecoin/lotus-shed /usr/local/bin/ +#COPY --from=lotus-builder /opt/filecoin/lotus-wallet /usr/local/bin/ +#COPY --from=lotus-builder /opt/filecoin/lotus-gateway /usr/local/bin/ +COPY --from=lotus-builder /opt/filecoin/lotus-miner /usr/local/bin/ +COPY --from=lotus-builder /opt/filecoin/lotus-worker /usr/local/bin/ +#COPY --from=lotus-builder 
/opt/filecoin/lotus-stats /usr/local/bin/ +#COPY --from=lotus-builder /opt/filecoin/lotus-fountain /usr/local/bin/ + +RUN mkdir /var/tmp/filecoin-proof-parameters +RUN mkdir /var/lib/lotus +RUN mkdir /var/lib/lotus-miner +RUN mkdir /var/lib/lotus-worker +RUN mkdir /var/lib/lotus-wallet +RUN chown fc: /var/tmp/filecoin-proof-parameters +RUN chown fc: /var/lib/lotus +RUN chown fc: /var/lib/lotus-miner +RUN chown fc: /var/lib/lotus-worker +RUN chown fc: /var/lib/lotus-wallet + + +#VOLUME /var/tmp/filecoin-proof-parameters +#VOLUME /var/lib/lotus +#VOLUME /var/lib/lotus-miner +#VOLUME /var/lib/lotus-worker +#VOLUME /var/lib/lotus-wallet + +EXPOSE 1234 +EXPOSE 2345 +EXPOSE 3456 +EXPOSE 1777 diff --git a/app/data/container-build/cerc-lotus/build.sh b/app/data/container-build/cerc-lotus/build.sh new file mode 100755 index 00000000..10bbf42c --- /dev/null +++ b/app/data/container-build/cerc-lotus/build.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash +# Build cerc/lotus +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +# Per lotus docs, 'releases' branch always contains latest stable release +git -C ${CERC_REPO_BASE_DIR}/lotus checkout releases + +# Replace repo's Dockerfile with modified one +cp ${SCRIPT_DIR}/Dockerfile ${CERC_REPO_BASE_DIR}/lotus/Dockerfile + +docker build -t cerc/lotus:local ${build_command_args} ${CERC_REPO_BASE_DIR}/lotus diff --git a/app/data/container-image-list.txt b/app/data/container-image-list.txt index 0caea48a..8a3266dd 100644 --- a/app/data/container-image-list.txt +++ b/app/data/container-image-list.txt @@ -36,3 +36,4 @@ cerc/optimism-l2geth cerc/optimism-op-batcher cerc/optimism-op-node cerc/optimism-op-proposer +cerc/lotus diff --git a/app/data/pod-list.txt b/app/data/pod-list.txt index f24c9ed0..a598d467 100644 --- a/app/data/pod-list.txt +++ b/app/data/pod-list.txt @@ -24,3 +24,4 @@ tx-spammer kubo foundry fixturenet-optimism +fixturenet-lotus diff --git 
a/app/data/repository-list.txt b/app/data/repository-list.txt index 0d808d68..8740b123 100644 --- a/app/data/repository-list.txt +++ b/app/data/repository-list.txt @@ -27,3 +27,4 @@ lirewine/sdk telackey/act_runner ethereum-optimism/op-geth ethereum-optimism/optimism +filecoin-project/lotus diff --git a/app/data/stacks/fixturenet-lotus/README.md b/app/data/stacks/fixturenet-lotus/README.md new file mode 100644 index 00000000..ded840cb --- /dev/null +++ b/app/data/stacks/fixturenet-lotus/README.md @@ -0,0 +1,57 @@ +# Lotus Fixturenet + +Instructions for deploying a local Lotus (Filecoin) chain for development and testing purposes using laconic-stack-orchestrator. + +## 1. Clone required repositories +``` +$ laconic-so --stack fixturenet-lotus setup-repositories +``` +## 2. Build the stack's packages and containers +``` +$ laconic-so --stack fixturenet-lotus build-containers +``` +## 3. Deploy the stack +``` +$ laconic-so --stack fixturenet-lotus deploy up +``` +Correct operation should be verified by checking the laconicd container's logs with: +``` +$ laconic-so --stack fixturenet-lotus deploy logs +``` +## 4. Get the multiaddress of miner node +The full nodes will need the multiaddress of the miner node to form a peer connection. Find the miner's multiaddress with: +``` +$ laconic-so --stack fixturenet-lotus deploy exec lotus-miner "lotus net listen" +/ip4/192.168.160.4/tcp/44523/p2p/12D3KooWQiLfXiyQQY79Bn4Yhuti2PwekBc6cccp1rFpCo5WssLC +/ip4/127.0.0.1/tcp/44523/p2p/12D3KooWQiLfXiyQQY79Bn4Yhuti2PwekBc6cccp1rFpCo5WssLC +``` +(Your node id will be different) Note the multiaddress and save it for a later step. + +## 5. Start the miner +Import the key: +``` +$ laconic-so --stack fixturenet-lotus deploy exec lotus-miner "lotus wallet import --as-default ~/.genesis-sectors/pre-seal-t01000.key" +imported key t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q successfully! 
+``` +Init the miner (this will take several minutes): +``` +$ laconic-so --stack fixturenet-lotus deploy exec lotus-miner "lotus-miner init --genesis-miner --actor=t01000 --sector-size=2KiB --pre-sealed-sectors=~/.genesis-sectors --pre-sealed-metadata=~/.genesis-sectors/pre-seal-t01000.json --nosync" + +... +... +2023-05-08T15:48:32.660Z INFO main lotus-miner/init.go:282 Miner successfully created, you can now start it with 'lotus-miner run' +``` +Start the miner: +``` +$ laconic-so --stack fixturenet-lotus deploy exec lotus-miner "lotus-miner run --nosync" +``` + +## 6. Connect the nodes +Connect each full node to the miner using the multiaddress from step 4. +``` +$ laconic-so --stack fixturenet-lotus deploy exec lotus-node-1 "lotus net connect " +connect 12D3KooWQiLfXiyQQY79Bn4Yhuti2PwekBc6cccp1rFpCo5WssLC: success + +$ laconic-so --stack fixturenet-lotus deploy exec lotus-node-2 "lotus net connect " +connect 12D3KooWQiLfXiyQQY79Bn4Yhuti2PwekBc6cccp1rFpCo5WssLC: success +``` diff --git a/app/data/stacks/fixturenet-lotus/stack.yml b/app/data/stacks/fixturenet-lotus/stack.yml index 968123b1..35617c4b 100644 --- a/app/data/stacks/fixturenet-lotus/stack.yml +++ b/app/data/stacks/fixturenet-lotus/stack.yml @@ -1,9 +1,9 @@ version: "1.0" name: fixturenet-lotus -decription: "Lotus (Filecoin) Fixturenet" +description: "A lotus fixturenet" repos: - - + - filecoin-project/lotus containers: - - + - cerc/lotus pods: - fixturenet-lotus diff --git a/build/lib/app/__init__.py b/build/lib/app/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/build/lib/app/__main__.py b/build/lib/app/__main__.py new file mode 100644 index 00000000..e69de29b diff --git a/build/lib/app/base.py b/build/lib/app/base.py new file mode 100644 index 00000000..f6cf0650 --- /dev/null +++ b/build/lib/app/base.py @@ -0,0 +1,71 @@ +# Copyright © 2022, 2023 Cerc + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General 
Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +import os +from abc import ABC, abstractmethod +from .deploy_system import get_stack_status + + +def get_stack(config, stack): + if stack == "package-registry": + return package_registry_stack(config, stack) + else: + return base_stack(config, stack) + + +class base_stack(ABC): + + def __init__(self, config, stack): + self.config = config + self.stack = stack + + @abstractmethod + def ensure_available(self): + pass + + @abstractmethod + def get_url(self): + pass + + +class package_registry_stack(base_stack): + + def ensure_available(self): + self.url = "" + # Check if we were given an external registry URL + url_from_environment = os.environ.get("CERC_NPM_REGISTRY_URL") + if url_from_environment: + if self.config.verbose: + print(f"Using package registry url from CERC_NPM_REGISTRY_URL: {url_from_environment}") + self.url = url_from_environment + else: + # Otherwise we expect to use the local package-registry stack + # First check if the stack is up + registry_running = get_stack_status(self.config, "package-registry") + if registry_running: + # If it is available, get its mapped port and construct its URL + if self.config.debug: + print("Found local package registry stack is up") + # TODO: get url from deploy-stack + self.url = "http://gitea.local:3000/api/packages/cerc-io/npm/" + else: + # If not, print a message about how to start it and return fail to the caller + print("ERROR: The package-registry stack is not running, and no external registry specified 
with CERC_NPM_REGISTRY_URL") + print("ERROR: Start the local package registry with: laconic-so --stack package-registry deploy-system up") + return False + return True + + def get_url(self): + return self.url diff --git a/build/lib/app/build_containers.py b/build/lib/app/build_containers.py new file mode 100644 index 00000000..6cd8f2b8 --- /dev/null +++ b/build/lib/app/build_containers.py @@ -0,0 +1,141 @@ +# Copyright © 2022, 2023 Cerc + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +# Builds or pulls containers for the system components + +# env vars: +# CERC_REPO_BASE_DIR defaults to ~/cerc + +# TODO: display the available list of containers; allow re-build of either all or specific containers + +import os +import sys +from decouple import config +import subprocess +import click +import importlib.resources +from pathlib import Path +from .util import include_exclude_check, get_parsed_stack_config + +# TODO: find a place for this +# epilog="Config provided either in .env or settings.ini or env vars: CERC_REPO_BASE_DIR (defaults to ~/cerc)" + + +@click.command() +@click.option('--include', help="only build these containers") +@click.option('--exclude', help="don\'t build these containers") +@click.option("--force-rebuild", is_flag=True, default=False, help="Override dependency checking -- always rebuild") +@click.option("--extra-build-args", help="Supply extra arguments to build") +@click.pass_context +def command(ctx, include, exclude, force_rebuild, extra_build_args): + '''build the set of containers required for a complete stack''' + + quiet = ctx.obj.quiet + verbose = ctx.obj.verbose + dry_run = ctx.obj.dry_run + debug = ctx.obj.debug + local_stack = ctx.obj.local_stack + stack = ctx.obj.stack + continue_on_error = ctx.obj.continue_on_error + + # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure + container_build_dir = Path(__file__).absolute().parent.joinpath("data", "container-build") + + if local_stack: + dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")] + print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}') + else: + dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc")) + + if not quiet: + print(f'Dev Root is: {dev_root_path}') + + if not os.path.isdir(dev_root_path): + print('Dev root directory doesn\'t exist, creating') + + # See: https://stackoverflow.com/a/20885799/1701505 + from . 
import data + with importlib.resources.open_text(data, "container-image-list.txt") as container_list_file: + all_containers = container_list_file.read().splitlines() + + containers_in_scope = [] + if stack: + stack_config = get_parsed_stack_config(stack) + containers_in_scope = stack_config['containers'] + else: + containers_in_scope = all_containers + + if verbose: + print(f'Containers: {containers_in_scope}') + if stack: + print(f"Stack: {stack}") + + # TODO: make this configurable + container_build_env = { + "CERC_NPM_REGISTRY_URL": config("CERC_NPM_REGISTRY_URL", default="http://gitea.local:3000/api/packages/cerc-io/npm/"), + "CERC_NPM_AUTH_TOKEN": config("CERC_NPM_AUTH_TOKEN", default=""), + "CERC_REPO_BASE_DIR": dev_root_path, + "CERC_CONTAINER_BASE_DIR": container_build_dir, + "CERC_HOST_UID": f"{os.getuid()}", + "CERC_HOST_GID": f"{os.getgid()}", + "DOCKER_BUILDKIT": "0" + } + container_build_env.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {}) + container_build_env.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {}) + container_build_env.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {}) + docker_host_env = os.getenv("DOCKER_HOST") + if docker_host_env: + container_build_env.update({"DOCKER_HOST": docker_host_env}) + + def process_container(container): + if not quiet: + print(f"Building: {container}") + build_dir = os.path.join(container_build_dir, container.replace("/", "-")) + build_script_filename = os.path.join(build_dir, "build.sh") + if verbose: + print(f"Build script filename: {build_script_filename}") + if os.path.exists(build_script_filename): + build_command = build_script_filename + else: + if verbose: + print(f"No script file found: {build_script_filename}, using default build script") + repo_dir = container.split('/')[1] + # TODO: make this less of a hack -- should be specified in some metadata somewhere + # Check if we have a repo for this container. 
If not, set the context dir to the container-build subdir + repo_full_path = os.path.join(dev_root_path, repo_dir) + repo_dir_or_build_dir = repo_dir if os.path.exists(repo_full_path) else build_dir + build_command = os.path.join(container_build_dir, "default-build.sh") + f" {container}:local {repo_dir_or_build_dir}" + if not dry_run: + if verbose: + print(f"Executing: {build_command} with environment: {container_build_env}") + build_result = subprocess.run(build_command, shell=True, env=container_build_env) + if verbose: + print(f"Return code is: {build_result.returncode}") + if build_result.returncode != 0: + print(f"Error running build for {container}") + if not continue_on_error: + print("FATAL Error: container build failed and --continue-on-error not set, exiting") + sys.exit(1) + else: + print("****** Container Build Error, continuing because --continue-on-error is set") + else: + print("Skipped") + + for container in containers_in_scope: + if include_exclude_check(container, include, exclude): + process_container(container) + else: + if verbose: + print(f"Excluding: {container}") diff --git a/build/lib/app/build_npms.py b/build/lib/app/build_npms.py new file mode 100644 index 00000000..d56b2774 --- /dev/null +++ b/build/lib/app/build_npms.py @@ -0,0 +1,171 @@ +# Copyright © 2022, 2023 Cerc + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +# Builds or pulls containers for the system components + +# env vars: +# CERC_REPO_BASE_DIR defaults to ~/cerc + +import os +import sys +from shutil import rmtree, copytree +from decouple import config +import click +import importlib.resources +from python_on_whales import docker, DockerException +from .base import get_stack +from .util import include_exclude_check, get_parsed_stack_config + +builder_js_image_name = "cerc/builder-js:local" + +@click.command() +@click.option('--include', help="only build these packages") +@click.option('--exclude', help="don\'t build these packages") +@click.option("--force-rebuild", is_flag=True, default=False, help="Override existing target package version check -- force rebuild") +@click.option("--extra-build-args", help="Supply extra arguments to build") +@click.pass_context +def command(ctx, include, exclude, force_rebuild, extra_build_args): + '''build the set of npm packages required for a complete stack''' + + quiet = ctx.obj.quiet + verbose = ctx.obj.verbose + dry_run = ctx.obj.dry_run + local_stack = ctx.obj.local_stack + debug = ctx.obj.debug + stack = ctx.obj.stack + continue_on_error = ctx.obj.continue_on_error + + _ensure_prerequisites() + + # build-npms depends on having access to a writable package registry + # so we check here that it is available + package_registry_stack = get_stack(ctx.obj, "package-registry") + registry_available = package_registry_stack.ensure_available() + if not registry_available: + print("FATAL: no npm registry available for build-npms command") + sys.exit(1) + npm_registry_url = package_registry_stack.get_url() + npm_registry_url_token = config("CERC_NPM_AUTH_TOKEN", default=None) + if not npm_registry_url_token: + print("FATAL: CERC_NPM_AUTH_TOKEN is not defined") + sys.exit(1) + + if local_stack: + dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")] + print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}') + else: + dev_root_path = 
os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc")) + + build_root_path = os.path.join(dev_root_path, "build-trees") + + if verbose: + print(f'Dev Root is: {dev_root_path}') + + if not os.path.isdir(dev_root_path): + print('Dev root directory doesn\'t exist, creating') + os.makedirs(dev_root_path) + if not os.path.isdir(dev_root_path): + print('Build root directory doesn\'t exist, creating') + os.makedirs(build_root_path) + + # See: https://stackoverflow.com/a/20885799/1701505 + from . import data + with importlib.resources.open_text(data, "npm-package-list.txt") as package_list_file: + all_packages = package_list_file.read().splitlines() + + packages_in_scope = [] + if stack: + stack_config = get_parsed_stack_config(stack) + # TODO: syntax check the input here + packages_in_scope = stack_config['npms'] + else: + packages_in_scope = all_packages + + if verbose: + print(f'Packages: {packages_in_scope}') + + def build_package(package): + if not quiet: + print(f"Building npm package: {package}") + repo_dir = package + repo_full_path = os.path.join(dev_root_path, repo_dir) + # Copy the repo and build that to avoid propagating JS tooling file changes back into the cloned repo + repo_copy_path = os.path.join(build_root_path, repo_dir) + # First delete any old build tree + if os.path.isdir(repo_copy_path): + if verbose: + print(f"Deleting old build tree: {repo_copy_path}") + if not dry_run: + rmtree(repo_copy_path) + # Now copy the repo into the build tree location + if verbose: + print(f"Copying build tree from: {repo_full_path} to: {repo_copy_path}") + if not dry_run: + copytree(repo_full_path, repo_copy_path) + build_command = ["sh", "-c", f"cd /workspace && build-npm-package-local-dependencies.sh {npm_registry_url}"] + if not dry_run: + if verbose: + print(f"Executing: {build_command}") + # Originally we used the PEP 584 merge operator: + # envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token} | ({"CERC_SCRIPT_DEBUG": "true"} if debug else {}) + # but 
that isn't available in Python 3.8 (default in Ubuntu 20) so for now we use dict.update: + envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token, + "LACONIC_HOSTED_CONFIG_FILE": "config-hosted.yml" # Convention used by our web app packages + } + envs.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {}) + envs.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {}) + envs.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {}) + try: + docker.run(builder_js_image_name, + remove=True, + interactive=True, + tty=True, + user=f"{os.getuid()}:{os.getgid()}", + envs=envs, + # TODO: detect this host name in npm_registry_url rather than hard-wiring it + add_hosts=[("gitea.local", "host-gateway")], + volumes=[(repo_copy_path, "/workspace")], + command=build_command + ) + # Note that although the docs say that build_result should contain + # the command output as a string, in reality it is always the empty string. + # Since we detect errors via catching exceptions below, we can safely ignore it here. 
+ except DockerException as e: + print(f"Error executing build for {package} in container:\n {e}") + if not continue_on_error: + print("FATAL Error: build failed and --continue-on-error not set, exiting") + sys.exit(1) + else: + print("****** Build Error, continuing because --continue-on-error is set") + + else: + print("Skipped") + + for package in packages_in_scope: + if include_exclude_check(package, include, exclude): + build_package(package) + else: + if verbose: + print(f"Excluding: {package}") + + +def _ensure_prerequisites(): + # Check that the builder-js container is available and + # Tell the user how to build it if not + images = docker.image.list(builder_js_image_name) + if len(images) == 0: + print(f"FATAL: builder image: {builder_js_image_name} is required but was not found") + print("Please run this command to create it: laconic-so --stack build-support build-containers") + sys.exit(1) diff --git a/build/lib/app/data/__init__.py b/build/lib/app/data/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/build/lib/app/data/build_tag.txt b/build/lib/app/data/build_tag.txt new file mode 100644 index 00000000..f1a675f1 --- /dev/null +++ b/build/lib/app/data/build_tag.txt @@ -0,0 +1,2 @@ +# This file should be re-generated running: scripts/create_build_tag_file.sh script +1.1.0-882374a-202305060108 diff --git a/build/lib/app/data/compose/docker-compose-contract.yml b/build/lib/app/data/compose/docker-compose-contract.yml new file mode 100644 index 00000000..b8815f90 --- /dev/null +++ b/build/lib/app/data/compose/docker-compose-contract.yml @@ -0,0 +1,11 @@ +version: "3.2" +services: + contract: + depends_on: + go-ethereum: + condition: service_healthy + image: cerc/test-contract:local + environment: + ETH_ADDR: "http://go-ethereum:8545" + ports: + - "127.0.0.1:3000:3000" diff --git a/build/lib/app/data/compose/docker-compose-eth-probe.yml b/build/lib/app/data/compose/docker-compose-eth-probe.yml new file mode 100644 index 
00000000..8051db20 --- /dev/null +++ b/build/lib/app/data/compose/docker-compose-eth-probe.yml @@ -0,0 +1,59 @@ +version: '3.2' + +services: + eth-probe-db: + restart: always + image: timescale/timescaledb:latest-pg14 + environment: + POSTGRES_USER: "probe" + POSTGRES_DB: "probe" + POSTGRES_PASSWORD: "probe" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "5432"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 10s + volumes: + - ../../eth-probe/db/schema.sql:/docker-entrypoint-initdb.d/init.sql + ports: + - 5432 + eth-probe-mq: + restart: always + image: cerc/eth-probe:local + environment: + MODE: "mq" + PROBE_DEV: "false" + PGPORT: 5432 + PGPASSWORD: "probe" + DB_USER: "probe" + PROBE_DB_NAME: "probe" + PROBE_DB_LOCATION: "eth-probe-db" + MQ_HOST: "0.0.0.0" + MQ_PORT: 33333 + DEBUG: "vulcanize:*,cerc:*" + healthcheck: + test: [ "CMD", "nc", "-v", "localhost", "33333" ] + interval: 20s + timeout: 5s + retries: 15 + start_period: 10s + depends_on: + eth-probe-db: + condition: service_healthy + eth-probe-probe: + restart: always + image: cerc/eth-probe:local + environment: + MODE: "probe" + PROBE_DEV: "false" + MQ_HOST: "eth-probe-mq" + MQ_PORT: 33333 + PROBE_ID: 0 + GETH_HOST: "fixturenet-eth-geth-1" + GETH_MIN_BLOCK: 5 + GETHJSON_URL: "http://fixturenet-eth-geth-1:9898/geth.json" + DEBUG: "vulcanize:*,cerc:*,-vulcanize:sniffer:dpt:error" + depends_on: + eth-probe-mq: + condition: service_healthy diff --git a/build/lib/app/data/compose/docker-compose-eth-statediff-fill-service.yml b/build/lib/app/data/compose/docker-compose-eth-statediff-fill-service.yml new file mode 100644 index 00000000..16135250 --- /dev/null +++ b/build/lib/app/data/compose/docker-compose-eth-statediff-fill-service.yml @@ -0,0 +1,20 @@ +version: "3.2" +services: + eth-statediff-fill-service: + restart: unless-stopped + depends_on: + ipld-eth-db: + condition: service_healthy + image: cerc/eth-statediff-fill-service:local + environment: + ETH_SERVER_HTTPPATH: 0.0.0.0:8085 + 
VDB_COMMAND: "serve" + DATABASE_NAME: "cerc_testing" + DATABASE_HOSTNAME: "ipld-eth-db" + DATABASE_PORT: 5432 + DATABASE_USER: "vdbm" + DATABASE_PASSWORD: "password" + ETH_HTTP_PATH: $eth_http_path + WATCHED_ADDRESS_GAP_FILLER_INTERVAL: $watched_address_gap_filler_interval + ports: + - "127.0.0.1:8085:8085" diff --git a/build/lib/app/data/compose/docker-compose-fixturenet-eth-metrics.yml b/build/lib/app/data/compose/docker-compose-fixturenet-eth-metrics.yml new file mode 100644 index 00000000..832e6589 --- /dev/null +++ b/build/lib/app/data/compose/docker-compose-fixturenet-eth-metrics.yml @@ -0,0 +1,23 @@ +version: "3.2" +services: + prometheus: + restart: always + image: prom/prometheus + depends_on: + fixturenet-eth-geth-1: + condition: service_healthy + volumes: + - ../config/fixturenet-eth-metrics/prometheus/etc:/etc/prometheus + ports: + - "9090" + grafana: + restart: always + image: grafana/grafana + environment: + - GF_SECURITY_ADMIN_PASSWORD=changeme6325 + volumes: + - ../config/fixturenet-eth-metrics/grafana/etc/provisioning/dashboards:/etc/grafana/provisioning/dashboards + - ../config/fixturenet-eth-metrics/grafana/etc/provisioning/datasources:/etc/grafana/provisioning/datasources + - ../config/fixturenet-eth-metrics/grafana/etc/dashboards:/etc/grafana/dashboards + ports: + - "3000" diff --git a/build/lib/app/data/compose/docker-compose-fixturenet-eth.yml b/build/lib/app/data/compose/docker-compose-fixturenet-eth.yml new file mode 100644 index 00000000..c687b326 --- /dev/null +++ b/build/lib/app/data/compose/docker-compose-fixturenet-eth.yml @@ -0,0 +1,123 @@ +version: '3.7' + +services: + fixturenet-eth-bootnode-geth: + hostname: fixturenet-eth-bootnode-geth + env_file: + - ../config/fixturenet-eth/fixturenet-eth.env + environment: + RUN_BOOTNODE: "true" + image: cerc/fixturenet-eth-geth:local + volumes: + - fixturenet_eth_bootnode_geth_data:/root/ethdata + ports: + - "9898" + - "30303" + + fixturenet-eth-geth-1: + hostname: fixturenet-eth-geth-1 + 
cap_add: + - SYS_PTRACE + environment: + CERC_REMOTE_DEBUG: "true" + CERC_RUN_STATEDIFF: "detect" + CERC_STATEDIFF_DB_NODE_ID: 1 + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + env_file: + - ../config/fixturenet-eth/fixturenet-eth.env + image: cerc/fixturenet-eth-geth:local + volumes: + - fixturenet_eth_geth_1_data:/root/ethdata + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "8545"] + interval: 30s + timeout: 10s + retries: 10 + start_period: 3s + depends_on: + - fixturenet-eth-bootnode-geth + ports: + - "8545" + - "40000" + - "6060" + + fixturenet-eth-geth-2: + hostname: fixturenet-eth-geth-2 + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "8545"] + interval: 30s + timeout: 10s + retries: 10 + start_period: 3s + env_file: + - ../config/fixturenet-eth/fixturenet-eth.env + image: cerc/fixturenet-eth-geth:local + depends_on: + - fixturenet-eth-bootnode-geth + volumes: + - fixturenet_eth_geth_2_data:/root/ethdata + + fixturenet-eth-bootnode-lighthouse: + hostname: fixturenet-eth-bootnode-lighthouse + environment: + RUN_BOOTNODE: "true" + image: cerc/fixturenet-eth-lighthouse:local + volumes: + - fixturenet_eth_bootnode_lighthouse_data:/opt/testnet/build/cl + + fixturenet-eth-lighthouse-1: + hostname: fixturenet-eth-lighthouse-1 + healthcheck: + test: ["CMD", "wget", "--tries=1", "--connect-timeout=1", "--quiet", "-O", "-", "http://localhost:8001/eth/v2/beacon/blocks/head"] + interval: 30s + timeout: 10s + retries: 10 + start_period: 30s + env_file: + - ../config/fixturenet-eth/fixturenet-eth.env + environment: + NODE_NUMBER: "1" + ETH1_ENDPOINT: "http://fixturenet-eth-geth-1:8545" + EXECUTION_ENDPOINT: "http://fixturenet-eth-geth-1:8551" + image: cerc/fixturenet-eth-lighthouse:local + volumes: + - fixturenet_eth_lighthouse_1_data:/opt/testnet/build/cl + depends_on: + fixturenet-eth-bootnode-lighthouse: + condition: service_started + fixturenet-eth-geth-1: + condition: service_healthy + ports: + - "8001" + + fixturenet-eth-lighthouse-2: + hostname: 
fixturenet-eth-lighthouse-2 + healthcheck: + test: ["CMD", "wget", "--tries=1", "--connect-timeout=1", "--quiet", "-O", "-", "http://localhost:8001/eth/v2/beacon/blocks/head"] + interval: 30s + timeout: 10s + retries: 10 + start_period: 30s + env_file: + - ../config/fixturenet-eth/fixturenet-eth.env + environment: + NODE_NUMBER: "2" + ETH1_ENDPOINT: "http://fixturenet-eth-geth-2:8545" + EXECUTION_ENDPOINT: "http://fixturenet-eth-geth-2:8551" + LIGHTHOUSE_GENESIS_STATE_URL: "http://fixturenet-eth-lighthouse-1:8001/eth/v2/debug/beacon/states/0" + image: cerc/fixturenet-eth-lighthouse:local + volumes: + - fixturenet_eth_lighthouse_2_data:/opt/testnet/build/cl + depends_on: + fixturenet-eth-bootnode-lighthouse: + condition: service_started + fixturenet-eth-geth-2: + condition: service_healthy + +volumes: + fixturenet_eth_bootnode_geth_data: + fixturenet_eth_geth_1_data: + fixturenet_eth_geth_2_data: + fixturenet_eth_bootnode_lighthouse_data: + fixturenet_eth_lighthouse_1_data: + fixturenet_eth_lighthouse_2_data: diff --git a/build/lib/app/data/compose/docker-compose-fixturenet-laconic-console.yml b/build/lib/app/data/compose/docker-compose-fixturenet-laconic-console.yml new file mode 100644 index 00000000..23185957 --- /dev/null +++ b/build/lib/app/data/compose/docker-compose-fixturenet-laconic-console.yml @@ -0,0 +1,8 @@ +services: + laconic-console: + restart: unless-stopped + image: cerc/laconic-console-host:local + environment: + - LACONIC_HOSTED_ENDPOINT=${LACONIC_HOSTED_ENDPOINT:-http://localhost} + ports: + - "80" diff --git a/build/lib/app/data/compose/docker-compose-fixturenet-laconicd.yml b/build/lib/app/data/compose/docker-compose-fixturenet-laconicd.yml new file mode 100644 index 00000000..9a974e90 --- /dev/null +++ b/build/lib/app/data/compose/docker-compose-fixturenet-laconicd.yml @@ -0,0 +1,26 @@ +version: "3.2" +services: + laconicd: + restart: unless-stopped + image: cerc/laconicd:local + command: ["sh", 
"/docker-entrypoint-scripts.d/create-fixturenet.sh"] + volumes: + # TODO: look at folding these scripts into the container + - ../config/fixturenet-laconicd/create-fixturenet.sh:/docker-entrypoint-scripts.d/create-fixturenet.sh + - ../config/fixturenet-laconicd/export-mykey.sh:/docker-entrypoint-scripts.d/export-mykey.sh + - ../config/fixturenet-laconicd/export-myaddress.sh:/docker-entrypoint-scripts.d/export-myaddress.sh + # TODO: determine which of the ports below is really needed + ports: + - "6060" + - "26657" + - "26656" + - "9473:9473" + - "8545" + - "8546" + - "9090" + - "9091" + - "1317" + cli: + image: cerc/laconic-registry-cli:local + volumes: + - ../config/fixturenet-laconicd/registry-cli-config-template.yml:/registry-cli-config-template.yml diff --git a/build/lib/app/data/compose/docker-compose-fixturenet-lotus.yml b/build/lib/app/data/compose/docker-compose-fixturenet-lotus.yml new file mode 100644 index 00000000..b07784dd --- /dev/null +++ b/build/lib/app/data/compose/docker-compose-fixturenet-lotus.yml @@ -0,0 +1,83 @@ +version: "3.8" +services: + + lotus-miner: + hostname: lotus-miner + env_file: + - ../config/fixturenet-lotus/lotus-env.env + image: cerc/lotus:local + volumes: + - ../config/fixturenet-lotus/setup-miner.sh:/docker-entrypoint-scripts.d/setup-miner.sh + - ../config/fixturenet-lotus/genesis/devgen.car:/devgen.car + - $HOME/stack-orchestrator/app/data/config/fixturenet-lotus/genesis/.genesis-sectors:/root/.genesis-sectors +# - lotus-local-net-shared:/root/.lotus-local-net + # healthcheck: + # test: ["CMD-SHELL", "grep 'started ChainNotify channel' /var/log/lotus.log"] + # interval: 10s + # timeout: 10s + # retries: 5 + # start_period: 30s +# depends_on: +# - fixturenet-lotus-bootnode + entrypoint: ["sh", "/docker-entrypoint-scripts.d/setup-miner.sh"] + ports: + - "1234" + - "2345" + - "3456" + - "1777" + + lotus-node-1: + hostname: lotus-node-1 + env_file: + - ../config/fixturenet-lotus/lotus-env.env + image: cerc/lotus:local + volumes: 
+ - ../config/fixturenet-lotus/setup-node.sh:/docker-entrypoint-scripts.d/setup-node.sh + - ../config/fixturenet-lotus/genesis/devgen.car:/devgen.car + - ../config/fixturenet-lotus/genesis/.genesis-sectors:/root/.genesis-sectors +# - ./myscripts/pre-seal-t01000.key:/root/.genesis-sectors/pre-seal-t01000.key +# - ./myscripts/pre-seal-t01000.json:/root/.genesis-sectors/pre-seal-t01000.json +# - lotus-local-net-shared:/root/.lotus-local-net +# healthcheck: +# test: ["CMD", "nc", "-v", "localhost", "8545"] +# interval: 30s +# timeout: 10s +# retries: 10 +# start_period: 3s +# depends_on: +# - fixturenet-lotus-1 + entrypoint: ["sh", "/docker-entrypoint-scripts.d/setup-node.sh"] +# entrypoint: ["lotus", "--version"] + ports: + - "1234" + - "2345" + - "3456" + - "1777" + + lotus-node-2: + hostname: lotus-node-2 + env_file: + - ../config/fixturenet-lotus/lotus-env.env + image: cerc/lotus:local + volumes: + - ../config/fixturenet-lotus/setup-node.sh:/docker-entrypoint-scripts.d/setup-node.sh + - ../config/fixturenet-lotus/genesis/devgen.car:/devgen.car + - ../config/fixturenet-lotus/genesis/.genesis-sectors:/root/.genesis-sectors +# - ./myscripts/pre-seal-t01000.key:/root/.genesis-sectors/pre-seal-t01000.key +# - ./myscripts/pre-seal-t01000.json:/root/.genesis-sectors/pre-seal-t01000.json +# - lotus-local-net-shared:/root/.lotus-local-net +# healthcheck: +# test: ["CMD", "nc", "-v", "localhost", "8545"] +# interval: 30s +# timeout: 10s +# retries: 10 +# start_period: 3s +# depends_on: +# - fixturenet-lotus-1 + entrypoint: ["sh", "/docker-entrypoint-scripts.d/setup-node.sh"] +# entrypoint: ["lotus", "--version"] + ports: + - "1234" + - "2345" + - "3456" + - "1777" diff --git a/build/lib/app/data/compose/docker-compose-fixturenet-optimism.yml b/build/lib/app/data/compose/docker-compose-fixturenet-optimism.yml new file mode 100644 index 00000000..c9e17b03 --- /dev/null +++ b/build/lib/app/data/compose/docker-compose-fixturenet-optimism.yml @@ -0,0 +1,155 @@ +version: '3.7' + 
+services: + # Generates and funds the accounts required when setting up the L2 chain (outputs to volume l2_accounts) + # Creates / updates the configuration for L1 contracts deployment + # Deploys the L1 smart contracts (outputs to volume l1_deployment) + fixturenet-optimism-contracts: + hostname: fixturenet-optimism-contracts + image: cerc/optimism-contracts:local + env_file: + - ../config/fixturenet-optimism/l1-params.env + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_L1_CHAIN_ID: ${CERC_L1_CHAIN_ID} + CERC_L1_RPC: ${CERC_L1_RPC} + CERC_L1_ACCOUNTS_CSV_URL: ${CERC_L1_ACCOUNTS_CSV_URL} + CERC_L1_ADDRESS: ${CERC_L1_ADDRESS} + CERC_L1_PRIV_KEY: ${CERC_L1_PRIV_KEY} + CERC_L1_ADDRESS_2: ${CERC_L1_ADDRESS_2} + CERC_L1_PRIV_KEY_2: ${CERC_L1_PRIV_KEY_2} + # Waits for L1 endpoint to be up before running the script + command: | + "./wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- ./run.sh" + volumes: + - ../config/wait-for-it.sh:/app/packages/contracts-bedrock/wait-for-it.sh + - ../container-build/cerc-optimism-contracts/hardhat-tasks/verify-contract-deployment.ts:/app/packages/contracts-bedrock/tasks/verify-contract-deployment.ts + - ../container-build/cerc-optimism-contracts/hardhat-tasks/rekey-json.ts:/app/packages/contracts-bedrock/tasks/rekey-json.ts + - ../container-build/cerc-optimism-contracts/hardhat-tasks/send-balance.ts:/app/packages/contracts-bedrock/tasks/send-balance.ts + - ../config/fixturenet-optimism/optimism-contracts/update-config.js:/app/packages/contracts-bedrock/update-config.js + - ../config/fixturenet-optimism/optimism-contracts/run.sh:/app/packages/contracts-bedrock/run.sh + - l2_accounts:/l2-accounts + - l1_deployment:/app/packages/contracts-bedrock + extra_hosts: + - "host.docker.internal:host-gateway" + + # Generates the config files required for L2 (outputs to volume l2_config) + op-node-l2-config-gen: + image: cerc/optimism-op-node:local + depends_on: + 
fixturenet-optimism-contracts: + condition: service_completed_successfully + env_file: + - ../config/fixturenet-optimism/l1-params.env + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_L1_RPC: ${CERC_L1_RPC} + volumes: + - ../config/fixturenet-optimism/generate-l2-config.sh:/app/generate-l2-config.sh + - l1_deployment:/contracts-bedrock:ro + - l2_config:/app + command: ["sh", "/app/generate-l2-config.sh"] + extra_hosts: + - "host.docker.internal:host-gateway" + + # Initializes and runs the L2 execution client (outputs to volume l2_geth_data) + op-geth: + image: cerc/optimism-l2geth:local + depends_on: + op-node-l2-config-gen: + condition: service_started + volumes: + - ../config/fixturenet-optimism/run-op-geth.sh:/run-op-geth.sh + - l2_config:/op-node:ro + - l2_accounts:/l2-accounts:ro + - l2_geth_data:/datadir + entrypoint: "sh" + command: "/run-op-geth.sh" + ports: + - "0.0.0.0:8545:8545" + healthcheck: + test: ["CMD", "nc", "-vz", "localhost:8545"] + interval: 30s + timeout: 10s + retries: 10 + start_period: 10s + + # Runs the L2 consensus client (Sequencer node) + op-node: + image: cerc/optimism-op-node:local + depends_on: + op-geth: + condition: service_healthy + env_file: + - ../config/fixturenet-optimism/l1-params.env + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_L1_RPC: ${CERC_L1_RPC} + volumes: + - ../config/fixturenet-optimism/run-op-node.sh:/app/run-op-node.sh + - l2_config:/op-node-data:ro + - l2_accounts:/l2-accounts:ro + command: ["sh", "/app/run-op-node.sh"] + ports: + - "0.0.0.0:8547:8547" + healthcheck: + test: ["CMD", "nc", "-vz", "localhost:8547"] + interval: 30s + timeout: 10s + retries: 10 + start_period: 10s + extra_hosts: + - "host.docker.internal:host-gateway" + + # Runs the batcher (takes transactions from the Sequencer and publishes them to L1) + op-batcher: + image: cerc/optimism-op-batcher:local + depends_on: + op-node: + condition: service_healthy + op-geth: + condition: service_healthy + env_file: + - 
../config/fixturenet-optimism/l1-params.env + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_L1_RPC: ${CERC_L1_RPC} + volumes: + - ../config/wait-for-it.sh:/wait-for-it.sh + - ../config/fixturenet-optimism/run-op-batcher.sh:/run-op-batcher.sh + - l2_accounts:/l2-accounts:ro + entrypoint: ["sh", "-c"] + # Waits for L1 endpoint to be up before running the batcher + command: | + "/wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- /run-op-batcher.sh" + extra_hosts: + - "host.docker.internal:host-gateway" + + # Runs the proposer (periodically submits new state roots to L1) + op-proposer: + image: cerc/optimism-op-proposer:local + depends_on: + op-node: + condition: service_healthy + env_file: + - ../config/fixturenet-optimism/l1-params.env + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_L1_RPC: ${CERC_L1_RPC} + volumes: + - ../config/wait-for-it.sh:/wait-for-it.sh + - ../config/fixturenet-optimism/run-op-proposer.sh:/run-op-proposer.sh + - l1_deployment:/contracts-bedrock:ro + - l2_accounts:/l2-accounts:ro + entrypoint: ["sh", "-c"] + # Waits for L1 endpoint to be up before running the proposer + command: | + "/wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- /run-op-proposer.sh" + extra_hosts: + - "host.docker.internal:host-gateway" + +volumes: + l1_deployment: + l2_accounts: + l2_config: + l2_geth_data: diff --git a/build/lib/app/data/compose/docker-compose-foundry.yml b/build/lib/app/data/compose/docker-compose-foundry.yml new file mode 100644 index 00000000..e489611c --- /dev/null +++ b/build/lib/app/data/compose/docker-compose-foundry.yml @@ -0,0 +1,8 @@ +# Add-on pod to include foundry tooling within a fixturenet +services: + foundry: + image: cerc/foundry:local + command: ["while :; do sleep 600; done"] + volumes: + - ../config/foundry/foundry.toml:/foundry.toml + - ./foundry/workspace:/workspace 
diff --git a/build/lib/app/data/compose/docker-compose-go-ethereum-foundry.yml b/build/lib/app/data/compose/docker-compose-go-ethereum-foundry.yml new file mode 100644 index 00000000..3da40d7f --- /dev/null +++ b/build/lib/app/data/compose/docker-compose-go-ethereum-foundry.yml @@ -0,0 +1,30 @@ +version: "3.2" + +services: + go-ethereum: + restart: unless-stopped + depends_on: + ipld-eth-db: + condition: service_healthy + image: cerc/go-ethereum-foundry:local + healthcheck: + test: ["CMD", "nc", "-vz", "localhost", "8545"] + interval: 30s + timeout: 3s + retries: 10 + environment: + DB_USER: vdbm + DB_NAME: cerc_testing + DB_HOST: ipld-eth-db + DB_PORT: 5432 + DB_PASSWORD: password + DB_WRITE: "true" + DB_TYPE: postgres + DB_DRIVER: sqlx + DB_WAIT_FOR_SYNC: "true" + CHAIN_ID: "99" + USE_GENESIS: "true" + EXTRA_START_ARGS: "--metrics --metrics.expensive --metrics.addr 0.0.0.0 --metrics.port 6060" + ports: + - "127.0.0.1:8545:8545" + - "127.0.0.1:8546:8546" diff --git a/build/lib/app/data/compose/docker-compose-ipld-eth-beacon-db.yml b/build/lib/app/data/compose/docker-compose-ipld-eth-beacon-db.yml new file mode 100644 index 00000000..bb54cd1f --- /dev/null +++ b/build/lib/app/data/compose/docker-compose-ipld-eth-beacon-db.yml @@ -0,0 +1,17 @@ +version: "3.2" +services: + ipld-eth-beacon-db: + restart: always + image: cerc/ipld-eth-beacon-db:local + environment: + POSTGRES_USER: vdbm + POSTGRES_DB: cerc_testing + POSTGRES_PASSWORD: password + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "5432"] + interval: 30s + timeout: 10s + retries: 10 + start_period: 3s + ports: + - "127.0.0.1:8076:5432" diff --git a/build/lib/app/data/compose/docker-compose-ipld-eth-beacon-indexer.yml b/build/lib/app/data/compose/docker-compose-ipld-eth-beacon-indexer.yml new file mode 100644 index 00000000..533edf3c --- /dev/null +++ b/build/lib/app/data/compose/docker-compose-ipld-eth-beacon-indexer.yml @@ -0,0 +1,12 @@ +version: "3.2" +services: + ipld-eth-beacon-indexer: + restart: 
unless-stopped + depends_on: + ipld-eth-beacon-db: + condition: service_healthy + fixturenet-eth-lighthouse-1: + condition: service_healthy + image: cerc/ipld-eth-beacon-indexer:local + env_file: + - ../config/ipld-eth-beacon-indexer/indexer.env diff --git a/build/lib/app/data/compose/docker-compose-ipld-eth-db.yml b/build/lib/app/data/compose/docker-compose-ipld-eth-db.yml new file mode 100644 index 00000000..f437c66d --- /dev/null +++ b/build/lib/app/data/compose/docker-compose-ipld-eth-db.yml @@ -0,0 +1,31 @@ +version: "3.2" + +services: + migrations: + restart: on-failure + depends_on: + ipld-eth-db: + condition: service_healthy + image: cerc/ipld-eth-db:local + environment: + DATABASE_USER: "vdbm" + DATABASE_NAME: "cerc_testing" + DATABASE_PASSWORD: "password" + DATABASE_HOSTNAME: "ipld-eth-db" + DATABASE_PORT: 5432 + + ipld-eth-db: + image: timescale/timescaledb:2.8.1-pg14 + restart: always + environment: + POSTGRES_USER: "vdbm" + POSTGRES_DB: "cerc_testing" + POSTGRES_PASSWORD: "password" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "5432"] + interval: 30s + timeout: 10s + retries: 10 + start_period: 3s + ports: + - "127.0.0.1:8077:5432" diff --git a/build/lib/app/data/compose/docker-compose-ipld-eth-server.yml b/build/lib/app/data/compose/docker-compose-ipld-eth-server.yml new file mode 100644 index 00000000..e02f73ca --- /dev/null +++ b/build/lib/app/data/compose/docker-compose-ipld-eth-server.yml @@ -0,0 +1,46 @@ +version: "3.2" +services: + ipld-eth-server: + restart: unless-stopped + depends_on: + ipld-eth-db: + condition: service_healthy + image: cerc/ipld-eth-server:local + environment: + IPLD_SERVER_GRAPHQL: "true" + IPLD_POSTGRAPHILEPATH: http://graphql:5000 + ETH_SERVER_HTTPPATH: 0.0.0.0:8081 + ETH_SERVER_GRAPHQL: "true" + ETH_SERVER_GRAPHQLPATH: 0.0.0.0:8082 + VDB_COMMAND: "serve" + ETH_CHAIN_CONFIG: "/tmp/chain.json" + DATABASE_NAME: cerc_testing + DATABASE_HOSTNAME: ipld-eth-db + DATABASE_PORT: 5432 + DATABASE_USER: "vdbm" + 
DATABASE_PASSWORD: "password" + ETH_CHAIN_ID: 99 + ETH_FORWARD_ETH_CALLS: "false" + ETH_FORWARD_GET_STORAGE_AT: "false" + ETH_PROXY_ON_ERROR: "false" + METRICS: "true" + PROM_HTTP: "true" + PROM_HTTP_ADDR: "0.0.0.0" + PROM_HTTP_PORT: "8090" + LOGRUS_LEVEL: "debug" + CERC_REMOTE_DEBUG: "true" + volumes: + - type: bind + source: ../config/ipld-eth-server/chain.json + target: /tmp/chain.json + ports: + - "8081" + - "8082" + - "8090" + - "40000" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "8081"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s diff --git a/build/lib/app/data/compose/docker-compose-keycloak.yml b/build/lib/app/data/compose/docker-compose-keycloak.yml new file mode 100644 index 00000000..1586cdb7 --- /dev/null +++ b/build/lib/app/data/compose/docker-compose-keycloak.yml @@ -0,0 +1,45 @@ +version: '3.8' + +services: + keycloak-db: + image: postgres:14-alpine + env_file: + - ../config/keycloak/keycloak.env + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "5432"] + interval: 30s + timeout: 10s + retries: 10 + start_period: 3s + ports: + - 5432 + keycloak: + image: cerc/keycloak:local + env_file: + - ../config/keycloak/keycloak.env + environment: + JAVA_OPTS_APPEND: "-Dkeycloak.migration.action=import -Dkeycloak.migration.provider=dir -Dkeycloak.migration.dir=/import -Dkeycloak.migration.strategy=IGNORE_EXISTING" + volumes: + - ../config/keycloak/import:/import + ports: + - 8080 + command: ["start"] + depends_on: + keycloak-db: + condition: service_healthy + keycloak-nginx: + image: nginx:1.23-alpine + restart: always + volumes: + - ../config/keycloak/nginx:/etc/nginx/conf.d + ports: + - 80 + depends_on: + - keycloak + keycloak-nginx-prometheus-exporter: + image: nginx/nginx-prometheus-exporter + restart: always + environment: + - SCRAPE_URI=http://keycloak-nginx:80/stub_status + depends_on: + - keycloak-nginx diff --git a/build/lib/app/data/compose/docker-compose-kubo.yml b/build/lib/app/data/compose/docker-compose-kubo.yml 
new file mode 100644 index 00000000..f5f8b06e --- /dev/null +++ b/build/lib/app/data/compose/docker-compose-kubo.yml @@ -0,0 +1,13 @@ +version: "3.2" +# See: https://docs.ipfs.tech/install/run-ipfs-inside-docker/#set-up +services: + ipfs: + image: ipfs/kubo:master-2023-02-20-714a968 + restart: always + volumes: + - ./ipfs/import:/import + - ./ipfs/data:/data/ipfs + ports: + - "0.0.0.0:8080:8080" + - "0.0.0.0:4001:4001" + - "0.0.0.0:5001:5001" diff --git a/build/lib/app/data/compose/docker-compose-laconicd.yml b/build/lib/app/data/compose/docker-compose-laconicd.yml new file mode 100644 index 00000000..753283bd --- /dev/null +++ b/build/lib/app/data/compose/docker-compose-laconicd.yml @@ -0,0 +1,17 @@ +version: "3.2" +services: + laconicd: + restart: unless-stopped + image: cerc/laconicd:local + # TODO: determine which of the ports below is really needed + ports: + - "6060" + - "26657" + - "26656" + - "9473" + - "8545" + - "8546" + - "9090" + - "9091" + - "1317" + diff --git a/build/lib/app/data/compose/docker-compose-mobymask-app.yml b/build/lib/app/data/compose/docker-compose-mobymask-app.yml new file mode 100644 index 00000000..d43e6b44 --- /dev/null +++ b/build/lib/app/data/compose/docker-compose-mobymask-app.yml @@ -0,0 +1,68 @@ +version: '3.2' + +services: + # Builds and serves the MobyMask react-app + mobymask-app: + restart: unless-stopped + image: cerc/mobymask-ui:local + env_file: + - ../config/watcher-mobymask-v2/mobymask-params.env + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_CHAIN_ID: ${CERC_CHAIN_ID} + CERC_DEPLOYED_CONTRACT: ${CERC_DEPLOYED_CONTRACT} + CERC_APP_WATCHER_URL: ${CERC_APP_WATCHER_URL} + CERC_RELAY_NODES: ${CERC_RELAY_NODES} + CERC_BUILD_DIR: "@cerc-io/mobymask-ui/build" + working_dir: /scripts + command: ["sh", "mobymask-app-start.sh"] + volumes: + - ../config/wait-for-it.sh:/scripts/wait-for-it.sh + - ../config/watcher-mobymask-v2/mobymask-app-start.sh:/scripts/mobymask-app-start.sh + - peers_ids:/peers + - 
mobymask_deployment:/server + ports: + - "0.0.0.0:3002:80" + healthcheck: + test: ["CMD", "nc", "-vz", "localhost", "80"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 10s + extra_hosts: + - "host.docker.internal:host-gateway" + + # Builds and serves the LXDAO version of MobyMask react-app + lxdao-mobymask-app: + restart: unless-stopped + image: cerc/mobymask-ui:local + env_file: + - ../config/watcher-mobymask-v2/mobymask-params.env + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_CHAIN_ID: ${CERC_CHAIN_ID} + CERC_DEPLOYED_CONTRACT: ${CERC_DEPLOYED_CONTRACT} + CERC_APP_WATCHER_URL: ${CERC_APP_WATCHER_URL} + CERC_RELAY_NODES: ${CERC_RELAY_NODES} + CERC_BUILD_DIR: "@cerc-io/mobymask-ui-lxdao/build" + working_dir: /scripts + command: ["sh", "mobymask-app-start.sh"] + volumes: + - ../config/wait-for-it.sh:/scripts/wait-for-it.sh + - ../config/watcher-mobymask-v2/mobymask-app-start.sh:/scripts/mobymask-app-start.sh + - peers_ids:/peers + - mobymask_deployment:/server + ports: + - "0.0.0.0:3004:80" + healthcheck: + test: ["CMD", "nc", "-vz", "localhost", "80"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 10s + extra_hosts: + - "host.docker.internal:host-gateway" + +volumes: + mobymask_deployment: + peers_ids: diff --git a/build/lib/app/data/compose/docker-compose-peer-test-app.yml b/build/lib/app/data/compose/docker-compose-peer-test-app.yml new file mode 100644 index 00000000..f2a22675 --- /dev/null +++ b/build/lib/app/data/compose/docker-compose-peer-test-app.yml @@ -0,0 +1,30 @@ +version: '3.2' + +services: + peer-test-app: + # Builds and serves the peer-test react-app + image: cerc/react-peer:local + working_dir: /scripts + env_file: + - ../config/watcher-mobymask-v2/mobymask-params.env + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_RELAY_NODES: ${CERC_RELAY_NODES} + command: ["sh", "test-app-start.sh"] + volumes: + - ../config/wait-for-it.sh:/scripts/wait-for-it.sh + - 
../config/watcher-mobymask-v2/test-app-start.sh:/scripts/test-app-start.sh + - peers_ids:/peers + ports: + - "0.0.0.0:3003:80" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "80"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 10s + extra_hosts: + - "host.docker.internal:host-gateway" + +volumes: + peers_ids: diff --git a/build/lib/app/data/compose/docker-compose-test.yml b/build/lib/app/data/compose/docker-compose-test.yml new file mode 100644 index 00000000..929f6feb --- /dev/null +++ b/build/lib/app/data/compose/docker-compose-test.yml @@ -0,0 +1,7 @@ +version: "3.2" +services: + test: + image: cerc/test-container:local + restart: always + ports: + - "80" diff --git a/build/lib/app/data/compose/docker-compose-tx-spammer.yml b/build/lib/app/data/compose/docker-compose-tx-spammer.yml new file mode 100644 index 00000000..2d203b10 --- /dev/null +++ b/build/lib/app/data/compose/docker-compose-tx-spammer.yml @@ -0,0 +1,18 @@ +version: '3.2' + +services: + tx-spammer: + restart: always + image: cerc/tx-spammer:local + env_file: + - ../config/tx-spammer/tx-spammer.env + environment: + ACCOUNTS_CSV_URL: http://fixturenet-eth-bootnode-geth:9898/accounts.csv + ETH_HTTP_PATH: http://fixturenet-eth-geth-1:8545 + LOG_LEVEL: debug + SPAMMER_COMMAND: autoSend + depends_on: + fixturenet-eth-bootnode-geth: + condition: service_started + fixturenet-eth-geth-1: + condition: service_healthy diff --git a/build/lib/app/data/compose/docker-compose-watcher-erc20.yml b/build/lib/app/data/compose/docker-compose-watcher-erc20.yml new file mode 100644 index 00000000..a094250c --- /dev/null +++ b/build/lib/app/data/compose/docker-compose-watcher-erc20.yml @@ -0,0 +1,49 @@ +version: '3.2' + +services: + + erc20-watcher-db: + restart: unless-stopped + image: postgres:14-alpine + environment: + - POSTGRES_USER=vdbm + - POSTGRES_MULTIPLE_DATABASES=erc20-watcher,erc20-watcher-job-queue + - POSTGRES_EXTENSION=erc20-watcher-job-queue:pgcrypto + - POSTGRES_PASSWORD=password + 
volumes: + - ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh + - erc20_watcher_db_data:/var/lib/postgresql/data + ports: + - "0.0.0.0:15433:5432" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "5432"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 10s + + erc20-watcher: + restart: unless-stopped + depends_on: + ipld-eth-server: + condition: service_healthy + erc20-watcher-db: + condition: service_healthy + image: cerc/watcher-erc20:local + environment: + - ETH_RPC_URL=http://go-ethereum:8545 + command: ["sh", "-c", "yarn server"] + volumes: + - ../config/watcher-erc20/erc20-watcher.toml:/app/packages/erc20-watcher/environments/local.toml + ports: + - "0.0.0.0:3002:3001" + - "0.0.0.0:9002:9001" + healthcheck: + test: ["CMD", "nc", "-vz", "localhost", "3001"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + +volumes: + erc20_watcher_db_data: diff --git a/build/lib/app/data/compose/docker-compose-watcher-erc721.yml b/build/lib/app/data/compose/docker-compose-watcher-erc721.yml new file mode 100644 index 00000000..cdfcdbc7 --- /dev/null +++ b/build/lib/app/data/compose/docker-compose-watcher-erc721.yml @@ -0,0 +1,49 @@ +version: '3.2' + +services: + + erc721-watcher-db: + restart: unless-stopped + image: postgres:14-alpine + environment: + - POSTGRES_USER=vdbm + - POSTGRES_MULTIPLE_DATABASES=erc721-watcher,erc721-watcher-job-queue + - POSTGRES_EXTENSION=erc721-watcher-job-queue:pgcrypto + - POSTGRES_PASSWORD=password + volumes: + - ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh + - erc721_watcher_db_data:/var/lib/postgresql/data + ports: + - "0.0.0.0:15434:5432" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "5432"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 10s + + erc721-watcher: + restart: unless-stopped + depends_on: + ipld-eth-server: + condition: service_healthy + 
erc721-watcher-db: + condition: service_healthy + image: cerc/watcher-erc721:local + environment: + - ETH_RPC_URL=http://go-ethereum:8545 + command: ["sh", "-c", "yarn server"] + volumes: + - ../config/watcher-erc721/erc721-watcher.toml:/app/packages/erc721-watcher/environments/local.toml + ports: + - "0.0.0.0:3009:3009" + - "0.0.0.0:9003:9001" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "3009"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + +volumes: + erc721_watcher_db_data: diff --git a/build/lib/app/data/compose/docker-compose-watcher-mobymask-v2.yml b/build/lib/app/data/compose/docker-compose-watcher-mobymask-v2.yml new file mode 100644 index 00000000..36c5f3f9 --- /dev/null +++ b/build/lib/app/data/compose/docker-compose-watcher-mobymask-v2.yml @@ -0,0 +1,134 @@ +version: '3.2' + +services: + # Starts the PostgreSQL database for watcher + mobymask-watcher-db: + restart: unless-stopped + image: postgres:14-alpine + environment: + - POSTGRES_USER=vdbm + - POSTGRES_MULTIPLE_DATABASES=mobymask-watcher,mobymask-watcher-job-queue + - POSTGRES_EXTENSION=mobymask-watcher-job-queue:pgcrypto + - POSTGRES_PASSWORD=password + volumes: + - ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh + - mobymask_watcher_db_data:/var/lib/postgresql/data + ports: + - "0.0.0.0:15432:5432" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "5432"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 10s + + # Deploys the MobyMask contract and generates an invite link + # Deployment is skipped if CERC_DEPLOYED_CONTRACT env is set + mobymask: + image: cerc/mobymask:local + working_dir: /app/packages/server + env_file: + - ../config/watcher-mobymask-v2/optimism-params.env + - ../config/watcher-mobymask-v2/mobymask-params.env + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + ENV: "PROD" + CERC_L2_GETH_RPC: ${CERC_L2_GETH_RPC} + CERC_L1_ACCOUNTS_CSV_URL: 
${CERC_L1_ACCOUNTS_CSV_URL} + CERC_PRIVATE_KEY_DEPLOYER: ${CERC_PRIVATE_KEY_DEPLOYER} + CERC_MOBYMASK_APP_BASE_URI: ${CERC_MOBYMASK_APP_BASE_URI} + CERC_DEPLOYED_CONTRACT: ${CERC_DEPLOYED_CONTRACT} + CERC_L2_GETH_HOST: ${CERC_L2_GETH_HOST} + CERC_L2_GETH_PORT: ${CERC_L2_GETH_PORT} + CERC_L2_NODE_HOST: ${CERC_L2_NODE_HOST} + CERC_L2_NODE_PORT: ${CERC_L2_NODE_PORT} + command: ["sh", "deploy-and-generate-invite.sh"] + volumes: + - ../config/wait-for-it.sh:/app/packages/server/wait-for-it.sh + - ../config/watcher-mobymask-v2/secrets-template.json:/app/packages/server/secrets-template.json + - ../config/watcher-mobymask-v2/deploy-and-generate-invite.sh:/app/packages/server/deploy-and-generate-invite.sh + - mobymask_deployment:/app/packages/server + extra_hosts: + - "host.docker.internal:host-gateway" + + # Creates peer-id files if they don't exist + peer-ids-gen: + image: cerc/watcher-ts:local + restart: on-failure + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + working_dir: /app/packages/peer + command: ["sh", "generate-peer-ids.sh"] + volumes: + - ../config/watcher-mobymask-v2/generate-peer-ids.sh:/app/packages/peer/generate-peer-ids.sh + - peers_ids:/peer-ids + + # Starts the mobymask-v2-watcher server + mobymask-watcher-server: + image: cerc/watcher-mobymask-v2:local + restart: unless-stopped + depends_on: + mobymask-watcher-db: + condition: service_healthy + peer-ids-gen: + condition: service_completed_successfully + mobymask: + condition: service_completed_successfully + env_file: + - ../config/watcher-mobymask-v2/optimism-params.env + - ../config/watcher-mobymask-v2/mobymask-params.env + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_L2_GETH_RPC: ${CERC_L2_GETH_RPC} + CERC_L1_ACCOUNTS_CSV_URL: ${CERC_L1_ACCOUNTS_CSV_URL} + CERC_PRIVATE_KEY_PEER: ${CERC_PRIVATE_KEY_PEER} + CERC_RELAY_PEERS: ${CERC_RELAY_PEERS} + CERC_RELAY_ANNOUNCE_DOMAIN: ${CERC_RELAY_ANNOUNCE_DOMAIN} + CERC_ENABLE_PEER_L2_TXS: ${CERC_ENABLE_PEER_L2_TXS} + 
CERC_DEPLOYED_CONTRACT: ${CERC_DEPLOYED_CONTRACT} + command: ["sh", "start-server.sh"] + volumes: + - ../config/watcher-mobymask-v2/watcher-config-template.toml:/app/environments/watcher-config-template.toml + - ../config/watcher-mobymask-v2/start-server.sh:/app/start-server.sh + - peers_ids:/app/peers + - mobymask_deployment:/server + # Expose GQL, metrics and relay node ports + ports: + - "0.0.0.0:3001:3001" + - "0.0.0.0:9001:9001" + - "0.0.0.0:9090:9090" + healthcheck: + test: ["CMD", "busybox", "nc", "localhost", "9090"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" + + # Container to run peer tests + peer-tests: + image: cerc/watcher-ts:local + restart: on-failure + depends_on: + mobymask-watcher-server: + condition: service_healthy + peer-ids-gen: + condition: service_completed_successfully + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + working_dir: /app/packages/peer + command: + - sh + - -c + - | + ./set-tests-env.sh && \ + tail -f /dev/null + volumes: + - ../config/watcher-mobymask-v2/set-tests-env.sh:/app/packages/peer/set-tests-env.sh + - peers_ids:/peer-ids + +volumes: + mobymask_watcher_db_data: + peers_ids: + mobymask_deployment: diff --git a/build/lib/app/data/compose/docker-compose-watcher-mobymask.yml b/build/lib/app/data/compose/docker-compose-watcher-mobymask.yml new file mode 100644 index 00000000..f54e1454 --- /dev/null +++ b/build/lib/app/data/compose/docker-compose-watcher-mobymask.yml @@ -0,0 +1,65 @@ +version: '3.2' + +# TODO: remove hard-wired host ports + +services: + + mobymask-watcher-db: + restart: unless-stopped + image: postgres:14-alpine + environment: + - POSTGRES_USER=vdbm + - POSTGRES_MULTIPLE_DATABASES=mobymask-watcher,mobymask-watcher-job-queue + - POSTGRES_EXTENSION=mobymask-watcher-job-queue:pgcrypto + - POSTGRES_PASSWORD=password + volumes: + - 
../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh + - mobymask_watcher_db_data:/var/lib/postgresql/data + ports: + - "0.0.0.0:15432:5432" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "5432"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 10s + + mobymask-watcher-server: + restart: unless-stopped + depends_on: + mobymask-watcher-db: + condition: service_healthy + image: cerc/watcher-mobymask:local + command: ["sh", "-c", "yarn server"] + volumes: + - ../config/watcher-mobymask/mobymask-watcher.toml:/app/packages/mobymask-watcher/environments/local.toml + ports: + - "0.0.0.0:3001:3001" + - "0.0.0.0:9001:9001" + extra_hosts: + - "ipld-eth-server:host-gateway" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "3001"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + + mobymask-watcher-job-runner: + restart: unless-stopped + depends_on: + mobymask-watcher-server: + condition: service_healthy + mobymask-watcher-db: + condition: service_healthy + image: cerc/watcher-mobymask:local + command: ["sh", "-c", "yarn job-runner"] + volumes: + - ../config/watcher-mobymask/mobymask-watcher.toml:/app/packages/mobymask-watcher/environments/local.toml + ports: + - "0.0.0.0:9000:9000" + extra_hosts: + - "ipld-eth-server:host-gateway" + +volumes: + mobymask_watcher_db_data: diff --git a/build/lib/app/data/compose/docker-compose-watcher-uniswap-v3.yml b/build/lib/app/data/compose/docker-compose-watcher-uniswap-v3.yml new file mode 100644 index 00000000..b98f6a70 --- /dev/null +++ b/build/lib/app/data/compose/docker-compose-watcher-uniswap-v3.yml @@ -0,0 +1,170 @@ +version: '3.2' + +services: + + uniswap-watcher-db: + restart: unless-stopped + image: postgres:14-alpine + environment: + - POSTGRES_USER=vdbm + - POSTGRES_MULTIPLE_DATABASES=erc20-watcher,uni-watcher,uni-info-watcher,erc20-watcher-job-queue,uni-watcher-job-queue,uni-info-watcher-job-queue + - 
POSTGRES_EXTENSION=erc20-watcher-job-queue:pgcrypto,uni-watcher-job-queue:pgcrypto,uni-info-watcher-job-queue:pgcrypto + - POSTGRES_PASSWORD=password + command: ["postgres", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "work_mem=2GB"] + volumes: + - ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh + - ../config/postgresql/create-pg-stat-statements.sql:/docker-entrypoint-initdb.d/create-pg-stat-statements.sql + - uniswap_watcher_db_data:/var/lib/postgresql/data + ports: + - "0.0.0.0:15435:5432" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "5432"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 10s + shm_size: '8GB' + + erc20-watcher-server: + restart: unless-stopped + depends_on: + uniswap-watcher-db: + condition: service_healthy + image: cerc/watcher-uniswap-v3:local + working_dir: /app/packages/erc20-watcher + environment: + - DEBUG=vulcanize:* + command: ["node", "--enable-source-maps", "dist/server.js"] + volumes: + - ../config/watcher-uniswap-v3/erc20-watcher.toml:/app/packages/erc20-watcher/environments/local.toml + ports: + - "0.0.0.0:3005:3001" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "3001"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" + + uni-watcher-job-runner: + restart: unless-stopped + depends_on: + uniswap-watcher-db: + condition: service_healthy + image: cerc/watcher-uniswap-v3:local + working_dir: /app/packages/uni-watcher + environment: + - DEBUG=vulcanize:* + command: ["sh", "-c", "./watch-contract.sh && node --enable-source-maps dist/job-runner.js"] + volumes: + - ../config/watcher-uniswap-v3/uni-watcher.toml:/app/packages/uni-watcher/environments/local.toml + - ../config/watcher-uniswap-v3/watch-contract.sh:/app/packages/uni-watcher/watch-contract.sh + ports: + - "0.0.0.0:9004:9000" + healthcheck: + test: ["CMD", "nc", 
"-v", "localhost", "9000"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" + + uni-watcher-server: + restart: unless-stopped + depends_on: + uniswap-watcher-db: + condition: service_healthy + uni-watcher-job-runner: + condition: service_healthy + image: cerc/watcher-uniswap-v3:local + environment: + - UNISWAP_START_BLOCK=12369621 + - DEBUG=vulcanize:* + working_dir: /app/packages/uni-watcher + command: ["./run.sh"] + volumes: + - ../config/watcher-uniswap-v3/uni-watcher.toml:/app/packages/uni-watcher/environments/local.toml + - ../config/watcher-uniswap-v3/run.sh:/app/packages/uni-watcher/run.sh + ports: + - "0.0.0.0:3003:3003" + - "0.0.0.0:9005:9001" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "3003"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" + + uni-info-watcher-job-runner: + restart: unless-stopped + depends_on: + uniswap-watcher-db: + condition: service_healthy + erc20-watcher-server: + condition: service_healthy + uni-watcher-server: + condition: service_healthy + image: cerc/watcher-uniswap-v3:local + working_dir: /app/packages/uni-info-watcher + environment: + - DEBUG=vulcanize:* + command: ["node", "--enable-source-maps", "dist/job-runner.js"] + volumes: + - ../config/watcher-uniswap-v3/uni-info-watcher.toml:/app/packages/uni-info-watcher/environments/local.toml + ports: + - "0.0.0.0:9006:9002" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "9002"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" + + uni-info-watcher-server: + restart: unless-stopped + depends_on: + uniswap-watcher-db: + condition: service_healthy + erc20-watcher-server: + condition: service_healthy + uni-watcher-server: + condition: service_healthy + uni-info-watcher-job-runner: + condition: service_healthy + image: cerc/watcher-uniswap-v3:local + 
environment: + - UNISWAP_START_BLOCK=12369621 + working_dir: /app/packages/uni-info-watcher + command: ["./run.sh"] + volumes: + - ../config/watcher-uniswap-v3/uni-info-watcher.toml:/app/packages/uni-info-watcher/environments/local.toml + - ../config/watcher-uniswap-v3/run.sh:/app/packages/uni-info-watcher/run.sh + ports: + - "0.0.0.0:3004:3004" + - "0.0.0.0:9007:9003" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "3004"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" + + uniswap-v3-info: + depends_on: + uni-info-watcher-server: + condition: service_healthy + image: cerc/uniswap-v3-info:local + ports: + - "0.0.0.0:3006:3000" + +volumes: + uniswap_watcher_db_data: diff --git a/build/lib/app/data/config/fixturenet-eth-metrics/prometheus/etc/prometheus.yml b/build/lib/app/data/config/fixturenet-eth-metrics/prometheus/etc/prometheus.yml new file mode 100644 index 00000000..5718d4ef --- /dev/null +++ b/build/lib/app/data/config/fixturenet-eth-metrics/prometheus/etc/prometheus.yml @@ -0,0 +1,34 @@ +global: + scrape_interval: 5s + evaluation_interval: 15s + +scrape_configs: + # ipld-eth-server + - job_name: 'ipld-eth-server' + metrics_path: /metrics + scrape_interval: 5s + static_configs: + - targets: ['ipld-eth-server:8090'] + + # geth + - job_name: 'geth' + metrics_path: /debug/metrics/prometheus + scheme: http + static_configs: + - targets: ['fixturenet-eth-geth-1:6060'] + + # nginx + - job_name: 'nginx' + scrape_interval: 5s + metrics_path: /metrics + scheme: http + static_configs: + - targets: ['keycloak-nginx-prometheus-exporter:9113'] + + # keycloak + - job_name: 'keycloak' + scrape_interval: 5s + metrics_path: /auth/realms/cerc/metrics + scheme: http + static_configs: + - targets: ['keycloak:8080'] diff --git a/build/lib/app/data/config/fixturenet-eth/fixturenet-eth.env b/build/lib/app/data/config/fixturenet-eth/fixturenet-eth.env new file mode 100644 index 00000000..1c214b56 --- 
/dev/null +++ b/build/lib/app/data/config/fixturenet-eth/fixturenet-eth.env @@ -0,0 +1,23 @@ +# The password used to access test accounts (eg, via personal_unlockAccount). The password is the same for all accounts. +ACCOUNT_PASSWORD=secret1212 + +# ENODE of the geth bootnode. +BOOTNODE_KEY="b0ac22adcad37213c7c565810a50f1772291e7b0ce53fb73e7ec2a3c75bc13b5" +ENODE="enode://af22c29c316ad069cf48a09a4ad5cf04a251b411e45098888d114c6dd7f489a13786620d5953738762afa13711d4ffb3b19aa5de772d8af72f851f7e9c5b164a@fixturenet-eth-bootnode-geth:30303" + +# JWT shared by geth and lighthouse for authentication. +JWT="0x6cdcac3501046a08e186730dd8bd136cfaf0fdc1fc955f6e15ad3068c0ff2af0" + +# URL to download the ENR of the lighthouse bootnode (generated at first start). +ENR_URL="http://fixturenet-eth-bootnode-lighthouse:3000/bootnode/enr.dat" + +# DB connection settings for statediffing (see docker-compose-db.yml) +CERC_STATEDIFF_DB_HOST="ipld-eth-db" +CERC_STATEDIFF_DB_PORT=5432 +CERC_STATEDIFF_DB_NAME="cerc_testing" +CERC_STATEDIFF_DB_USER="vdbm" +CERC_STATEDIFF_DB_PASSWORD="password" +CERC_STATEDIFF_DB_GOOSE_MIN_VER=23 +CERC_STATEDIFF_DB_LOG_STATEMENTS="false" + +CERC_GETH_VMODULE="statediff/*=5,rpc/*=5" diff --git a/build/lib/app/data/config/fixturenet-laconicd/create-fixturenet.sh b/build/lib/app/data/config/fixturenet-laconicd/create-fixturenet.sh new file mode 100644 index 00000000..9c30bff8 --- /dev/null +++ b/build/lib/app/data/config/fixturenet-laconicd/create-fixturenet.sh @@ -0,0 +1,118 @@ +#!/bin/bash + +# TODO: this file is now an unmodified copy of cerc-io/laconicd/init.sh +# so we should have a mechanism to bundle it inside the container rather than link from here +# at deploy time. + +KEY="mykey" +CHAINID="laconic_9000-1" +MONIKER="localtestnet" +KEYRING="test" +KEYALGO="eth_secp256k1" +LOGLEVEL="info" +# trace evm +TRACE="--trace" +# TRACE="" + +# validate dependencies are installed +command -v jq > /dev/null 2>&1 || { echo >&2 "jq not installed. 
More info: https://stedolan.github.io/jq/download/"; exit 1; } + +# remove existing daemon and client +rm -rf ~/.laconic* + +make install + +laconicd config keyring-backend $KEYRING +laconicd config chain-id $CHAINID + +# if $KEY exists it should be deleted +laconicd keys add $KEY --keyring-backend $KEYRING --algo $KEYALGO + +# Set moniker and chain-id for Ethermint (Moniker can be anything, chain-id must be an integer) +laconicd init $MONIKER --chain-id $CHAINID + +# Change parameter token denominations to aphoton +cat $HOME/.laconicd/config/genesis.json | jq '.app_state["staking"]["params"]["bond_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json +cat $HOME/.laconicd/config/genesis.json | jq '.app_state["crisis"]["constant_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json +cat $HOME/.laconicd/config/genesis.json | jq '.app_state["gov"]["deposit_params"]["min_deposit"][0]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json +cat $HOME/.laconicd/config/genesis.json | jq '.app_state["mint"]["params"]["mint_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json +# Custom modules +cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["record_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json +cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json +cat $HOME/.laconicd/config/genesis.json | jq 
'.app_state["registry"]["params"]["authority_auction_commit_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json +cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_reveal_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json +cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_minimum_bid"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + +if [[ "$TEST_REGISTRY_EXPIRY" == "true" ]]; then + echo "Setting timers for expiry tests." + + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["record_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_grace_period"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json +fi + +if [[ "$TEST_AUCTION_ENABLED" == "true" ]]; then + echo "Enabling auction and setting timers." 
+ + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_enabled"]=true' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_grace_period"]="300s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_commits_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_reveals_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json +fi + +# increase block time (?) 
+cat $HOME/.laconicd/config/genesis.json | jq '.consensus_params["block"]["time_iota_ms"]="1000"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + +# Set gas limit in genesis +cat $HOME/.laconicd/config/genesis.json | jq '.consensus_params["block"]["max_gas"]="10000000"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json + +# disable produce empty block +if [[ "$OSTYPE" == "darwin"* ]]; then + sed -i '' 's/create_empty_blocks = true/create_empty_blocks = false/g' $HOME/.laconicd/config/config.toml + else + sed -i 's/create_empty_blocks = true/create_empty_blocks = false/g' $HOME/.laconicd/config/config.toml +fi + +if [[ $1 == "pending" ]]; then + if [[ "$OSTYPE" == "darwin"* ]]; then + sed -i '' 's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' $HOME/.laconicd/config/config.toml + sed -i '' 's/timeout_propose = "3s"/timeout_propose = "30s"/g' $HOME/.laconicd/config/config.toml + sed -i '' 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' $HOME/.laconicd/config/config.toml + sed -i '' 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' $HOME/.laconicd/config/config.toml + sed -i '' 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' $HOME/.laconicd/config/config.toml + sed -i '' 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' $HOME/.laconicd/config/config.toml + sed -i '' 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' $HOME/.laconicd/config/config.toml + sed -i '' 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml + sed -i '' 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' $HOME/.laconicd/config/config.toml + else + sed -i 's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' $HOME/.laconicd/config/config.toml + sed -i 
's/timeout_propose = "3s"/timeout_propose = "30s"/g' $HOME/.laconicd/config/config.toml + sed -i 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' $HOME/.laconicd/config/config.toml + sed -i 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' $HOME/.laconicd/config/config.toml + sed -i 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' $HOME/.laconicd/config/config.toml + sed -i 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' $HOME/.laconicd/config/config.toml + sed -i 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' $HOME/.laconicd/config/config.toml + sed -i 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml + sed -i 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' $HOME/.laconicd/config/config.toml + fi +fi + +# Allocate genesis accounts (cosmos formatted addresses) +laconicd add-genesis-account $KEY 100000000000000000000000000aphoton --keyring-backend $KEYRING + +# Sign genesis transaction +laconicd gentx $KEY 1000000000000000000000aphoton --keyring-backend $KEYRING --chain-id $CHAINID + +# Collect genesis tx +laconicd collect-gentxs + +# Run this to ensure everything worked and that the genesis file is setup correctly +laconicd validate-genesis + +if [[ $1 == "pending" ]]; then + echo "pending mode is on, please wait for the first block committed." 
+fi + +# Start the node (remove the --pruning=nothing flag if historical queries are not needed) +laconicd start --pruning=nothing --evm.tracer=json $TRACE --log_level $LOGLEVEL --minimum-gas-prices=0.0001aphoton --json-rpc.api eth,txpool,personal,net,debug,web3,miner --api.enable --gql-server --gql-playground diff --git a/build/lib/app/data/config/fixturenet-laconicd/export-myaddress.sh b/build/lib/app/data/config/fixturenet-laconicd/export-myaddress.sh new file mode 100644 index 00000000..e454c0b0 --- /dev/null +++ b/build/lib/app/data/config/fixturenet-laconicd/export-myaddress.sh @@ -0,0 +1,2 @@ +#!/bin/sh +laconicd keys show mykey | grep address | cut -d ' ' -f 3 diff --git a/build/lib/app/data/config/fixturenet-laconicd/export-mykey.sh b/build/lib/app/data/config/fixturenet-laconicd/export-mykey.sh new file mode 100644 index 00000000..1a5be86e --- /dev/null +++ b/build/lib/app/data/config/fixturenet-laconicd/export-mykey.sh @@ -0,0 +1,2 @@ +#!/bin/sh +echo y | laconicd keys export mykey --unarmored-hex --unsafe diff --git a/build/lib/app/data/config/fixturenet-laconicd/registry-cli-config-template.yml b/build/lib/app/data/config/fixturenet-laconicd/registry-cli-config-template.yml new file mode 100644 index 00000000..16432c18 --- /dev/null +++ b/build/lib/app/data/config/fixturenet-laconicd/registry-cli-config-template.yml @@ -0,0 +1,9 @@ +services: + cns: + restEndpoint: 'http://laconicd:1317' + gqlEndpoint: 'http://laconicd:9473/api' + userKey: REPLACE_WITH_MYKEY + bondId: + chainId: laconic_9000-1 + gas: 250000 + fees: 200000aphoton diff --git a/build/lib/app/data/config/fixturenet-lotus/genesis/devgen.car b/build/lib/app/data/config/fixturenet-lotus/genesis/devgen.car new file mode 100644 index 00000000..a22eaed4 Binary files /dev/null and b/build/lib/app/data/config/fixturenet-lotus/genesis/devgen.car differ diff --git a/build/lib/app/data/config/fixturenet-lotus/genesis/localnet.json b/build/lib/app/data/config/fixturenet-lotus/genesis/localnet.json new 
file mode 100644 index 00000000..06d3123e --- /dev/null +++ b/build/lib/app/data/config/fixturenet-lotus/genesis/localnet.json @@ -0,0 +1,108 @@ +{ + "NetworkVersion": 18, + "Accounts": [ + { + "Type": "account", + "Balance": "50000000000000000000000000", + "Meta": { + "Owner": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q" + } + } + ], + "Miners": [ + { + "ID": "t01000", + "Owner": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q", + "Worker": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q", + "PeerId": "12D3KooWG5q6pWJVdPBhDBv9AjWVbUh4xxTAZ7xvgZSjczWuD2Z9", + "MarketBalance": "0", + "PowerBalance": "0", + "SectorSize": 2048, + "Sectors": [ + { + "CommR": { + "/": "bagboea4b5abcboxypcewlkmrat2myu4vthk3ii2pcomak7nhqmdbb6sxlolp2wdf" + }, + "CommD": { + "/": "baga6ea4seaqn3jfixthmdgksv4vhfeuyvr6upw6tvaqbmzmsyxnzosm4pwgnmlq" + }, + "SectorID": 0, + "Deal": { + "PieceCID": { + "/": "baga6ea4seaqn3jfixthmdgksv4vhfeuyvr6upw6tvaqbmzmsyxnzosm4pwgnmlq" + }, + "PieceSize": 2048, + "VerifiedDeal": false, + "Client": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q", + "Provider": "t01000", + "Label": "0", + "StartEpoch": 0, + "EndEpoch": 9001, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "DealClientKey": { + "Type": "bls", + "PrivateKey": "tFvSRiSg2G3Ssgg0PSYy23XyjaIMXpsmdyG2B7UFLT4=" + }, + "ProofType": 5 + }, + { + "CommR": { + "/": "bagboea4b5abcb6krzypqcczhcnbeyjcqkeo6omfergm336o3kitugh3jgjog2yqq" + }, + "CommD": { + "/": "baga6ea4seaqhondpb2373hjasjplxvbjzi5n5mm4fbbhjxp5ptnbq4cibapkeii" + }, + "SectorID": 1, + "Deal": { + "PieceCID": { + "/": "baga6ea4seaqhondpb2373hjasjplxvbjzi5n5mm4fbbhjxp5ptnbq4cibapkeii" + }, + "PieceSize": 2048, + "VerifiedDeal": false, + "Client": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q", + 
"Provider": "t01000", + "Label": "1", + "StartEpoch": 0, + "EndEpoch": 9001, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "DealClientKey": { + "Type": "bls", + "PrivateKey": "tFvSRiSg2G3Ssgg0PSYy23XyjaIMXpsmdyG2B7UFLT4=" + }, + "ProofType": 5 + } + ] + } + ], + "NetworkName": "localnet-6d52dae5-ff29-4bac-a45d-f84e6c07564c", + "VerifregRootKey": { + "Type": "multisig", + "Balance": "0", + "Meta": { + "Signers": [ + "t1ceb34gnsc6qk5dt6n7xg6ycwzasjhbxm3iylkiy" + ], + "Threshold": 1, + "VestingDuration": 0, + "VestingStart": 0 + } + }, + "RemainderAccount": { + "Type": "multisig", + "Balance": "0", + "Meta": { + "Signers": [ + "t1ceb34gnsc6qk5dt6n7xg6ycwzasjhbxm3iylkiy" + ], + "Threshold": 1, + "VestingDuration": 0, + "VestingStart": 0 + } + } +} \ No newline at end of file diff --git a/build/lib/app/data/config/fixturenet-lotus/lotus-env.env b/build/lib/app/data/config/fixturenet-lotus/lotus-env.env new file mode 100644 index 00000000..582b8944 --- /dev/null +++ b/build/lib/app/data/config/fixturenet-lotus/lotus-env.env @@ -0,0 +1,5 @@ +LOTUS_PATH=~/.lotus-local-net +LOTUS_MINER_PATH=~/.lotus-miner-local-net +LOTUS_SKIP_GENESIS_CHECK=_yes_ +CGO_CFLAGS_ALLOW="-D__BLST_PORTABLE__" +CGO_CFLAGS="-D__BLST_PORTABLE__" diff --git a/build/lib/app/data/config/fixturenet-lotus/setup-miner.sh b/build/lib/app/data/config/fixturenet-lotus/setup-miner.sh new file mode 100644 index 00000000..802ed8f5 --- /dev/null +++ b/build/lib/app/data/config/fixturenet-lotus/setup-miner.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +lotus --version +#lotus daemon --genesis=/devgen.car --profile=bootstrapper --bootstrap=false > /var/log/lotus.log 2>&1 +lotus daemon --genesis=/devgen.car --bootstrap=false diff --git a/build/lib/app/data/config/fixturenet-lotus/setup-node.sh b/build/lib/app/data/config/fixturenet-lotus/setup-node.sh new file mode 100644 index 00000000..fe9f17d2 --- /dev/null +++ b/build/lib/app/data/config/fixturenet-lotus/setup-node.sh @@ -0,0 
+1,4 @@ +#!/bin/bash + +lotus --version +lotus daemon --genesis=/devgen.car diff --git a/build/lib/app/data/config/fixturenet-optimism/generate-l2-config.sh b/build/lib/app/data/config/fixturenet-optimism/generate-l2-config.sh new file mode 100755 index 00000000..b10048d2 --- /dev/null +++ b/build/lib/app/data/config/fixturenet-optimism/generate-l2-config.sh @@ -0,0 +1,37 @@ +#!/bin/sh +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}" + +# Check existing config if it exists +if [ -f /app/jwt.txt ] && [ -f /app/rollup.json ]; then + echo "Found existing L2 config, cross-checking with L1 deployment config" + + SOURCE_L1_CONF=$(cat /contracts-bedrock/deploy-config/getting-started.json) + EXP_L1_BLOCKHASH=$(echo "$SOURCE_L1_CONF" | jq -r '.l1StartingBlockTag') + EXP_BATCHER=$(echo "$SOURCE_L1_CONF" | jq -r '.batchSenderAddress') + + GEN_L2_CONF=$(cat /app/rollup.json) + GEN_L1_BLOCKHASH=$(echo "$GEN_L2_CONF" | jq -r '.genesis.l1.hash') + GEN_BATCHER=$(echo "$GEN_L2_CONF" | jq -r '.genesis.system_config.batcherAddr') + + if [ "$EXP_L1_BLOCKHASH" = "$GEN_L1_BLOCKHASH" ] && [ "$EXP_BATCHER" = "$GEN_BATCHER" ]; then + echo "Config cross-checked, exiting" + exit 0 + fi + + echo "Existing L2 config doesn't match the L1 deployment config, please clear L2 config volume before starting" + exit 1 +fi + +op-node genesis l2 \ + --deploy-config /contracts-bedrock/deploy-config/getting-started.json \ + --deployment-dir /contracts-bedrock/deployments/getting-started/ \ + --outfile.l2 /app/genesis.json \ + --outfile.rollup /app/rollup.json \ + --l1-rpc $CERC_L1_RPC + +openssl rand -hex 32 > /app/jwt.txt diff --git a/build/lib/app/data/config/fixturenet-optimism/l1-params.env b/build/lib/app/data/config/fixturenet-optimism/l1-params.env new file mode 100644 index 00000000..5353e62f --- /dev/null +++ b/build/lib/app/data/config/fixturenet-optimism/l1-params.env @@ -0,0 +1,12 @@ +# Defaults + +# L1 endpoint 
+DEFAULT_CERC_L1_CHAIN_ID=1212 +DEFAULT_CERC_L1_RPC="http://fixturenet-eth-geth-1:8545" +DEFAULT_CERC_L1_HOST="fixturenet-eth-geth-1" +DEFAULT_CERC_L1_PORT=8545 + +# URL to get CSV with credentials for accounts on L1 +# that are used to send balance to Optimism Proxy contract +# (enables them to do transactions on L2) +DEFAULT_CERC_L1_ACCOUNTS_CSV_URL="http://fixturenet-eth-bootnode-geth:9898/accounts.csv" diff --git a/build/lib/app/data/config/fixturenet-optimism/optimism-contracts/run.sh b/build/lib/app/data/config/fixturenet-optimism/optimism-contracts/run.sh new file mode 100755 index 00000000..d878c03f --- /dev/null +++ b/build/lib/app/data/config/fixturenet-optimism/optimism-contracts/run.sh @@ -0,0 +1,131 @@ +#!/bin/bash +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +CERC_L1_CHAIN_ID="${CERC_L1_CHAIN_ID:-${DEFAULT_CERC_L1_CHAIN_ID}}" +CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}" + +CERC_L1_ACCOUNTS_CSV_URL="${CERC_L1_ACCOUNTS_CSV_URL:-${DEFAULT_CERC_L1_ACCOUNTS_CSV_URL}}" + +echo "Using L1 RPC endpoint ${CERC_L1_RPC}" + +IMPORT_1="import './verify-contract-deployment'" +IMPORT_2="import './rekey-json'" +IMPORT_3="import './send-balance'" + +# Append mounted tasks to tasks/index.ts file if not present +if ! 
grep -Fxq "$IMPORT_1" tasks/index.ts; then + echo "$IMPORT_1" >> tasks/index.ts + echo "$IMPORT_2" >> tasks/index.ts + echo "$IMPORT_3" >> tasks/index.ts +fi + +# Update the chainId in the hardhat config +sed -i "/getting-started/ {n; s/.*chainId.*/ chainId: $CERC_L1_CHAIN_ID,/}" hardhat.config.ts + +# Exit if a deployment already exists (on restarts) +# Note: fixturenet-eth-geth currently starts fresh on a restart +if [ -d "deployments/getting-started" ]; then + echo "Deployment directory deployments/getting-started found, checking SystemDictator deployment" + + # Read JSON file into variable + SYSTEM_DICTATOR_DETAILS=$(cat deployments/getting-started/SystemDictator.json) + + # Parse JSON into variables + SYSTEM_DICTATOR_ADDRESS=$(echo "$SYSTEM_DICTATOR_DETAILS" | jq -r '.address') + SYSTEM_DICTATOR_TXHASH=$(echo "$SYSTEM_DICTATOR_DETAILS" | jq -r '.transactionHash') + + if yarn hardhat verify-contract-deployment --contract "${SYSTEM_DICTATOR_ADDRESS}" --transaction-hash "${SYSTEM_DICTATOR_TXHASH}"; then + echo "Deployment verfication successful, exiting" + exit 0 + else + echo "Deployment verfication failed, please clear L1 deployment volume before starting" + exit 1 + fi +fi + +# Generate the L2 account addresses +yarn hardhat rekey-json --output /l2-accounts/keys.json + +# Read JSON file into variable +KEYS_JSON=$(cat /l2-accounts/keys.json) + +# Parse JSON into variables +ADMIN_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Admin.address') +ADMIN_PRIV_KEY=$(echo "$KEYS_JSON" | jq -r '.Admin.privateKey') +PROPOSER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Proposer.address') +BATCHER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Batcher.address') +SEQUENCER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Sequencer.address') + +# Get the private keys of L1 accounts +if [ -n "$CERC_L1_ACCOUNTS_CSV_URL" ] && \ + l1_accounts_response=$(curl -L --write-out '%{http_code}' --silent --output /dev/null "$CERC_L1_ACCOUNTS_CSV_URL") && \ + [ "$l1_accounts_response" -eq 200 ]; +then + echo "Fetching L1 
account credentials using provided URL" + mkdir -p /geth-accounts + wget -O /geth-accounts/accounts.csv "$CERC_L1_ACCOUNTS_CSV_URL" + + CERC_L1_ADDRESS=$(head -n 1 /geth-accounts/accounts.csv | cut -d ',' -f 2) + CERC_L1_PRIV_KEY=$(head -n 1 /geth-accounts/accounts.csv | cut -d ',' -f 3) + CERC_L1_ADDRESS_2=$(awk -F, 'NR==2{print $(NF-1)}' /geth-accounts/accounts.csv) + CERC_L1_PRIV_KEY_2=$(awk -F, 'NR==2{print $NF}' /geth-accounts/accounts.csv) +else + echo "Couldn't fetch L1 account credentials, using them from env" +fi + +# Send balances to the above L2 addresses +yarn hardhat send-balance --to "${ADMIN_ADDRESS}" --amount 2 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started +yarn hardhat send-balance --to "${PROPOSER_ADDRESS}" --amount 5 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started +yarn hardhat send-balance --to "${BATCHER_ADDRESS}" --amount 1000 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started + +echo "Balances sent to L2 accounts" + +# Select a finalized L1 block as the starting point for roll ups +until FINALIZED_BLOCK=$(cast block finalized --rpc-url "$CERC_L1_RPC"); do + echo "Waiting for a finalized L1 block to exist, retrying after 10s" + sleep 10 +done + +L1_BLOCKNUMBER=$(echo "$FINALIZED_BLOCK" | awk '/number/{print $2}') +L1_BLOCKHASH=$(echo "$FINALIZED_BLOCK" | awk '/hash/{print $2}') +L1_BLOCKTIMESTAMP=$(echo "$FINALIZED_BLOCK" | awk '/timestamp/{print $2}') + +echo "Selected L1 block ${L1_BLOCKNUMBER} as the starting block for roll ups" + +# Update the deployment config +sed -i 's/"l2OutputOracleStartingTimestamp": TIMESTAMP/"l2OutputOracleStartingTimestamp": '"$L1_BLOCKTIMESTAMP"'/g' deploy-config/getting-started.json +jq --arg chainid "$CERC_L1_CHAIN_ID" '.l1ChainID = ($chainid | tonumber)' deploy-config/getting-started.json > tmp.json && mv tmp.json deploy-config/getting-started.json + +node update-config.js deploy-config/getting-started.json "$ADMIN_ADDRESS" "$PROPOSER_ADDRESS" "$BATCHER_ADDRESS" 
"$SEQUENCER_ADDRESS" "$L1_BLOCKHASH" + +echo "Updated the deployment config" + +# Create a .env file +echo "L1_RPC=$CERC_L1_RPC" > .env +echo "PRIVATE_KEY_DEPLOYER=$ADMIN_PRIV_KEY" >> .env + +echo "Deploying the L1 smart contracts, this will take a while..." + +# Deploy the L1 smart contracts +yarn hardhat deploy --network getting-started --tags l1 + +echo "Deployed the L1 smart contracts" + +# Read Proxy contract's JSON and get the address +PROXY_JSON=$(cat deployments/getting-started/Proxy__OVM_L1StandardBridge.json) +PROXY_ADDRESS=$(echo "$PROXY_JSON" | jq -r '.address') + +# Send balance to the above Proxy contract in L1 for reflecting balance in L2 +# First account +yarn hardhat send-balance --to "${PROXY_ADDRESS}" --amount 1 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started +# Second account +yarn hardhat send-balance --to "${PROXY_ADDRESS}" --amount 1 --private-key "${CERC_L1_PRIV_KEY_2}" --network getting-started + +echo "Balance sent to Proxy L2 contract" +echo "Use following accounts for transactions in L2:" +echo "${CERC_L1_ADDRESS}" +echo "${CERC_L1_ADDRESS_2}" +echo "Done" diff --git a/build/lib/app/data/config/fixturenet-optimism/optimism-contracts/update-config.js b/build/lib/app/data/config/fixturenet-optimism/optimism-contracts/update-config.js new file mode 100644 index 00000000..8a6c09d4 --- /dev/null +++ b/build/lib/app/data/config/fixturenet-optimism/optimism-contracts/update-config.js @@ -0,0 +1,36 @@ +const fs = require('fs') + +// Get the command-line argument +const configFile = process.argv[2] +const adminAddress = process.argv[3] +const proposerAddress = process.argv[4] +const batcherAddress = process.argv[5] +const sequencerAddress = process.argv[6] +const blockHash = process.argv[7] + +// Read the JSON file +const configData = fs.readFileSync(configFile) +const configObj = JSON.parse(configData) + +// Update the finalSystemOwner property with the ADMIN_ADDRESS value +configObj.finalSystemOwner = + configObj.portalGuardian = 
+ configObj.controller = + configObj.l2OutputOracleChallenger = + configObj.proxyAdminOwner = + configObj.baseFeeVaultRecipient = + configObj.l1FeeVaultRecipient = + configObj.sequencerFeeVaultRecipient = + configObj.governanceTokenOwner = + adminAddress + +configObj.l2OutputOracleProposer = proposerAddress + +configObj.batchSenderAddress = batcherAddress + +configObj.p2pSequencerAddress = sequencerAddress + +configObj.l1StartingBlockTag = blockHash + +// Write the updated JSON object back to the file +fs.writeFileSync(configFile, JSON.stringify(configObj, null, 2)) diff --git a/build/lib/app/data/config/fixturenet-optimism/run-op-batcher.sh b/build/lib/app/data/config/fixturenet-optimism/run-op-batcher.sh new file mode 100755 index 00000000..18955545 --- /dev/null +++ b/build/lib/app/data/config/fixturenet-optimism/run-op-batcher.sh @@ -0,0 +1,39 @@ +#!/bin/sh +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}" + +# Get Batcher key from keys.json +BATCHER_KEY=$(jq -r '.Batcher.privateKey' /l2-accounts/keys.json | tr -d '"') + +cleanup() { + echo "Signal received, cleaning up..." + kill ${batcher_pid} + + wait + echo "Done" +} +trap 'cleanup' INT TERM + +# Run op-batcher +op-batcher \ + --l2-eth-rpc=http://op-geth:8545 \ + --rollup-rpc=http://op-node:8547 \ + --poll-interval=1s \ + --sub-safety-margin=6 \ + --num-confirmations=1 \ + --safe-abort-nonce-too-low-count=3 \ + --resubmission-timeout=30s \ + --rpc.addr=0.0.0.0 \ + --rpc.port=8548 \ + --rpc.enable-admin \ + --max-channel-duration=1 \ + --l1-eth-rpc=$CERC_L1_RPC \ + --private-key=$BATCHER_KEY \ + & + +batcher_pid=$! 
+wait $batcher_pid diff --git a/build/lib/app/data/config/fixturenet-optimism/run-op-geth.sh b/build/lib/app/data/config/fixturenet-optimism/run-op-geth.sh new file mode 100755 index 00000000..8b521f85 --- /dev/null +++ b/build/lib/app/data/config/fixturenet-optimism/run-op-geth.sh @@ -0,0 +1,90 @@ +#!/bin/sh +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +# TODO: Add in container build or use other tool +echo "Installing jq" +apk update && apk add jq + +# Get Sequencer key from keys.json +SEQUENCER_KEY=$(jq -r '.Sequencer.privateKey' /l2-accounts/keys.json | tr -d '"') + +# Initialize op-geth if datadir/geth not found +if [ -f /op-node/jwt.txt ] && [ -d datadir/geth ]; then + echo "Found existing datadir, checking block signer key" + + BLOCK_SIGNER_KEY=$(cat datadir/block-signer-key) + + if [ "$SEQUENCER_KEY" = "$BLOCK_SIGNER_KEY" ]; then + echo "Sequencer and block signer keys match, skipping initialization" + else + echo "Sequencer and block signer keys don't match, please clear L2 geth data volume before starting" + exit 1 + fi +else + echo "Initializing op-geth" + + mkdir -p datadir + echo "pwd" > datadir/password + echo $SEQUENCER_KEY > datadir/block-signer-key + + geth account import --datadir=datadir --password=datadir/password datadir/block-signer-key + + while [ ! -f "/op-node/jwt.txt" ] + do + echo "Config files not created. Checking after 5 seconds." + sleep 5 + done + + echo "Config files created by op-node, proceeding with the initialization..." + + geth init --datadir=datadir /op-node/genesis.json + echo "Node Initialized" +fi + +SEQUENCER_ADDRESS=$(jq -r '.Sequencer.address' /l2-accounts/keys.json | tr -d '"') +echo "SEQUENCER_ADDRESS: ${SEQUENCER_ADDRESS}" + +cleanup() { + echo "Signal received, cleaning up..." 
+ kill ${geth_pid} + + wait + echo "Done" +} +trap 'cleanup' INT TERM + +# Run op-geth +geth \ + --datadir ./datadir \ + --http \ + --http.corsdomain="*" \ + --http.vhosts="*" \ + --http.addr=0.0.0.0 \ + --http.api=web3,debug,eth,txpool,net,engine \ + --ws \ + --ws.addr=0.0.0.0 \ + --ws.port=8546 \ + --ws.origins="*" \ + --ws.api=debug,eth,txpool,net,engine \ + --syncmode=full \ + --gcmode=archive \ + --nodiscover \ + --maxpeers=0 \ + --networkid=42069 \ + --authrpc.vhosts="*" \ + --authrpc.addr=0.0.0.0 \ + --authrpc.port=8551 \ + --authrpc.jwtsecret=/op-node/jwt.txt \ + --rollup.disabletxpoolgossip=true \ + --password=./datadir/password \ + --allow-insecure-unlock \ + --mine \ + --miner.etherbase=$SEQUENCER_ADDRESS \ + --unlock=$SEQUENCER_ADDRESS \ + & + +geth_pid=$! +wait $geth_pid diff --git a/build/lib/app/data/config/fixturenet-optimism/run-op-node.sh b/build/lib/app/data/config/fixturenet-optimism/run-op-node.sh new file mode 100755 index 00000000..516cf0a5 --- /dev/null +++ b/build/lib/app/data/config/fixturenet-optimism/run-op-node.sh @@ -0,0 +1,26 @@ +#!/bin/sh +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}" + +# Get Sequencer key from keys.json +SEQUENCER_KEY=$(jq -r '.Sequencer.privateKey' /l2-accounts/keys.json | tr -d '"') + +# Run op-node +op-node \ + --l2=http://op-geth:8551 \ + --l2.jwt-secret=/op-node-data/jwt.txt \ + --sequencer.enabled \ + --sequencer.l1-confs=3 \ + --verifier.l1-confs=3 \ + --rollup.config=/op-node-data/rollup.json \ + --rpc.addr=0.0.0.0 \ + --rpc.port=8547 \ + --p2p.disable \ + --rpc.enable-admin \ + --p2p.sequencer.key=$SEQUENCER_KEY \ + --l1=$CERC_L1_RPC \ + --l1.rpckind=any diff --git a/build/lib/app/data/config/fixturenet-optimism/run-op-proposer.sh b/build/lib/app/data/config/fixturenet-optimism/run-op-proposer.sh new file mode 100755 index 00000000..09746760 --- /dev/null +++ b/build/lib/app/data/config/fixturenet-optimism/run-op-proposer.sh @@ -0,0 +1,36 
@@ +#!/bin/sh +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}" + +# Read the L2OutputOracle contract address from the deployment +L2OO_DEPLOYMENT=$(cat /contracts-bedrock/deployments/getting-started/L2OutputOracle.json) +L2OO_ADDR=$(echo "$L2OO_DEPLOYMENT" | jq -r '.address') + +# Get Proposer key from keys.json +PROPOSER_KEY=$(jq -r '.Proposer.privateKey' /l2-accounts/keys.json | tr -d '"') + +cleanup() { + echo "Signal received, cleaning up..." + kill ${proposer_pid} + + wait + echo "Done" +} +trap 'cleanup' INT TERM + +# Run op-proposer +op-proposer \ + --poll-interval 12s \ + --rpc.port 8560 \ + --rollup-rpc http://op-node:8547 \ + --l2oo-address $L2OO_ADDR \ + --private-key $PROPOSER_KEY \ + --l1-eth-rpc $CERC_L1_RPC \ + & + +proposer_pid=$! +wait $proposer_pid diff --git a/build/lib/app/data/config/foundry/foundry.toml b/build/lib/app/data/config/foundry/foundry.toml new file mode 100644 index 00000000..933f895c --- /dev/null +++ b/build/lib/app/data/config/foundry/foundry.toml @@ -0,0 +1,2 @@ +[profile.default] +eth-rpc-url = "http://fixturenet-eth-geth-1:8545" diff --git a/build/lib/app/data/config/ipld-eth-beacon-indexer/indexer.env b/build/lib/app/data/config/ipld-eth-beacon-indexer/indexer.env new file mode 100644 index 00000000..b19800aa --- /dev/null +++ b/build/lib/app/data/config/ipld-eth-beacon-indexer/indexer.env @@ -0,0 +1,31 @@ +# Match compose/docker-compose-ipld-eth-beacon-db.yml +POSTGRES_HOST=ipld-eth-beacon-db +POSTGRES_PORT=5432 +POSTGRES_DB=cerc_testing +POSTGRES_USER=vdbm +POSTGRES_PASSWORD=password + +# Match compose/docker-compose-fixturenet-eth.yml +LIGHTHOUSE_HOST=fixturenet-eth-lighthouse-1 +LIGHTHOUSE_PORT=8001 +LIGHTHOUSE_PROTOCOL=http + +CAPTURE_MODE=head +LOG_LEVEL=debug + +BC_MAX_HISTORIC_PROCESS_WORKER=2 +BC_UNIQUE_NODE_IDENTIFIER=1001 +BC_CHECK_DB=true +BC_BEACON_STATE_PROCESSING_ENABLED=false +BC_BEACON_BLOCK_PROCESSING_ENABLED=true +BC_MINIMUM_SLOT=0 + 
+KG_INCREMENT=10000 +KG_PROCESS_KNOWN_GAPS_ENABLED=true +KG_MAX_KNOWN_GAPS_WORKER=2 +KG_MINIMUM_SLOT=0 + +# Match compose/docker-compose-prometheus-grafana.yml +PROM_HOST=prometheus +PROM_PORT=9000 +PROM_METRICS_ENABLED=false diff --git a/build/lib/app/data/config/ipld-eth-server/chain.json b/build/lib/app/data/config/ipld-eth-server/chain.json new file mode 100644 index 00000000..60f17432 --- /dev/null +++ b/build/lib/app/data/config/ipld-eth-server/chain.json @@ -0,0 +1,15 @@ +{ + "chainId": 99, + "homesteadBlock": 0, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "clique": { + "period": 0, + "epoch": 3000 + } +} diff --git a/build/lib/app/data/config/keycloak/import/cerc-realm.json b/build/lib/app/data/config/keycloak/import/cerc-realm.json new file mode 100644 index 00000000..e1e9dc97 --- /dev/null +++ b/build/lib/app/data/config/keycloak/import/cerc-realm.json @@ -0,0 +1,2087 @@ +{ + "id": "cerc", + "realm": "cerc", + "notBefore": 0, + "defaultSignatureAlgorithm": "RS256", + "revokeRefreshToken": false, + "refreshTokenMaxReuse": 0, + "accessTokenLifespan": 300, + "accessTokenLifespanForImplicitFlow": 900, + "ssoSessionIdleTimeout": 1800, + "ssoSessionMaxLifespan": 36000, + "ssoSessionIdleTimeoutRememberMe": 0, + "ssoSessionMaxLifespanRememberMe": 0, + "offlineSessionIdleTimeout": 2592000, + "offlineSessionMaxLifespanEnabled": false, + "offlineSessionMaxLifespan": 5184000, + "clientSessionIdleTimeout": 0, + "clientSessionMaxLifespan": 0, + "clientOfflineSessionIdleTimeout": 0, + "clientOfflineSessionMaxLifespan": 0, + "accessCodeLifespan": 60, + "accessCodeLifespanUserAction": 300, + "accessCodeLifespanLogin": 1800, + "actionTokenGeneratedByAdminLifespan": 43200, + "actionTokenGeneratedByUserLifespan": 300, + "oauth2DeviceCodeLifespan": 600, + "oauth2DevicePollingInterval": 5, + "enabled": true, + "sslRequired": "external", + "registrationAllowed": 
false, + "registrationEmailAsUsername": false, + "rememberMe": false, + "verifyEmail": false, + "loginWithEmailAllowed": true, + "duplicateEmailsAllowed": false, + "resetPasswordAllowed": false, + "editUsernameAllowed": false, + "bruteForceProtected": false, + "permanentLockout": false, + "maxFailureWaitSeconds": 900, + "minimumQuickLoginWaitSeconds": 60, + "waitIncrementSeconds": 60, + "quickLoginCheckMilliSeconds": 1000, + "maxDeltaTimeSeconds": 43200, + "failureFactor": 30, + "defaultRole": { + "id": "211646ea-04a3-467e-9f25-f7539a405d03", + "name": "default-roles-cerc", + "description": "${role_default-roles}", + "composite": true, + "clientRole": false, + "containerId": "cerc" + }, + "requiredCredentials": [ + "password" + ], + "otpPolicyType": "totp", + "otpPolicyAlgorithm": "HmacSHA1", + "otpPolicyInitialCounter": 0, + "otpPolicyDigits": 6, + "otpPolicyLookAheadWindow": 1, + "otpPolicyPeriod": 30, + "otpPolicyCodeReusable": false, + "otpSupportedApplications": [ + "totpAppGoogleName", + "totpAppFreeOTPName" + ], + "webAuthnPolicyRpEntityName": "keycloak", + "webAuthnPolicySignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyRpId": "", + "webAuthnPolicyAttestationConveyancePreference": "not specified", + "webAuthnPolicyAuthenticatorAttachment": "not specified", + "webAuthnPolicyRequireResidentKey": "not specified", + "webAuthnPolicyUserVerificationRequirement": "not specified", + "webAuthnPolicyCreateTimeout": 0, + "webAuthnPolicyAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyAcceptableAaguids": [], + "webAuthnPolicyPasswordlessRpEntityName": "keycloak", + "webAuthnPolicyPasswordlessSignatureAlgorithms": [ + "ES256" + ], + "webAuthnPolicyPasswordlessRpId": "", + "webAuthnPolicyPasswordlessAttestationConveyancePreference": "not specified", + "webAuthnPolicyPasswordlessAuthenticatorAttachment": "not specified", + "webAuthnPolicyPasswordlessRequireResidentKey": "not specified", + "webAuthnPolicyPasswordlessUserVerificationRequirement": "not 
specified", + "webAuthnPolicyPasswordlessCreateTimeout": 0, + "webAuthnPolicyPasswordlessAvoidSameAuthenticatorRegister": false, + "webAuthnPolicyPasswordlessAcceptableAaguids": [], + "users": [ + { + "id": "70af487b-c6d8-4f51-84d2-a23e8c9df7a3", + "createdTimestamp": 1670910521308, + "username": "service-account-dashboard-client", + "enabled": true, + "totp": false, + "emailVerified": false, + "serviceAccountClientId": "dashboard-client", + "disableableCredentialTypes": [], + "requiredActions": [], + "notBefore": 0 + } + ], + "scopeMappings": [ + { + "clientScope": "offline_access", + "roles": [ + "offline_access" + ] + } + ], + "clientScopeMappings": { + "account": [ + { + "client": "account-console", + "roles": [ + "manage-account", + "view-groups" + ] + } + ] + }, + "clients": [ + { + "id": "1ff40495-e44c-4cbc-886a-87c3ca1edc9d", + "clientId": "account", + "name": "${client_account}", + "rootUrl": "${authBaseUrl}", + "baseUrl": "/realms/cerc/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/realms/cerc/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "post.logout.redirect.uris": "+" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "profile", + "roles", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "75b3bc74-dd4d-4d0a-940c-f1a809c004a6", + "clientId": "account-console", + "name": "${client_account-console}", + "rootUrl": 
"${authBaseUrl}", + "baseUrl": "/realms/cerc/account/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "redirectUris": [ + "/realms/cerc/account/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "post.logout.redirect.uris": "+", + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "4ec0bc59-9111-46da-a7d3-549b7aa0e398", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + } + ], + "defaultClientScopes": [ + "web-origins", + "profile", + "roles", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "0dc11c0a-b159-4e48-bdf9-31a1fccd25c6", + "clientId": "admin-cli", + "name": "${client_admin-cli}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": false, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "post.logout.redirect.uris": "+" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", 
+ "profile", + "roles", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "c8a751e8-08be-427f-9191-3bdc0cc3e829", + "clientId": "broker", + "name": "${client_broker}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "post.logout.redirect.uris": "+" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "profile", + "roles", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "2d7384c7-9301-4a57-8fb5-b42aa43b8d3f", + "clientId": "dashboard-client", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "http://localhost:8180/*" + ], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": true, + "authorizationServicesEnabled": true, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "saml.assertion.signature": "false", + "saml.force.post.binding": "false", + "saml.multivalued.roles": "false", + "saml.encrypt": "false", + "post.logout.redirect.uris": "+", + "saml.server.signature": "false", + 
"saml.server.signature.keyinfo.ext": "false", + "exclude.session.state.from.auth.response": "false", + "saml_force_name_id_format": "false", + "saml.client.signature": "false", + "tls.client.certificate.bound.access.tokens": "false", + "saml.authnstatement": "false", + "display.on.consent.screen": "false", + "saml.onetimeuse.condition": "false" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": true, + "nodeReRegistrationTimeout": -1, + "protocolMappers": [ + { + "id": "5746e878-a248-4170-9f6e-221dad215e25", + "name": "Client ID", + "protocol": "openid-connect", + "protocolMapper": "oidc-usersessionmodel-note-mapper", + "consentRequired": false, + "config": { + "user.session.note": "clientId", + "userinfo.token.claim": "true", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "clientId", + "jsonType.label": "String" + } + }, + { + "id": "e584082b-a232-45bd-8520-bc88908642a1", + "name": "Client IP Address", + "protocol": "openid-connect", + "protocolMapper": "oidc-usersessionmodel-note-mapper", + "consentRequired": false, + "config": { + "user.session.note": "clientAddress", + "userinfo.token.claim": "true", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "clientAddress", + "jsonType.label": "String" + } + }, + { + "id": "bd9eaacb-6c5b-4bf1-bc0d-2457f7f7a767", + "name": "api-key", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "api-key", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "api-key", + "jsonType.label": "String" + } + }, + { + "id": "a10834b6-005a-4083-84e7-69ea2c08c0a8", + "name": "Client Host", + "protocol": "openid-connect", + "protocolMapper": "oidc-usersessionmodel-note-mapper", + "consentRequired": false, + "config": { + "user.session.note": "clientHost", + "userinfo.token.claim": "true", + "id.token.claim": 
"true", + "access.token.claim": "true", + "claim.name": "clientHost", + "jsonType.label": "String" + } + } + ], + "defaultClientScopes": [ + "web-origins", + "profile", + "roles", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ], + "authorizationSettings": { + "allowRemoteResourceManagement": true, + "policyEnforcementMode": "ENFORCING", + "resources": [ + { + "name": "Default Resource", + "type": "urn:dashboard-client:resources:default", + "ownerManagedAccess": false, + "attributes": {}, + "_id": "fd85dada-073c-4da0-ac3c-73a823e86e70", + "uris": [ + "/*" + ] + } + ], + "policies": [], + "scopes": [], + "decisionStrategy": "UNANIMOUS" + } + }, + { + "id": "1a91181f-823b-4cbf-9d7a-f5f097a00d73", + "clientId": "realm-management", + "name": "${client_realm-management}", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + "clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [], + "webOrigins": [], + "notBefore": 0, + "bearerOnly": true, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": false, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "post.logout.redirect.uris": "+" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "defaultClientScopes": [ + "web-origins", + "profile", + "roles", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + }, + { + "id": "1c10f8e8-6553-4d39-a705-8380214a01c9", + "clientId": "security-admin-console", + "name": "${client_security-admin-console}", + "rootUrl": "${authAdminUrl}", + "baseUrl": "/admin/cerc/console/", + "surrogateAuthRequired": false, + "enabled": true, + "alwaysDisplayInConsole": false, + 
"clientAuthenticatorType": "client-secret", + "secret": "**********", + "redirectUris": [ + "/admin/cerc/console/*" + ], + "webOrigins": [ + "+" + ], + "notBefore": 0, + "bearerOnly": false, + "consentRequired": false, + "standardFlowEnabled": true, + "implicitFlowEnabled": false, + "directAccessGrantsEnabled": false, + "serviceAccountsEnabled": false, + "publicClient": true, + "frontchannelLogout": false, + "protocol": "openid-connect", + "attributes": { + "post.logout.redirect.uris": "+", + "pkce.code.challenge.method": "S256" + }, + "authenticationFlowBindingOverrides": {}, + "fullScopeAllowed": false, + "nodeReRegistrationTimeout": 0, + "protocolMappers": [ + { + "id": "e65eaf73-6a5d-44da-a129-930481351e5e", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + } + ], + "defaultClientScopes": [ + "web-origins", + "profile", + "roles", + "email" + ], + "optionalClientScopes": [ + "address", + "phone", + "offline_access", + "microprofile-jwt" + ] + } + ], + "clientScopes": [ + { + "id": "6e3c0398-187d-4515-9fad-e09225e6484c", + "name": "email", + "description": "OpenID Connect built-in scope: email", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${emailScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "7e81f77f-8631-46a0-979a-7744ea451880", + "name": "email", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "email", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "email", + "jsonType.label": "String" + } + }, + { + 
"id": "b41d73c7-5ae4-4492-9f05-fe737bbd8a9b", + "name": "email verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "emailVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "email_verified", + "jsonType.label": "boolean" + } + } + ] + }, + { + "id": "42c276ef-e93e-4e65-a963-b84a7b229449", + "name": "microprofile-jwt", + "description": "Microprofile - JWT built-in scope", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "false" + }, + "protocolMappers": [ + { + "id": "145a68c2-075a-417c-bafb-824c0bb02dd2", + "name": "groups", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "multivalued": "true", + "userinfo.token.claim": "true", + "user.attribute": "foo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "groups", + "jsonType.label": "String" + } + }, + { + "id": "d9f7cb53-ae29-47e0-aaf8-edd40acfa5b9", + "name": "upn", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "upn", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "c88a720f-8fe6-4750-81b6-b87551066905", + "name": "role_list", + "description": "SAML role list", + "protocol": "saml", + "attributes": { + "consent.screen.text": "${samlRoleListScopeConsentText}", + "display.on.consent.screen": "true" + }, + "protocolMappers": [ + { + "id": "0244f0c4-773e-40e3-a0e4-308f5b10ab78", + "name": "role list", + "protocol": "saml", + "protocolMapper": "saml-role-list-mapper", + "consentRequired": false, + "config": { + "single": "false", + 
"attribute.nameformat": "Basic", + "attribute.name": "Role" + } + } + ] + }, + { + "id": "ba66e4d5-12f9-4c44-921d-42d901485803", + "name": "web-origins", + "description": "OpenID Connect scope for add allowed web origins to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "false", + "consent.screen.text": "" + }, + "protocolMappers": [ + { + "id": "0f2f1ccf-7292-4e49-a079-d9166ec100bb", + "name": "allowed web origins", + "protocol": "openid-connect", + "protocolMapper": "oidc-allowed-origins-mapper", + "consentRequired": false, + "config": {} + } + ] + }, + { + "id": "e73a3670-4958-43bc-b5fa-982a895bc8d4", + "name": "phone", + "description": "OpenID Connect built-in scope: phone", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${phoneScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "bf04e15d-711a-4f66-b6f4-c35f21fcb0c8", + "name": "phone number", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumber", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number", + "jsonType.label": "String" + } + }, + { + "id": "76493880-66bf-40d9-8f41-b14a8d400b1d", + "name": "phone number verified", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "phoneNumberVerified", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "phone_number_verified", + "jsonType.label": "boolean" + } + } + ] + }, + { + "id": "5cb4b2c4-880e-4437-b905-19a5eb471765", + "name": "profile", + "description": "OpenID Connect built-in scope: profile", + "protocol": "openid-connect", + 
"attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${profileScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "f7ba27e0-141e-4389-93d2-cc6c5fb1f78a", + "name": "family name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "lastName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "family_name", + "jsonType.label": "String" + } + }, + { + "id": "f3c2b39e-a11b-4640-acb3-c6ce139235e5", + "name": "zoneinfo", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "zoneinfo", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "zoneinfo", + "jsonType.label": "String" + } + }, + { + "id": "bbf1c241-15c1-4d94-812a-ad4e260f77df", + "name": "picture", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "picture", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "picture", + "jsonType.label": "String" + } + }, + { + "id": "0dc161e8-f2e8-4017-b895-c24a78d38e92", + "name": "birthdate", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "birthdate", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "birthdate", + "jsonType.label": "String" + } + }, + { + "id": "86761664-57a4-47df-a891-d0d721243327", + "name": "nickname", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + 
"user.attribute": "nickname", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "nickname", + "jsonType.label": "String" + } + }, + { + "id": "20f086d5-a07c-4711-88aa-3396fafb2adf", + "name": "full name", + "protocol": "openid-connect", + "protocolMapper": "oidc-full-name-mapper", + "consentRequired": false, + "config": { + "id.token.claim": "true", + "access.token.claim": "true", + "userinfo.token.claim": "true" + } + }, + { + "id": "d79a8b71-9312-4658-b14b-8f3145052116", + "name": "username", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "preferred_username", + "jsonType.label": "String" + } + }, + { + "id": "ad342e65-e36a-48cc-a90a-d48aacefab01", + "name": "profile", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "profile", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "profile", + "jsonType.label": "String" + } + }, + { + "id": "6abd60fb-39b7-4063-aaee-5ff380f0a97e", + "name": "gender", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "gender", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "gender", + "jsonType.label": "String" + } + }, + { + "id": "580133fc-8e44-4e7a-a526-dcbc7d82c911", + "name": "website", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "website", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "website", + 
"jsonType.label": "String" + } + }, + { + "id": "4bcde3c0-41ef-45e6-a23b-aea222640399", + "name": "locale", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "locale", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "locale", + "jsonType.label": "String" + } + }, + { + "id": "1ed7844e-9002-4c7b-be3d-61f9b3c725b9", + "name": "middle name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "middleName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "middle_name", + "jsonType.label": "String" + } + }, + { + "id": "4c9e9ec5-f40d-4b6e-9385-f86b0d228940", + "name": "updated at", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-attribute-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "updatedAt", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "updated_at", + "jsonType.label": "String" + } + }, + { + "id": "bf10082b-d485-4cf4-bf31-f0181884e8cf", + "name": "given name", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "consentRequired": false, + "config": { + "userinfo.token.claim": "true", + "user.attribute": "firstName", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "given_name", + "jsonType.label": "String" + } + } + ] + }, + { + "id": "b25abfe5-1130-4d7d-98f4-227f8b0dc4f9", + "name": "roles", + "description": "OpenID Connect scope for add user roles to the access token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "true", + "consent.screen.text": "${rolesScopeConsentText}" + }, + "protocolMappers": [ + { + 
"id": "84b22a06-dced-4b2f-bbc8-f818b01c73d0", + "name": "realm roles", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-realm-role-mapper", + "consentRequired": false, + "config": { + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "realm_access.roles", + "jsonType.label": "String", + "multivalued": "true" + } + }, + { + "id": "5c6ed3cf-0840-4191-81ea-7092569f70fe", + "name": "audience resolve", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-resolve-mapper", + "consentRequired": false, + "config": {} + }, + { + "id": "dce34b2a-e58f-41b8-86ab-794edeccae40", + "name": "client roles", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-client-role-mapper", + "consentRequired": false, + "config": { + "user.attribute": "foo", + "access.token.claim": "true", + "claim.name": "resource_access.${client_id}.roles", + "jsonType.label": "String", + "multivalued": "true" + } + } + ] + }, + { + "id": "00476d55-cd2f-4f60-92dd-6f3ff634799e", + "name": "acr", + "description": "OpenID Connect scope for add acr (authentication context class reference) to the token", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "false", + "display.on.consent.screen": "false" + }, + "protocolMappers": [ + { + "id": "f0ae1247-2120-4513-b1d1-fab7cfecfbb8", + "name": "acr loa level", + "protocol": "openid-connect", + "protocolMapper": "oidc-acr-mapper", + "consentRequired": false, + "config": { + "id.token.claim": "true", + "access.token.claim": "true", + "userinfo.token.claim": "true" + } + } + ] + }, + { + "id": "3f68af4c-10e8-4351-a62d-f829b9832037", + "name": "address", + "description": "OpenID Connect built-in scope: address", + "protocol": "openid-connect", + "attributes": { + "include.in.token.scope": "true", + "display.on.consent.screen": "true", + "consent.screen.text": "${addressScopeConsentText}" + }, + "protocolMappers": [ + { + "id": "3037e6e9-e1d7-492c-a060-9b2c35c688cb", + 
"name": "address", + "protocol": "openid-connect", + "protocolMapper": "oidc-address-mapper", + "consentRequired": false, + "config": { + "user.attribute.formatted": "formatted", + "user.attribute.country": "country", + "user.attribute.postal_code": "postal_code", + "userinfo.token.claim": "true", + "user.attribute.street": "street", + "id.token.claim": "true", + "user.attribute.region": "region", + "access.token.claim": "true", + "user.attribute.locality": "locality" + } + } + ] + }, + { + "id": "f900704b-5e92-451e-b093-02286cc22774", + "name": "offline_access", + "description": "OpenID Connect built-in scope: offline_access", + "protocol": "openid-connect", + "attributes": { + "consent.screen.text": "${offlineAccessScopeConsentText}", + "display.on.consent.screen": "true" + } + } + ], + "defaultDefaultClientScopes": [ + "profile", + "email", + "roles", + "web-origins", + "role_list", + "acr" + ], + "defaultOptionalClientScopes": [ + "address", + "microprofile-jwt", + "phone", + "offline_access" + ], + "browserSecurityHeaders": { + "contentSecurityPolicyReportOnly": "", + "xContentTypeOptions": "nosniff", + "xRobotsTag": "none", + "xFrameOptions": "SAMEORIGIN", + "contentSecurityPolicy": "frame-src 'self'; frame-ancestors 'self'; object-src 'none';", + "xXSSProtection": "1; mode=block", + "strictTransportSecurity": "max-age=31536000; includeSubDomains" + }, + "smtpServer": {}, + "accountTheme": "custom", + "eventsEnabled": true, + "eventsExpiration": 604800, + "eventsListeners": [ + "api-key-registration-generation", + "metrics-listener", + "jboss-logging" + ], + "enabledEventTypes": [ + "SEND_RESET_PASSWORD", + "REMOVE_TOTP", + "REVOKE_GRANT", + "UPDATE_TOTP", + "LOGIN_ERROR", + "CLIENT_LOGIN", + "RESET_PASSWORD_ERROR", + "IMPERSONATE_ERROR", + "CODE_TO_TOKEN_ERROR", + "CUSTOM_REQUIRED_ACTION", + "RESTART_AUTHENTICATION", + "IMPERSONATE", + "UPDATE_PROFILE_ERROR", + "LOGIN", + "UPDATE_PASSWORD_ERROR", + "CLIENT_INITIATED_ACCOUNT_LINKING", + "TOKEN_EXCHANGE", + 
"LOGOUT", + "REGISTER", + "CLIENT_REGISTER", + "IDENTITY_PROVIDER_LINK_ACCOUNT", + "UPDATE_PASSWORD", + "CLIENT_DELETE", + "FEDERATED_IDENTITY_LINK_ERROR", + "IDENTITY_PROVIDER_FIRST_LOGIN", + "CLIENT_DELETE_ERROR", + "VERIFY_EMAIL", + "CLIENT_LOGIN_ERROR", + "RESTART_AUTHENTICATION_ERROR", + "EXECUTE_ACTIONS", + "REMOVE_FEDERATED_IDENTITY_ERROR", + "TOKEN_EXCHANGE_ERROR", + "PERMISSION_TOKEN", + "SEND_IDENTITY_PROVIDER_LINK_ERROR", + "EXECUTE_ACTION_TOKEN_ERROR", + "SEND_VERIFY_EMAIL", + "EXECUTE_ACTIONS_ERROR", + "REMOVE_FEDERATED_IDENTITY", + "IDENTITY_PROVIDER_POST_LOGIN", + "IDENTITY_PROVIDER_LINK_ACCOUNT_ERROR", + "UPDATE_EMAIL", + "REGISTER_ERROR", + "REVOKE_GRANT_ERROR", + "EXECUTE_ACTION_TOKEN", + "LOGOUT_ERROR", + "UPDATE_EMAIL_ERROR", + "CLIENT_UPDATE_ERROR", + "UPDATE_PROFILE", + "CLIENT_REGISTER_ERROR", + "FEDERATED_IDENTITY_LINK", + "SEND_IDENTITY_PROVIDER_LINK", + "SEND_VERIFY_EMAIL_ERROR", + "RESET_PASSWORD", + "CLIENT_INITIATED_ACCOUNT_LINKING_ERROR", + "REMOVE_TOTP_ERROR", + "VERIFY_EMAIL_ERROR", + "SEND_RESET_PASSWORD_ERROR", + "CLIENT_UPDATE", + "CUSTOM_REQUIRED_ACTION_ERROR", + "IDENTITY_PROVIDER_POST_LOGIN_ERROR", + "UPDATE_TOTP_ERROR", + "CODE_TO_TOKEN", + "IDENTITY_PROVIDER_FIRST_LOGIN_ERROR" + ], + "adminEventsEnabled": false, + "adminEventsDetailsEnabled": false, + "identityProviders": [], + "identityProviderMappers": [], + "components": { + "org.keycloak.services.clientregistration.policy.ClientRegistrationPolicy": [ + { + "id": "e108ed9d-422a-4c84-af0e-d7ea9ddc1890", + "name": "Trusted Hosts", + "providerId": "trusted-hosts", + "subType": "anonymous", + "subComponents": {}, + "config": { + "host-sending-registration-request-must-match": [ + "true" + ], + "client-uris-must-match": [ + "true" + ] + } + }, + { + "id": "b9a4a7f7-d684-45bd-b4bf-646be1f79364", + "name": "Full Scope Disabled", + "providerId": "scope", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "8df4222e-0b62-44dc-be51-f27d828f0f66", + 
"name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "saml-user-property-mapper", + "oidc-sha256-pairwise-sub-mapper", + "oidc-full-name-mapper", + "saml-role-list-mapper", + "oidc-usermodel-property-mapper", + "oidc-usermodel-attribute-mapper", + "saml-user-attribute-mapper", + "oidc-address-mapper" + ] + } + }, + { + "id": "59dd3e18-4dbe-4054-b012-423e8c4da909", + "name": "Max Clients Limit", + "providerId": "max-clients", + "subType": "anonymous", + "subComponents": {}, + "config": { + "max-clients": [ + "200" + ] + } + }, + { + "id": "7ce212c8-2587-4f6c-8824-705eabb7f925", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } + }, + { + "id": "4cbfdd25-6c33-4bad-8d88-9a1aec6c8e25", + "name": "Consent Required", + "providerId": "consent-required", + "subType": "anonymous", + "subComponents": {}, + "config": {} + }, + { + "id": "bd008843-3c81-4750-ae85-a5e4e181b877", + "name": "Allowed Protocol Mapper Types", + "providerId": "allowed-protocol-mappers", + "subType": "authenticated", + "subComponents": {}, + "config": { + "allowed-protocol-mapper-types": [ + "oidc-usermodel-property-mapper", + "saml-role-list-mapper", + "saml-user-property-mapper", + "oidc-full-name-mapper", + "oidc-address-mapper", + "oidc-sha256-pairwise-sub-mapper", + "saml-user-attribute-mapper", + "oidc-usermodel-attribute-mapper" + ] + } + }, + { + "id": "2edf8e74-e1b6-4e6d-83a3-c1123d462d14", + "name": "Allowed Client Scopes", + "providerId": "allowed-client-templates", + "subType": "anonymous", + "subComponents": {}, + "config": { + "allow-default-scopes": [ + "true" + ] + } + } + ], + "org.keycloak.userprofile.UserProfileProvider": [ + { + "id": "bfd8d11c-d90c-4620-802d-2b5bb04ed9d3", + "providerId": 
"declarative-user-profile", + "subComponents": {}, + "config": {} + } + ], + "org.keycloak.keys.KeyProvider": [ + { + "id": "ca2afd56-df5d-47ab-bea4-4416c859a338", + "name": "hmac-generated", + "providerId": "hmac-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ], + "algorithm": [ + "HS256" + ] + } + }, + { + "id": "c72d323d-5737-4bed-bbc9-41be440e99fb", + "name": "rsa-generated", + "providerId": "rsa-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + }, + { + "id": "f80ab6e7-1b0a-4de4-acaa-3275d3f867a2", + "name": "aes-generated", + "providerId": "aes-generated", + "subComponents": {}, + "config": { + "priority": [ + "100" + ] + } + } + ] + }, + "internationalizationEnabled": false, + "supportedLocales": [], + "authenticationFlows": [ + { + "id": "43505ad9-3c8d-4f11-9f90-55bcf19e621b", + "alias": "Handle Existing Account", + "description": "Handle what to do if there is existing account with same email/username like authenticated identity provider", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-confirm-link", + "authenticatorFlow": false, + "requirement": "REQUIRED", + "priority": 10, + "autheticatorFlow": false, + "userSetupAllowed": false + }, + { + "authenticatorFlow": true, + "requirement": "REQUIRED", + "priority": 20, + "autheticatorFlow": true, + "flowAlias": "Handle Existing Account - Alternatives - 0", + "userSetupAllowed": false + } + ] + }, + { + "id": "f5a8bcf1-b58f-4fd9-a0c1-4ec3933d9d64", + "alias": "Handle Existing Account - Alternatives - 0", + "description": "Subflow of Handle Existing Account with alternative executions", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-email-verification", + "authenticatorFlow": false, + "requirement": "ALTERNATIVE", + "priority": 10, + "autheticatorFlow": false, + "userSetupAllowed": false + 
}, + { + "authenticatorFlow": true, + "requirement": "ALTERNATIVE", + "priority": 20, + "autheticatorFlow": true, + "flowAlias": "Verify Existing Account by Re-authentication", + "userSetupAllowed": false + } + ] + }, + { + "id": "b3f19451-b375-4341-8c23-f9a3b531ceb0", + "alias": "Verify Existing Account by Re-authentication", + "description": "Reauthentication of existing account", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "idp-username-password-form", + "authenticatorFlow": false, + "requirement": "REQUIRED", + "priority": 10, + "autheticatorFlow": false, + "userSetupAllowed": false + }, + { + "authenticatorFlow": true, + "requirement": "CONDITIONAL", + "priority": 20, + "autheticatorFlow": true, + "flowAlias": "Verify Existing Account by Re-authentication - auth-otp-form - Conditional", + "userSetupAllowed": false + } + ] + }, + { + "id": "0db81a1c-dd36-4721-89e4-19dc7e204b56", + "alias": "Verify Existing Account by Re-authentication - auth-otp-form - Conditional", + "description": "Flow to determine if the auth-otp-form authenticator should be used or not.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "authenticatorFlow": false, + "requirement": "REQUIRED", + "priority": 10, + "autheticatorFlow": false, + "userSetupAllowed": false + }, + { + "authenticator": "auth-otp-form", + "authenticatorFlow": false, + "requirement": "REQUIRED", + "priority": 20, + "autheticatorFlow": false, + "userSetupAllowed": false + } + ] + }, + { + "id": "e0937686-c0c4-41b2-8abd-98b5219e1953", + "alias": "browser", + "description": "browser based authentication", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-cookie", + "authenticatorFlow": false, + "requirement": "ALTERNATIVE", + "priority": 10, + 
"autheticatorFlow": false, + "userSetupAllowed": false + }, + { + "authenticator": "auth-spnego", + "authenticatorFlow": false, + "requirement": "DISABLED", + "priority": 20, + "autheticatorFlow": false, + "userSetupAllowed": false + }, + { + "authenticator": "identity-provider-redirector", + "authenticatorFlow": false, + "requirement": "ALTERNATIVE", + "priority": 25, + "autheticatorFlow": false, + "userSetupAllowed": false + }, + { + "authenticatorFlow": true, + "requirement": "ALTERNATIVE", + "priority": 30, + "autheticatorFlow": true, + "flowAlias": "forms", + "userSetupAllowed": false + } + ] + }, + { + "id": "3508fa7b-a459-44ad-b56a-af9737ed86a5", + "alias": "browser plus basic", + "description": "browser based authentication", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": false, + "authenticationExecutions": [ + { + "authenticator": "auth-cookie", + "authenticatorFlow": false, + "requirement": "ALTERNATIVE", + "priority": 10, + "autheticatorFlow": false, + "userSetupAllowed": false + }, + { + "authenticator": "basic-auth", + "authenticatorFlow": false, + "requirement": "ALTERNATIVE", + "priority": 20, + "autheticatorFlow": false, + "userSetupAllowed": false + }, + { + "authenticator": "auth-spnego", + "authenticatorFlow": false, + "requirement": "DISABLED", + "priority": 25, + "autheticatorFlow": false, + "userSetupAllowed": false + }, + { + "authenticator": "identity-provider-redirector", + "authenticatorFlow": false, + "requirement": "ALTERNATIVE", + "priority": 30, + "autheticatorFlow": false, + "userSetupAllowed": false + }, + { + "authenticatorFlow": true, + "requirement": "ALTERNATIVE", + "priority": 31, + "autheticatorFlow": true, + "flowAlias": "browser plus basic forms", + "userSetupAllowed": false + } + ] + }, + { + "id": "79ee49ad-20f2-4967-a9bf-ddca82c1516c", + "alias": "browser plus basic forms", + "description": "Username, password, otp and other auth forms.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": 
false, + "authenticationExecutions": [ + { + "authenticator": "auth-username-password-form", + "authenticatorFlow": false, + "requirement": "REQUIRED", + "priority": 10, + "autheticatorFlow": false, + "userSetupAllowed": false + }, + { + "authenticatorFlow": true, + "requirement": "CONDITIONAL", + "priority": 20, + "autheticatorFlow": true, + "flowAlias": "browser plus basic forms - auth-otp-form - Conditional", + "userSetupAllowed": false + } + ] + }, + { + "id": "802ce2dc-dd4a-45e6-837e-fecc17affe55", + "alias": "browser plus basic forms - auth-otp-form - Conditional", + "description": "Flow to determine if the auth-otp-form authenticator should be used or not.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": false, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "authenticatorFlow": false, + "requirement": "REQUIRED", + "priority": 10, + "autheticatorFlow": false, + "userSetupAllowed": false + }, + { + "authenticator": "auth-otp-form", + "authenticatorFlow": false, + "requirement": "REQUIRED", + "priority": 20, + "autheticatorFlow": false, + "userSetupAllowed": false + } + ] + }, + { + "id": "0f4a4d19-db06-409b-baa8-a3c8a6f52a22", + "alias": "clients", + "description": "Base authentication for clients", + "providerId": "client-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "client-secret", + "authenticatorFlow": false, + "requirement": "ALTERNATIVE", + "priority": 10, + "autheticatorFlow": false, + "userSetupAllowed": false + }, + { + "authenticator": "client-jwt", + "authenticatorFlow": false, + "requirement": "ALTERNATIVE", + "priority": 20, + "autheticatorFlow": false, + "userSetupAllowed": false + }, + { + "authenticator": "client-secret-jwt", + "authenticatorFlow": false, + "requirement": "ALTERNATIVE", + "priority": 30, + "autheticatorFlow": false, + "userSetupAllowed": false + }, + { + "authenticator": "client-x509", + "authenticatorFlow": false, 
+ "requirement": "ALTERNATIVE", + "priority": 40, + "autheticatorFlow": false, + "userSetupAllowed": false + } + ] + }, + { + "id": "b177d3f1-dad8-4b40-ac1d-04038f0e5a7d", + "alias": "direct grant", + "description": "OpenID Connect Resource Owner Grant", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "direct-grant-validate-username", + "authenticatorFlow": false, + "requirement": "REQUIRED", + "priority": 10, + "autheticatorFlow": false, + "userSetupAllowed": false + }, + { + "authenticator": "direct-grant-validate-password", + "authenticatorFlow": false, + "requirement": "REQUIRED", + "priority": 20, + "autheticatorFlow": false, + "userSetupAllowed": false + }, + { + "authenticatorFlow": true, + "requirement": "CONDITIONAL", + "priority": 30, + "autheticatorFlow": true, + "flowAlias": "direct grant - direct-grant-validate-otp - Conditional", + "userSetupAllowed": false + } + ] + }, + { + "id": "788ccbc9-c3c8-468d-8d4c-d2eb04b438a5", + "alias": "direct grant - direct-grant-validate-otp - Conditional", + "description": "Flow to determine if the direct-grant-validate-otp authenticator should be used or not.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "authenticatorFlow": false, + "requirement": "REQUIRED", + "priority": 10, + "autheticatorFlow": false, + "userSetupAllowed": false + }, + { + "authenticator": "direct-grant-validate-otp", + "authenticatorFlow": false, + "requirement": "REQUIRED", + "priority": 20, + "autheticatorFlow": false, + "userSetupAllowed": false + } + ] + }, + { + "id": "8edd3a8f-7d9d-4029-8fd2-21a8ead2b090", + "alias": "docker auth", + "description": "Used by Docker clients to authenticate against the IDP", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": 
"docker-http-basic-authenticator", + "authenticatorFlow": false, + "requirement": "REQUIRED", + "priority": 10, + "autheticatorFlow": false, + "userSetupAllowed": false + } + ] + }, + { + "id": "a67bc8ee-b99a-409f-adf5-a7d4c7f27512", + "alias": "first broker login", + "description": "Actions taken after first broker login with identity provider account, which is not yet linked to any Keycloak account", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "review profile config", + "authenticator": "idp-review-profile", + "authenticatorFlow": false, + "requirement": "REQUIRED", + "priority": 10, + "autheticatorFlow": false, + "userSetupAllowed": false + }, + { + "authenticatorFlow": true, + "requirement": "REQUIRED", + "priority": 20, + "autheticatorFlow": true, + "flowAlias": "first broker login - Alternatives - 0", + "userSetupAllowed": false + } + ] + }, + { + "id": "ffe8dad9-6998-4358-ab2c-061cf7235d53", + "alias": "first broker login - Alternatives - 0", + "description": "Subflow of first broker login with alternative executions", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticatorConfig": "create unique user config", + "authenticator": "idp-create-user-if-unique", + "authenticatorFlow": false, + "requirement": "ALTERNATIVE", + "priority": 10, + "autheticatorFlow": false, + "userSetupAllowed": false + }, + { + "authenticatorFlow": true, + "requirement": "ALTERNATIVE", + "priority": 20, + "autheticatorFlow": true, + "flowAlias": "Handle Existing Account", + "userSetupAllowed": false + } + ] + }, + { + "id": "26133bdd-6657-449d-a823-73519956b272", + "alias": "forms", + "description": "Username, password, otp and other auth forms.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "auth-username-password-form", + "authenticatorFlow": false, + 
"requirement": "REQUIRED", + "priority": 10, + "autheticatorFlow": false, + "userSetupAllowed": false + }, + { + "authenticatorFlow": true, + "requirement": "CONDITIONAL", + "priority": 20, + "autheticatorFlow": true, + "flowAlias": "forms - auth-otp-form - Conditional", + "userSetupAllowed": false + } + ] + }, + { + "id": "57620e5a-f7cd-4e88-ac51-d78e91ff7868", + "alias": "forms - auth-otp-form - Conditional", + "description": "Flow to determine if the auth-otp-form authenticator should be used or not.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "authenticatorFlow": false, + "requirement": "REQUIRED", + "priority": 10, + "autheticatorFlow": false, + "userSetupAllowed": false + }, + { + "authenticator": "auth-otp-form", + "authenticatorFlow": false, + "requirement": "REQUIRED", + "priority": 20, + "autheticatorFlow": false, + "userSetupAllowed": false + } + ] + }, + { + "id": "cffbb5df-de0a-49ed-9136-296a877ab175", + "alias": "http challenge", + "description": "An authentication flow based on challenge-response HTTP Authentication Schemes", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "no-cookie-redirect", + "authenticatorFlow": false, + "requirement": "REQUIRED", + "priority": 10, + "autheticatorFlow": false, + "userSetupAllowed": false + }, + { + "authenticator": "basic-auth", + "authenticatorFlow": false, + "requirement": "REQUIRED", + "priority": 20, + "autheticatorFlow": false, + "userSetupAllowed": false + }, + { + "authenticator": "basic-auth-otp", + "authenticatorFlow": false, + "requirement": "DISABLED", + "priority": 30, + "autheticatorFlow": false, + "userSetupAllowed": false + }, + { + "authenticator": "auth-spnego", + "authenticatorFlow": false, + "requirement": "DISABLED", + "priority": 40, + "autheticatorFlow": false, + "userSetupAllowed": false + } + ] + }, 
+ { + "id": "6ac5a9df-dacb-462c-9b12-207470e9fcbf", + "alias": "registration", + "description": "registration flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-page-form", + "authenticatorFlow": true, + "requirement": "REQUIRED", + "priority": 10, + "autheticatorFlow": true, + "flowAlias": "registration form", + "userSetupAllowed": false + } + ] + }, + { + "id": "27e40f78-ce1e-4ad4-9b48-88a8bf9c8d92", + "alias": "registration form", + "description": "registration form", + "providerId": "form-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "registration-user-creation", + "authenticatorFlow": false, + "requirement": "REQUIRED", + "priority": 20, + "autheticatorFlow": false, + "userSetupAllowed": false + }, + { + "authenticator": "registration-profile-action", + "authenticatorFlow": false, + "requirement": "REQUIRED", + "priority": 40, + "autheticatorFlow": false, + "userSetupAllowed": false + }, + { + "authenticator": "registration-password-action", + "authenticatorFlow": false, + "requirement": "REQUIRED", + "priority": 50, + "autheticatorFlow": false, + "userSetupAllowed": false + }, + { + "authenticator": "registration-recaptcha-action", + "authenticatorFlow": false, + "requirement": "DISABLED", + "priority": 60, + "autheticatorFlow": false, + "userSetupAllowed": false + } + ] + }, + { + "id": "31340e3b-f6c7-49ce-94ac-f28213b84be6", + "alias": "reset credentials", + "description": "Reset credentials for a user if they forgot their password or something", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "reset-credentials-choose-user", + "authenticatorFlow": false, + "requirement": "REQUIRED", + "priority": 10, + "autheticatorFlow": false, + "userSetupAllowed": false + }, + { + "authenticator": "reset-credential-email", + "authenticatorFlow": 
false, + "requirement": "REQUIRED", + "priority": 20, + "autheticatorFlow": false, + "userSetupAllowed": false + }, + { + "authenticator": "reset-password", + "authenticatorFlow": false, + "requirement": "REQUIRED", + "priority": 30, + "autheticatorFlow": false, + "userSetupAllowed": false + }, + { + "authenticatorFlow": true, + "requirement": "CONDITIONAL", + "priority": 40, + "autheticatorFlow": true, + "flowAlias": "reset credentials - reset-otp - Conditional", + "userSetupAllowed": false + } + ] + }, + { + "id": "aee4a6d9-caab-463e-ad62-48aba91a4098", + "alias": "reset credentials - reset-otp - Conditional", + "description": "Flow to determine if the reset-otp authenticator should be used or not.", + "providerId": "basic-flow", + "topLevel": false, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "conditional-user-configured", + "authenticatorFlow": false, + "requirement": "REQUIRED", + "priority": 10, + "autheticatorFlow": false, + "userSetupAllowed": false + }, + { + "authenticator": "reset-otp", + "authenticatorFlow": false, + "requirement": "REQUIRED", + "priority": 20, + "autheticatorFlow": false, + "userSetupAllowed": false + } + ] + }, + { + "id": "4052bdf6-9b94-42a1-b199-0c14ffe67ac5", + "alias": "saml ecp", + "description": "SAML ECP Profile Authentication Flow", + "providerId": "basic-flow", + "topLevel": true, + "builtIn": true, + "authenticationExecutions": [ + { + "authenticator": "http-basic-authenticator", + "authenticatorFlow": false, + "requirement": "REQUIRED", + "priority": 10, + "autheticatorFlow": false, + "userSetupAllowed": false + } + ] + } + ], + "authenticatorConfig": [ + { + "id": "4bc95f52-8c28-449c-830b-a4ffc3340399", + "alias": "create unique user config", + "config": { + "require.password.update.after.registration": "false" + } + }, + { + "id": "367a56fc-c128-43f8-85d5-50ceae63b7aa", + "alias": "review profile config", + "config": { + "update.profile.on.first.login": "missing" + } + } + ], + 
"requiredActions": [ + { + "alias": "CONFIGURE_TOTP", + "name": "Configure OTP", + "providerId": "CONFIGURE_TOTP", + "enabled": true, + "defaultAction": false, + "priority": 10, + "config": {} + }, + { + "alias": "terms_and_conditions", + "name": "Terms and Conditions", + "providerId": "terms_and_conditions", + "enabled": false, + "defaultAction": false, + "priority": 20, + "config": {} + }, + { + "alias": "UPDATE_PASSWORD", + "name": "Update Password", + "providerId": "UPDATE_PASSWORD", + "enabled": true, + "defaultAction": false, + "priority": 30, + "config": {} + }, + { + "alias": "UPDATE_PROFILE", + "name": "Update Profile", + "providerId": "UPDATE_PROFILE", + "enabled": true, + "defaultAction": false, + "priority": 40, + "config": {} + }, + { + "alias": "VERIFY_EMAIL", + "name": "Verify Email", + "providerId": "VERIFY_EMAIL", + "enabled": true, + "defaultAction": false, + "priority": 50, + "config": {} + }, + { + "alias": "delete_account", + "name": "Delete Account", + "providerId": "delete_account", + "enabled": false, + "defaultAction": false, + "priority": 60, + "config": {} + }, + { + "alias": "update_user_locale", + "name": "Update User Locale", + "providerId": "update_user_locale", + "enabled": true, + "defaultAction": false, + "priority": 1000, + "config": {} + } + ], + "browserFlow": "browser plus basic", + "registrationFlow": "registration", + "directGrantFlow": "direct grant", + "resetCredentialsFlow": "reset credentials", + "clientAuthenticationFlow": "clients", + "dockerAuthenticationFlow": "docker auth", + "attributes": { + "cibaBackchannelTokenDeliveryMode": "poll", + "cibaExpiresIn": "120", + "cibaAuthRequestedUserHint": "login_hint", + "oauth2DeviceCodeLifespan": "600", + "clientOfflineSessionMaxLifespan": "0", + "oauth2DevicePollingInterval": "5", + "clientSessionIdleTimeout": "0", + "parRequestUriLifespan": "60", + "clientSessionMaxLifespan": "0", + "clientOfflineSessionIdleTimeout": "0", + "cibaInterval": "5", + "realmReusableOtpCode": 
"false" + }, + "keycloakVersion": "20.0.2", + "userManagedAccessAllowed": false, + "clientProfiles": { + "profiles": [] + }, + "clientPolicies": { + "policies": [] + } +} \ No newline at end of file diff --git a/build/lib/app/data/config/keycloak/keycloak.env b/build/lib/app/data/config/keycloak/keycloak.env new file mode 100644 index 00000000..0bc7bf15 --- /dev/null +++ b/build/lib/app/data/config/keycloak/keycloak.env @@ -0,0 +1,17 @@ +POSTGRES_DB=keycloak +POSTGRES_USER=keycloak +POSTGRES_PASSWORD=keycloak +KC_DB=postgres +KC_DB_URL_HOST=keycloak-db +KC_DB_URL_DATABASE=${POSTGRES_DB} +KC_DB_USERNAME=${POSTGRES_USER} +KC_DB_PASSWORD=${POSTGRES_PASSWORD} +KC_DB_SCHEMA=public +KC_HOSTNAME=localhost +KC_HTTP_ENABLED="true" +KC_HTTP_RELATIVE_PATH="/auth" +KC_HOSTNAME_STRICT_HTTPS="false" +KEYCLOAK_ADMIN=admin +KEYCLOAK_ADMIN_PASSWORD=admin +X_API_CHECK_REALM=cerc +X_API_CHECK_CLIENT_ID="%api_key%" diff --git a/build/lib/app/data/config/keycloak/nginx/keycloak_proxy.conf b/build/lib/app/data/config/keycloak/nginx/keycloak_proxy.conf new file mode 100644 index 00000000..dd69680b --- /dev/null +++ b/build/lib/app/data/config/keycloak/nginx/keycloak_proxy.conf @@ -0,0 +1,72 @@ +server { + listen 80; + listen [::]:80; + server_name localhost; + + error_page 500 502 503 504 /50x.html; + location = /50x.html { + root /usr/share/nginx/html; + } + +### geth + location ~ ^/eth/?([^/]*)$ { + set $apiKey $1; + if ($apiKey = '') { + set $apiKey $http_X_API_KEY; + } + auth_request /auth; + proxy_buffering off; + rewrite /.*$ / break; + proxy_pass http://fixturenet-eth-geth-1:8545; + } + +## ipld-eth-server +# location ~ ^/ipld/eth/([^/]*)$ { +# set $apiKey $1; +# if ($apiKey = '') { +# set $apiKey $http_X_API_KEY; +# } +# auth_request /auth; +# auth_request_set $user_id $sent_http_x_user_id; +# proxy_buffering off; +# rewrite /.*$ / break; +# proxy_pass http://ipld-eth-server:8081; +# proxy_set_header X-Original-Remote-Addr $remote_addr; +# proxy_set_header X-User-Id $user_id; +# 
} +# +# location ~ ^/ipld/gql/([^/]*)$ { +# set $apiKey $1; +# if ($apiKey = '') { +# set $apiKey $http_X_API_KEY; +# } +# auth_request /auth; +# proxy_buffering off; +# rewrite /.*$ / break; +# proxy_pass http://ipld-eth-server:8082; +# } + +## lighthouse + location /beacon/ { + set $apiKey $http_X_API_KEY; + auth_request /auth; + proxy_buffering off; + proxy_pass http://fixturenet-eth-lighthouse-1:8001/; + } + + location = /auth { + internal; + proxy_buffering off; + resolver 127.0.0.11 ipv6=off; + proxy_pass http://keycloak:8080/auth/realms/cerc/check?apiKey=$apiKey; + proxy_pass_request_body off; + proxy_set_header Content-Length ""; + proxy_set_header X-Original-URI $request_uri; + proxy_set_header X-Original-Remote-Addr $remote_addr; + proxy_set_header X-Original-Host $host; + } + + location = /stub_status { + stub_status; + } +} diff --git a/build/lib/app/data/config/postgresql/create-pg-stat-statements.sql b/build/lib/app/data/config/postgresql/create-pg-stat-statements.sql new file mode 100644 index 00000000..d3c6679b --- /dev/null +++ b/build/lib/app/data/config/postgresql/create-pg-stat-statements.sql @@ -0,0 +1 @@ +CREATE EXTENSION pg_stat_statements; diff --git a/build/lib/app/data/config/postgresql/multiple-postgressql-databases.sh b/build/lib/app/data/config/postgresql/multiple-postgressql-databases.sh new file mode 100755 index 00000000..375ff9eb --- /dev/null +++ b/build/lib/app/data/config/postgresql/multiple-postgressql-databases.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +set -e +set -u + +function create_user_and_database() { + local database=$1 + echo " Creating user and database '$database'" + psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" <<-EOSQL + CREATE DATABASE "$database"; + GRANT ALL PRIVILEGES ON DATABASE "$database" TO $POSTGRES_USER; +EOSQL +} + +function create_extension() { + local database=$(echo $1 | tr ':' ' ' | awk '{print $1}') + local extension=$(echo $1 | tr ':' ' ' | awk '{print $2}') + echo " Creating database '$database' 
extension '$extension'" + psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" "$database" <<-EOSQL + CREATE EXTENSION "$extension"; +EOSQL +} + +if [ -n "$POSTGRES_MULTIPLE_DATABASES" ]; then + echo "Multiple database creation requested: $POSTGRES_MULTIPLE_DATABASES" + for db in $(echo $POSTGRES_MULTIPLE_DATABASES | tr ',' ' '); do + create_user_and_database $db + done + echo "Multiple databases created" +fi + +if [ -n "$POSTGRES_EXTENSION" ]; then + echo "Extension database creation requested: $POSTGRES_EXTENSION" + for db in $(echo $POSTGRES_EXTENSION | tr ',' ' '); do + create_extension $db + done + echo "Extensions created" +fi diff --git a/build/lib/app/data/config/tx-spammer/tx-spammer.env b/build/lib/app/data/config/tx-spammer/tx-spammer.env new file mode 100644 index 00000000..35534265 --- /dev/null +++ b/build/lib/app/data/config/tx-spammer/tx-spammer.env @@ -0,0 +1,2 @@ +ETH_CALL_FREQ=1000 +ETH_SEND_FREQ=1000 diff --git a/build/lib/app/data/config/wait-for-it.sh b/build/lib/app/data/config/wait-for-it.sh new file mode 100755 index 00000000..d990e0d3 --- /dev/null +++ b/build/lib/app/data/config/wait-for-it.sh @@ -0,0 +1,182 @@ +#!/usr/bin/env bash +# Use this script to test if a given TCP host/port are available + +WAITFORIT_cmdname=${0##*/} + +echoerr() { if [[ $WAITFORIT_QUIET -ne 1 ]]; then echo "$@" 1>&2; fi } + +usage() +{ + cat << USAGE >&2 +Usage: + $WAITFORIT_cmdname host:port [-s] [-t timeout] [-- command args] + -h HOST | --host=HOST Host or IP under test + -p PORT | --port=PORT TCP port under test + Alternatively, you specify the host and port as host:port + -s | --strict Only execute subcommand if the test succeeds + -q | --quiet Don't output any status messages + -t TIMEOUT | --timeout=TIMEOUT + Timeout in seconds, zero for no timeout + -- COMMAND ARGS Execute command with args after the test finishes +USAGE + exit 1 +} + +wait_for() +{ + if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then + echoerr "$WAITFORIT_cmdname: waiting $WAITFORIT_TIMEOUT seconds 
for $WAITFORIT_HOST:$WAITFORIT_PORT" + else + echoerr "$WAITFORIT_cmdname: waiting for $WAITFORIT_HOST:$WAITFORIT_PORT without a timeout" + fi + WAITFORIT_start_ts=$(date +%s) + while : + do + if [[ $WAITFORIT_ISBUSY -eq 1 ]]; then + nc -z $WAITFORIT_HOST $WAITFORIT_PORT + WAITFORIT_result=$? + else + (echo -n > /dev/tcp/$WAITFORIT_HOST/$WAITFORIT_PORT) >/dev/null 2>&1 + WAITFORIT_result=$? + fi + if [[ $WAITFORIT_result -eq 0 ]]; then + WAITFORIT_end_ts=$(date +%s) + echoerr "$WAITFORIT_cmdname: $WAITFORIT_HOST:$WAITFORIT_PORT is available after $((WAITFORIT_end_ts - WAITFORIT_start_ts)) seconds" + break + fi + sleep 1 + done + return $WAITFORIT_result +} + +wait_for_wrapper() +{ + # In order to support SIGINT during timeout: http://unix.stackexchange.com/a/57692 + if [[ $WAITFORIT_QUIET -eq 1 ]]; then + timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --quiet --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT & + else + timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT & + fi + WAITFORIT_PID=$! + trap "kill -INT -$WAITFORIT_PID" INT + wait $WAITFORIT_PID + WAITFORIT_RESULT=$? 
+ if [[ $WAITFORIT_RESULT -ne 0 ]]; then + echoerr "$WAITFORIT_cmdname: timeout occurred after waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT" + fi + return $WAITFORIT_RESULT +} + +# process arguments +while [[ $# -gt 0 ]] +do + case "$1" in + *:* ) + WAITFORIT_hostport=(${1//:/ }) + WAITFORIT_HOST=${WAITFORIT_hostport[0]} + WAITFORIT_PORT=${WAITFORIT_hostport[1]} + shift 1 + ;; + --child) + WAITFORIT_CHILD=1 + shift 1 + ;; + -q | --quiet) + WAITFORIT_QUIET=1 + shift 1 + ;; + -s | --strict) + WAITFORIT_STRICT=1 + shift 1 + ;; + -h) + WAITFORIT_HOST="$2" + if [[ $WAITFORIT_HOST == "" ]]; then break; fi + shift 2 + ;; + --host=*) + WAITFORIT_HOST="${1#*=}" + shift 1 + ;; + -p) + WAITFORIT_PORT="$2" + if [[ $WAITFORIT_PORT == "" ]]; then break; fi + shift 2 + ;; + --port=*) + WAITFORIT_PORT="${1#*=}" + shift 1 + ;; + -t) + WAITFORIT_TIMEOUT="$2" + if [[ $WAITFORIT_TIMEOUT == "" ]]; then break; fi + shift 2 + ;; + --timeout=*) + WAITFORIT_TIMEOUT="${1#*=}" + shift 1 + ;; + --) + shift + WAITFORIT_CLI=("$@") + break + ;; + --help) + usage + ;; + *) + echoerr "Unknown argument: $1" + usage + ;; + esac +done + +if [[ "$WAITFORIT_HOST" == "" || "$WAITFORIT_PORT" == "" ]]; then + echoerr "Error: you need to provide a host and port to test." + usage +fi + +WAITFORIT_TIMEOUT=${WAITFORIT_TIMEOUT:-15} +WAITFORIT_STRICT=${WAITFORIT_STRICT:-0} +WAITFORIT_CHILD=${WAITFORIT_CHILD:-0} +WAITFORIT_QUIET=${WAITFORIT_QUIET:-0} + +# Check to see if timeout is from busybox? 
+WAITFORIT_TIMEOUT_PATH=$(type -p timeout) +WAITFORIT_TIMEOUT_PATH=$(realpath $WAITFORIT_TIMEOUT_PATH 2>/dev/null || readlink -f $WAITFORIT_TIMEOUT_PATH) + +WAITFORIT_BUSYTIMEFLAG="" +if [[ $WAITFORIT_TIMEOUT_PATH =~ "busybox" ]]; then + WAITFORIT_ISBUSY=1 + # Check if busybox timeout uses -t flag + # (recent Alpine versions don't support -t anymore) + if timeout &>/dev/stdout | grep -q -e '-t '; then + WAITFORIT_BUSYTIMEFLAG="-t" + fi +else + WAITFORIT_ISBUSY=0 +fi + +if [[ $WAITFORIT_CHILD -gt 0 ]]; then + wait_for + WAITFORIT_RESULT=$? + exit $WAITFORIT_RESULT +else + if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then + wait_for_wrapper + WAITFORIT_RESULT=$? + else + wait_for + WAITFORIT_RESULT=$? + fi +fi + +if [[ $WAITFORIT_CLI != "" ]]; then + if [[ $WAITFORIT_RESULT -ne 0 && $WAITFORIT_STRICT -eq 1 ]]; then + echoerr "$WAITFORIT_cmdname: strict mode, refusing to execute subprocess" + exit $WAITFORIT_RESULT + fi + exec "${WAITFORIT_CLI[@]}" +else + exit $WAITFORIT_RESULT +fi diff --git a/build/lib/app/data/config/watcher-erc20/erc20-watcher.toml b/build/lib/app/data/config/watcher-erc20/erc20-watcher.toml new file mode 100644 index 00000000..20773321 --- /dev/null +++ b/build/lib/app/data/config/watcher-erc20/erc20-watcher.toml @@ -0,0 +1,41 @@ +[server] + host = "0.0.0.0" + port = 3001 + mode = "storage" + kind = "lazy" + +[metrics] + host = "127.0.0.1" + port = 9000 + [metrics.gql] + port = 9001 + +[database] + type = "postgres" + host = "erc20-watcher-db" + port = 5432 + database = "erc20-watcher" + username = "vdbm" + password = "password" + synchronize = true + logging = false + maxQueryExecutionTime = 100 + +[upstream] + [upstream.ethServer] + gqlApiEndpoint = "http://ipld-eth-server:8082/graphql" + rpcProviderEndpoint = "http://ipld-eth-server:8081" + + [upstream.cache] + name = "requests" + enabled = false + deleteOnStart = false + +[jobQueue] + dbConnectionString = "postgres://vdbm:password@erc20-watcher-db:5432/erc20-watcher-job-queue" + maxCompletionLagInSecs 
= 300 + jobDelayInMilliSecs = 100 + eventsInBatch = 50 + blockDelayInMilliSecs = 2000 + prefetchBlocksInMem = true + prefetchBlockCount = 10 diff --git a/build/lib/app/data/config/watcher-erc721/erc721-watcher.toml b/build/lib/app/data/config/watcher-erc721/erc721-watcher.toml new file mode 100644 index 00000000..8e877411 --- /dev/null +++ b/build/lib/app/data/config/watcher-erc721/erc721-watcher.toml @@ -0,0 +1,56 @@ +[server] + host = "0.0.0.0" + port = 3009 + kind = "lazy" + + # Checkpointing state. + checkpointing = true + + # Checkpoint interval in number of blocks. + checkpointInterval = 2000 + + # Enable state creation + enableState = true + + # Boolean to filter logs by contract. + filterLogs = false + + # Max block range for which to return events in eventsInRange GQL query. + # Use -1 for skipping check on block range. + maxEventsBlockRange = 1000 + +[metrics] + host = "127.0.0.1" + port = 9000 + [metrics.gql] + port = 9001 + +[database] + type = "postgres" + host = "erc721-watcher-db" + port = 5432 + database = "erc721-watcher" + username = "vdbm" + password = "password" + synchronize = true + logging = false + maxQueryExecutionTime = 100 + +[upstream] + [upstream.ethServer] + gqlApiEndpoint = "http://ipld-eth-server:8082/graphql" + rpcProviderEndpoint = "http://ipld-eth-server:8081" + + [upstream.cache] + name = "requests" + enabled = false + deleteOnStart = false + +[jobQueue] + dbConnectionString = "postgres://vdbm:password@erc721-watcher-db:5432/erc721-watcher-job-queue" + maxCompletionLagInSecs = 300 + jobDelayInMilliSecs = 100 + eventsInBatch = 50 + blockDelayInMilliSecs = 2000 + prefetchBlocksInMem = true + prefetchBlockCount = 10 diff --git a/build/lib/app/data/config/watcher-mobymask-v2/deploy-and-generate-invite.sh b/build/lib/app/data/config/watcher-mobymask-v2/deploy-and-generate-invite.sh new file mode 100755 index 00000000..e574c56b --- /dev/null +++ b/build/lib/app/data/config/watcher-mobymask-v2/deploy-and-generate-invite.sh @@ -0,0 +1,89 
@@ +#!/bin/sh +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +CERC_L2_GETH_RPC="${CERC_L2_GETH_RPC:-${DEFAULT_CERC_L2_GETH_RPC}}" +CERC_L1_ACCOUNTS_CSV_URL="${CERC_L1_ACCOUNTS_CSV_URL:-${DEFAULT_CERC_L1_ACCOUNTS_CSV_URL}}" + +CERC_MOBYMASK_APP_BASE_URI="${CERC_MOBYMASK_APP_BASE_URI:-${DEFAULT_CERC_MOBYMASK_APP_BASE_URI}}" +CERC_DEPLOYED_CONTRACT="${CERC_DEPLOYED_CONTRACT:-${DEFAULT_CERC_DEPLOYED_CONTRACT}}" + +# Check if CERC_DEPLOYED_CONTRACT environment variable set to skip contract deployment +if [ -n "$CERC_DEPLOYED_CONTRACT" ]; then + echo "CERC_DEPLOYED_CONTRACT is set to '$CERC_DEPLOYED_CONTRACT'" + echo "Skipping contract deployment" + exit 0 +fi + +echo "Using L2 RPC endpoint ${CERC_L2_GETH_RPC}" + +if [ -n "$CERC_L1_ACCOUNTS_CSV_URL" ] && \ + l1_accounts_response=$(curl -L --write-out '%{http_code}' --silent --output /dev/null "$CERC_L1_ACCOUNTS_CSV_URL") && \ + [ "$l1_accounts_response" -eq 200 ]; +then + echo "Fetching L1 account credentials using provided URL" + mkdir -p /geth-accounts + wget -O /geth-accounts/accounts.csv "$CERC_L1_ACCOUNTS_CSV_URL" + + # Read the private key of an L1 account to deploy contract + CERC_PRIVATE_KEY_DEPLOYER=$(head -n 1 /geth-accounts/accounts.csv | cut -d ',' -f 3) +else + echo "Couldn't fetch L1 account credentials, using CERC_PRIVATE_KEY_DEPLOYER from env" +fi + +# Set the private key +jq --arg privateKey "$CERC_PRIVATE_KEY_DEPLOYER" '.privateKey = $privateKey' secrets-template.json > secrets.json + +# Set the RPC URL +jq --arg rpcUrl "$CERC_L2_GETH_RPC" '.rpcUrl = $rpcUrl' secrets.json > secrets_updated.json && mv secrets_updated.json secrets.json + +# Set the MobyMask app base URI +jq --arg baseURI "$CERC_MOBYMASK_APP_BASE_URI" '.baseURI = $baseURI' secrets.json > secrets_updated.json && mv secrets_updated.json secrets.json + +# Wait for L2 Optimism Geth and Node servers to be up before deploying contract +CERC_L2_GETH_HOST="${CERC_L2_GETH_HOST:-${DEFAULT_CERC_L2_GETH_HOST}}" 
+CERC_L2_GETH_PORT="${CERC_L2_GETH_PORT:-${DEFAULT_CERC_L2_GETH_PORT}}" +CERC_L2_NODE_HOST="${CERC_L2_NODE_HOST:-${DEFAULT_CERC_L2_NODE_HOST}}" +CERC_L2_NODE_PORT="${CERC_L2_NODE_PORT:-${DEFAULT_CERC_L2_NODE_PORT}}" +./wait-for-it.sh -h "${CERC_L2_GETH_HOST}" -p "${CERC_L2_GETH_PORT}" -s -t 0 +./wait-for-it.sh -h "${CERC_L2_NODE_HOST}" -p "${CERC_L2_NODE_PORT}" -s -t 0 + +export RPC_URL="${CERC_L2_GETH_RPC}" + +# Check and exit if a deployment already exists (on restarts) +if [ -f ./config.json ]; then + echo "config.json already exists, checking the contract deployment" + + # Read JSON file + DEPLOYMENT_DETAILS=$(cat config.json) + CONTRACT_ADDRESS=$(echo "$DEPLOYMENT_DETAILS" | jq -r '.address') + + cd ../hardhat + if yarn verifyDeployment --network optimism --contract "${CONTRACT_ADDRESS}"; then + echo "Deployment verfication successful" + cd ../server + else + echo "Deployment verfication failed, please clear MobyMask deployment volume before starting" + exit 1 + fi +fi + +# Wait until balance for deployer account is updated +cd ../hardhat +while true; do + ACCOUNT_BALANCE=$(yarn balance --network optimism "$CERC_PRIVATE_KEY_DEPLOYER" | grep ETH) + + if [ "$ACCOUNT_BALANCE" != "0.0 ETH" ]; then + echo "Account balance updated: $ACCOUNT_BALANCE" + break # exit the loop + fi + + echo "Account balance not updated: $ACCOUNT_BALANCE" + echo "Checking after 2 seconds" + sleep 2 +done + +cd ../server +npm run deployAndGenerateInvite diff --git a/build/lib/app/data/config/watcher-mobymask-v2/generate-peer-ids.sh b/build/lib/app/data/config/watcher-mobymask-v2/generate-peer-ids.sh new file mode 100755 index 00000000..5c97c45d --- /dev/null +++ b/build/lib/app/data/config/watcher-mobymask-v2/generate-peer-ids.sh @@ -0,0 +1,20 @@ +#!/bin/sh +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +# Check for peer ids in ./peers folder, create if not present +if [ -f /peer-ids/relay-id.json ]; then + echo "Using peer id for relay node from the mounted volume" +else + 
echo "Creating a new peer id for relay node" + yarn create-peer -f /peer-ids/relay-id.json +fi + +if [ -f /peer-ids/peer-id.json ]; then + echo "Using peer id for peer node from the mounted volume" +else + echo "Creating a new peer id for peer node" + yarn create-peer -f /peer-ids/peer-id.json +fi diff --git a/build/lib/app/data/config/watcher-mobymask-v2/mobymask-app-start.sh b/build/lib/app/data/config/watcher-mobymask-v2/mobymask-app-start.sh new file mode 100755 index 00000000..9f343340 --- /dev/null +++ b/build/lib/app/data/config/watcher-mobymask-v2/mobymask-app-start.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env bash +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +CERC_CHAIN_ID="${CERC_CHAIN_ID:-${DEFAULT_CERC_CHAIN_ID}}" +CERC_DEPLOYED_CONTRACT="${CERC_DEPLOYED_CONTRACT:-${DEFAULT_CERC_DEPLOYED_CONTRACT}}" +CERC_RELAY_NODES="${CERC_RELAY_NODES:-${DEFAULT_CERC_RELAY_NODES}}" +CERC_APP_WATCHER_URL="${CERC_APP_WATCHER_URL:-${DEFAULT_CERC_APP_WATCHER_URL}}" + +# If not set (or []), check the mounted volume for relay peer id +if [ -z "$CERC_RELAY_NODES" ] || [ "$CERC_RELAY_NODES" = "[]" ]; then + echo "CERC_RELAY_NODES not provided, taking from the mounted volume" + CERC_RELAY_NODES="[\"/ip4/127.0.0.1/tcp/9090/ws/p2p/$(jq -r '.id' /peers/relay-id.json)\"]" +fi + +echo "Using CERC_RELAY_NODES $CERC_RELAY_NODES" + +if [ -z "$CERC_DEPLOYED_CONTRACT" ]; then + # Use config from mounted volume (when running web-app along with watcher stack) + echo "Taking config for deployed contract from mounted volume" + while [ ! 
-f /server/config.json ]; do + echo "Config not found, retrying after 5 seconds" + sleep 5 + done + + # Get deployed contract address and chain id + CERC_DEPLOYED_CONTRACT=$(jq -r '.address' /server/config.json | tr -d '"') + CERC_CHAIN_ID=$(jq -r '.chainId' /server/config.json) +else + echo "Taking deployed contract details from env" +fi + +# Use yq to create config.yml with environment variables +yq -n ".address = env(CERC_DEPLOYED_CONTRACT)" > /config/config.yml +yq ".watcherUrl = env(CERC_APP_WATCHER_URL)" -i /config/config.yml +yq ".chainId = env(CERC_CHAIN_ID)" -i /config/config.yml +yq ".relayNodes = strenv(CERC_RELAY_NODES)" -i /config/config.yml + +/scripts/start-serving-app.sh diff --git a/build/lib/app/data/config/watcher-mobymask-v2/mobymask-params.env b/build/lib/app/data/config/watcher-mobymask-v2/mobymask-params.env new file mode 100644 index 00000000..6d1bf063 --- /dev/null +++ b/build/lib/app/data/config/watcher-mobymask-v2/mobymask-params.env @@ -0,0 +1,26 @@ +# Defaults + +# Watcher endpoint +DEFAULT_CERC_APP_WATCHER_URL="http://localhost:3001" + +# Set of relay peers to connect to from the relay node +DEFAULT_CERC_RELAY_PEERS=[] + +# Domain to be used in the relay node's announce address +DEFAULT_CERC_RELAY_ANNOUNCE_DOMAIN= + +# Base URI for mobymask-app (used for generating invite) +DEFAULT_CERC_MOBYMASK_APP_BASE_URI="http://127.0.0.1:3002/#" + +# Set to false for disabling watcher peer to send txs to L2 +DEFAULT_CERC_ENABLE_PEER_L2_TXS=true + +# Set deployed MobyMask contract address to avoid deploying contract in stack +# mobymask-app will use this contract address in config if run separately +DEFAULT_CERC_DEPLOYED_CONTRACT= + +# Chain ID is used by mobymask web-app for txs +DEFAULT_CERC_CHAIN_ID=42069 + +# Set of relay nodes to be used by web-apps +DEFAULT_CERC_RELAY_NODES=[] diff --git a/build/lib/app/data/config/watcher-mobymask-v2/optimism-params.env b/build/lib/app/data/config/watcher-mobymask-v2/optimism-params.env new file mode 100644 
index 00000000..e0d2cd75 --- /dev/null +++ b/build/lib/app/data/config/watcher-mobymask-v2/optimism-params.env @@ -0,0 +1,14 @@ +# Defaults + +# L2 endpoints +DEFAULT_CERC_L2_GETH_RPC="http://op-geth:8545" + +# Endpoints waited on before contract deployment +DEFAULT_CERC_L2_GETH_HOST="op-geth" +DEFAULT_CERC_L2_GETH_PORT=8545 + +DEFAULT_CERC_L2_NODE_HOST="op-node" +DEFAULT_CERC_L2_NODE_PORT=8547 + +# URL to get CSV with credentials for accounts on L1 to perform txs on L2 +DEFAULT_CERC_L1_ACCOUNTS_CSV_URL="http://fixturenet-eth-bootnode-geth:9898/accounts.csv" diff --git a/build/lib/app/data/config/watcher-mobymask-v2/secrets-template.json b/build/lib/app/data/config/watcher-mobymask-v2/secrets-template.json new file mode 100644 index 00000000..da4939ac --- /dev/null +++ b/build/lib/app/data/config/watcher-mobymask-v2/secrets-template.json @@ -0,0 +1,5 @@ +{ + "rpcUrl": "", + "privateKey": "", + "baseURI": "" +} diff --git a/build/lib/app/data/config/watcher-mobymask-v2/set-tests-env.sh b/build/lib/app/data/config/watcher-mobymask-v2/set-tests-env.sh new file mode 100755 index 00000000..5e31b763 --- /dev/null +++ b/build/lib/app/data/config/watcher-mobymask-v2/set-tests-env.sh @@ -0,0 +1,10 @@ +#!/bin/sh +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +CERC_RELAY_MULTIADDR="/dns4/mobymask-watcher-server/tcp/9090/ws/p2p/$(jq -r '.id' /peer-ids/relay-id.json)" + +# Write the relay node's multiaddr to /app/packages/peer/.env for running tests +echo "RELAY=\"$CERC_RELAY_MULTIADDR\"" > ./.env diff --git a/build/lib/app/data/config/watcher-mobymask-v2/start-server.sh b/build/lib/app/data/config/watcher-mobymask-v2/start-server.sh new file mode 100755 index 00000000..b46a7c14 --- /dev/null +++ b/build/lib/app/data/config/watcher-mobymask-v2/start-server.sh @@ -0,0 +1,56 @@ +#!/bin/sh +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +CERC_L2_GETH_RPC="${CERC_L2_GETH_RPC:-${DEFAULT_CERC_L2_GETH_RPC}}" 
+CERC_L1_ACCOUNTS_CSV_URL="${CERC_L1_ACCOUNTS_CSV_URL:-${DEFAULT_CERC_L1_ACCOUNTS_CSV_URL}}" + +CERC_RELAY_PEERS="${CERC_RELAY_PEERS:-${DEFAULT_CERC_RELAY_PEERS}}" +CERC_RELAY_ANNOUNCE_DOMAIN="${CERC_RELAY_ANNOUNCE_DOMAIN:-${DEFAULT_CERC_RELAY_ANNOUNCE_DOMAIN}}" +CERC_ENABLE_PEER_L2_TXS="${CERC_ENABLE_PEER_L2_TXS:-${DEFAULT_CERC_ENABLE_PEER_L2_TXS}}" +CERC_DEPLOYED_CONTRACT="${CERC_DEPLOYED_CONTRACT:-${DEFAULT_CERC_DEPLOYED_CONTRACT}}" + +echo "Using L2 RPC endpoint ${CERC_L2_GETH_RPC}" + +CERC_RELAY_MULTIADDR="/dns4/mobymask-watcher-server/tcp/9090/ws/p2p/$(jq -r '.id' /app/peers/relay-id.json)" + +# Use contract address from environment variable or set from config.json in mounted volume +if [ -n "$CERC_DEPLOYED_CONTRACT" ]; then + CONTRACT_ADDRESS="${CERC_DEPLOYED_CONTRACT}" +else + # Assign deployed contract address from server config (created by mobymask container after deploying contract) + CONTRACT_ADDRESS=$(jq -r '.address' /server/config.json | tr -d '"') +fi + +if [ -n "$CERC_L1_ACCOUNTS_CSV_URL" ] && \ + l1_accounts_response=$(curl -L --write-out '%{http_code}' --silent --output /dev/null "$CERC_L1_ACCOUNTS_CSV_URL") && \ + [ "$l1_accounts_response" -eq 200 ]; +then + echo "Fetching L1 account credentials using provided URL" + mkdir -p /geth-accounts + wget -O /geth-accounts/accounts.csv "$CERC_L1_ACCOUNTS_CSV_URL" + + # Read the private key of an L1 account for sending txs from peer + CERC_PRIVATE_KEY_PEER=$(awk -F, 'NR==2{print $NF}' /geth-accounts/accounts.csv) +else + echo "Couldn't fetch L1 account credentials, using CERC_PRIVATE_KEY_PEER from env" +fi + +# Read in the config template TOML file and modify it +WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml) +WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \ + sed -E "s|REPLACE_WITH_CERC_RELAY_PEERS|${CERC_RELAY_PEERS}|g; \ + s/REPLACE_WITH_CERC_RELAY_ANNOUNCE_DOMAIN/${CERC_RELAY_ANNOUNCE_DOMAIN}/g; \ + s|REPLACE_WITH_CERC_RELAY_MULTIADDR|${CERC_RELAY_MULTIADDR}|g; \ + 
s/REPLACE_WITH_CERC_ENABLE_PEER_L2_TXS/${CERC_ENABLE_PEER_L2_TXS}/g; \ + s/REPLACE_WITH_CERC_PRIVATE_KEY_PEER/${CERC_PRIVATE_KEY_PEER}/g; \ + s/REPLACE_WITH_CONTRACT_ADDRESS/${CONTRACT_ADDRESS}/g; \ + s|REPLACE_WITH_CERC_L2_GETH_RPC_ENDPOINT|${CERC_L2_GETH_RPC}| ") + +# Write the modified content to a new file +echo "$WATCHER_CONFIG" > environments/local.toml + +echo 'yarn server' +yarn server diff --git a/build/lib/app/data/config/watcher-mobymask-v2/test-app-config.json b/build/lib/app/data/config/watcher-mobymask-v2/test-app-config.json new file mode 100644 index 00000000..cce15fd8 --- /dev/null +++ b/build/lib/app/data/config/watcher-mobymask-v2/test-app-config.json @@ -0,0 +1,6 @@ +{ + "relayNodes": [], + "peer": { + "enableDebugInfo": true + } +} diff --git a/build/lib/app/data/config/watcher-mobymask-v2/test-app-start.sh b/build/lib/app/data/config/watcher-mobymask-v2/test-app-start.sh new file mode 100755 index 00000000..42e4397d --- /dev/null +++ b/build/lib/app/data/config/watcher-mobymask-v2/test-app-start.sh @@ -0,0 +1,20 @@ +#!/bin/sh +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +CERC_RELAY_NODES="${CERC_RELAY_NODES:-${DEFAULT_CERC_RELAY_NODES}}" + +# If not set (or []), check the mounted volume for relay peer id +if [ -z "$CERC_RELAY_NODES" ] || [ "$CERC_RELAY_NODES" = "[]" ]; then + echo "CERC_RELAY_NODES not provided, taking from the mounted volume" + CERC_RELAY_NODES="[\"/ip4/127.0.0.1/tcp/9090/ws/p2p/$(jq -r '.id' /peers/relay-id.json)\"]" +fi + +echo "Using CERC_RELAY_NODES $CERC_RELAY_NODES" + +# Use yq to create config.yml with environment variables +yq -n ".relayNodes = strenv(CERC_RELAY_NODES)" > /config/config.yml + +/scripts/start-serving-app.sh diff --git a/build/lib/app/data/config/watcher-mobymask-v2/watcher-config-template.toml b/build/lib/app/data/config/watcher-mobymask-v2/watcher-config-template.toml new file mode 100644 index 00000000..e6ce0750 --- /dev/null +++ 
b/build/lib/app/data/config/watcher-mobymask-v2/watcher-config-template.toml @@ -0,0 +1,76 @@ +[server] + host = "0.0.0.0" + port = 3001 + kind = "lazy" + + # Checkpointing state. + checkpointing = true + + # Checkpoint interval in number of blocks. + checkpointInterval = 2000 + + # Enable state creation + enableState = true + + # Boolean to filter logs by contract. + filterLogs = true + + # Max block range for which to return events in eventsInRange GQL query. + # Use -1 for skipping check on block range. + maxEventsBlockRange = -1 + + [server.p2p] + enableRelay = true + enablePeer = true + + [server.p2p.relay] + host = "0.0.0.0" + port = 9090 + relayPeers = REPLACE_WITH_CERC_RELAY_PEERS + peerIdFile = './peers/relay-id.json' + announce = 'REPLACE_WITH_CERC_RELAY_ANNOUNCE_DOMAIN' + enableDebugInfo = true + + [server.p2p.peer] + relayMultiaddr = 'REPLACE_WITH_CERC_RELAY_MULTIADDR' + pubSubTopic = 'mobymask' + peerIdFile = './peers/peer-id.json' + enableDebugInfo = true + enableL2Txs = REPLACE_WITH_CERC_ENABLE_PEER_L2_TXS + + [server.p2p.peer.l2TxsConfig] + privateKey = 'REPLACE_WITH_CERC_PRIVATE_KEY_PEER' + contractAddress = 'REPLACE_WITH_CONTRACT_ADDRESS' + +[metrics] + host = "0.0.0.0" + port = 9000 + [metrics.gql] + port = 9001 + +[database] + type = "postgres" + host = "mobymask-watcher-db" + port = 5432 + database = "mobymask-watcher" + username = "vdbm" + password = "password" + synchronize = true + logging = false + +[upstream] + [upstream.ethServer] + gqlApiEndpoint = "http://ipld-eth-server:8083/graphql" + rpcProviderEndpoint = "REPLACE_WITH_CERC_L2_GETH_RPC_ENDPOINT" + blockDelayInMilliSecs = 60000 + + [upstream.cache] + name = "requests" + enabled = false + deleteOnStart = false + +[jobQueue] + dbConnectionString = "postgres://vdbm:password@mobymask-watcher-db/mobymask-watcher-job-queue" + maxCompletionLagInSecs = 300 + jobDelayInMilliSecs = 100 + eventsInBatch = 50 diff --git a/build/lib/app/data/config/watcher-mobymask/mobymask-watcher-db.sql 
b/build/lib/app/data/config/watcher-mobymask/mobymask-watcher-db.sql new file mode 100644 index 00000000..5b116b79 --- /dev/null +++ b/build/lib/app/data/config/watcher-mobymask/mobymask-watcher-db.sql @@ -0,0 +1,1062 @@ +-- +-- PostgreSQL database dump +-- + +-- Dumped from database version 12.11 +-- Dumped by pg_dump version 14.3 (Ubuntu 14.3-0ubuntu0.22.04.1) + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SELECT pg_catalog.set_config('search_path', '', false); +SET check_function_bodies = false; +SET xmloption = content; +SET client_min_messages = warning; +SET row_security = off; + +-- +-- Name: ipld_block_kind_enum; Type: TYPE; Schema: public; Owner: vdbm +-- + +CREATE TYPE public.ipld_block_kind_enum AS ENUM ( + 'diff', + 'init', + 'diff_staged', + 'checkpoint' +); + + +ALTER TYPE public.ipld_block_kind_enum OWNER TO vdbm; + +SET default_tablespace = ''; + +SET default_table_access_method = heap; + +-- +-- Name: _owner; Type: TABLE; Schema: public; Owner: vdbm +-- + +CREATE TABLE public._owner ( + id integer NOT NULL, + block_hash character varying(66) NOT NULL, + block_number integer NOT NULL, + contract_address character varying(42) NOT NULL, + value character varying NOT NULL, + proof text +); + + +ALTER TABLE public._owner OWNER TO vdbm; + +-- +-- Name: _owner_id_seq; Type: SEQUENCE; Schema: public; Owner: vdbm +-- + +CREATE SEQUENCE public._owner_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +ALTER TABLE public._owner_id_seq OWNER TO vdbm; + +-- +-- Name: _owner_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: vdbm +-- + +ALTER SEQUENCE public._owner_id_seq OWNED BY public._owner.id; + + +-- +-- Name: block_progress; Type: TABLE; Schema: public; Owner: vdbm +-- + +CREATE TABLE public.block_progress ( + id integer NOT NULL, + cid character varying NOT NULL, + block_hash character 
varying(66) NOT NULL, + parent_hash character varying(66) NOT NULL, + block_number integer NOT NULL, + block_timestamp integer NOT NULL, + num_events integer NOT NULL, + num_processed_events integer NOT NULL, + last_processed_event_index integer NOT NULL, + is_complete boolean NOT NULL, + is_pruned boolean DEFAULT false NOT NULL, + created_at timestamp without time zone DEFAULT now() NOT NULL +); + + +ALTER TABLE public.block_progress OWNER TO vdbm; + +-- +-- Name: block_progress_id_seq; Type: SEQUENCE; Schema: public; Owner: vdbm +-- + +CREATE SEQUENCE public.block_progress_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +ALTER TABLE public.block_progress_id_seq OWNER TO vdbm; + +-- +-- Name: block_progress_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: vdbm +-- + +ALTER SEQUENCE public.block_progress_id_seq OWNED BY public.block_progress.id; + + +-- +-- Name: contract; Type: TABLE; Schema: public; Owner: vdbm +-- + +CREATE TABLE public.contract ( + id integer NOT NULL, + address character varying(42) NOT NULL, + kind character varying NOT NULL, + checkpoint boolean NOT NULL, + starting_block integer NOT NULL +); + + +ALTER TABLE public.contract OWNER TO vdbm; + +-- +-- Name: contract_id_seq; Type: SEQUENCE; Schema: public; Owner: vdbm +-- + +CREATE SEQUENCE public.contract_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +ALTER TABLE public.contract_id_seq OWNER TO vdbm; + +-- +-- Name: contract_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: vdbm +-- + +ALTER SEQUENCE public.contract_id_seq OWNED BY public.contract.id; + + +-- +-- Name: domain_hash; Type: TABLE; Schema: public; Owner: vdbm +-- + +CREATE TABLE public.domain_hash ( + id integer NOT NULL, + block_hash character varying(66) NOT NULL, + block_number integer NOT NULL, + contract_address character varying(42) NOT NULL, + value character varying NOT NULL, + proof text +); + + +ALTER TABLE 
public.domain_hash OWNER TO vdbm; + +-- +-- Name: domain_hash_id_seq; Type: SEQUENCE; Schema: public; Owner: vdbm +-- + +CREATE SEQUENCE public.domain_hash_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +ALTER TABLE public.domain_hash_id_seq OWNER TO vdbm; + +-- +-- Name: domain_hash_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: vdbm +-- + +ALTER SEQUENCE public.domain_hash_id_seq OWNED BY public.domain_hash.id; + + +-- +-- Name: event; Type: TABLE; Schema: public; Owner: vdbm +-- + +CREATE TABLE public.event ( + id integer NOT NULL, + tx_hash character varying(66) NOT NULL, + index integer NOT NULL, + contract character varying(42) NOT NULL, + event_name character varying(256) NOT NULL, + event_info text NOT NULL, + extra_info text NOT NULL, + proof text NOT NULL, + block_id integer +); + + +ALTER TABLE public.event OWNER TO vdbm; + +-- +-- Name: event_id_seq; Type: SEQUENCE; Schema: public; Owner: vdbm +-- + +CREATE SEQUENCE public.event_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +ALTER TABLE public.event_id_seq OWNER TO vdbm; + +-- +-- Name: event_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: vdbm +-- + +ALTER SEQUENCE public.event_id_seq OWNED BY public.event.id; + + +-- +-- Name: ipld_block; Type: TABLE; Schema: public; Owner: vdbm +-- + +CREATE TABLE public.ipld_block ( + id integer NOT NULL, + contract_address character varying(42) NOT NULL, + cid character varying NOT NULL, + kind public.ipld_block_kind_enum NOT NULL, + data bytea NOT NULL, + block_id integer +); + + +ALTER TABLE public.ipld_block OWNER TO vdbm; + +-- +-- Name: ipld_block_id_seq; Type: SEQUENCE; Schema: public; Owner: vdbm +-- + +CREATE SEQUENCE public.ipld_block_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +ALTER TABLE public.ipld_block_id_seq OWNER TO vdbm; + +-- +-- Name: ipld_block_id_seq; Type: SEQUENCE OWNED BY; 
Schema: public; Owner: vdbm +-- + +ALTER SEQUENCE public.ipld_block_id_seq OWNED BY public.ipld_block.id; + + +-- +-- Name: ipld_status; Type: TABLE; Schema: public; Owner: vdbm +-- + +CREATE TABLE public.ipld_status ( + id integer NOT NULL, + latest_hooks_block_number integer NOT NULL, + latest_checkpoint_block_number integer NOT NULL, + latest_ipfs_block_number integer NOT NULL +); + + +ALTER TABLE public.ipld_status OWNER TO vdbm; + +-- +-- Name: ipld_status_id_seq; Type: SEQUENCE; Schema: public; Owner: vdbm +-- + +CREATE SEQUENCE public.ipld_status_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +ALTER TABLE public.ipld_status_id_seq OWNER TO vdbm; + +-- +-- Name: ipld_status_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: vdbm +-- + +ALTER SEQUENCE public.ipld_status_id_seq OWNED BY public.ipld_status.id; + + +-- +-- Name: is_member; Type: TABLE; Schema: public; Owner: vdbm +-- + +CREATE TABLE public.is_member ( + id integer NOT NULL, + block_hash character varying(66) NOT NULL, + block_number integer NOT NULL, + contract_address character varying(42) NOT NULL, + key0 character varying NOT NULL, + value boolean NOT NULL, + proof text +); + + +ALTER TABLE public.is_member OWNER TO vdbm; + +-- +-- Name: is_member_id_seq; Type: SEQUENCE; Schema: public; Owner: vdbm +-- + +CREATE SEQUENCE public.is_member_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +ALTER TABLE public.is_member_id_seq OWNER TO vdbm; + +-- +-- Name: is_member_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: vdbm +-- + +ALTER SEQUENCE public.is_member_id_seq OWNED BY public.is_member.id; + + +-- +-- Name: is_phisher; Type: TABLE; Schema: public; Owner: vdbm +-- + +CREATE TABLE public.is_phisher ( + id integer NOT NULL, + block_hash character varying(66) NOT NULL, + block_number integer NOT NULL, + contract_address character varying(42) NOT NULL, + key0 character varying NOT NULL, + value 
boolean NOT NULL, + proof text +); + + +ALTER TABLE public.is_phisher OWNER TO vdbm; + +-- +-- Name: is_phisher_id_seq; Type: SEQUENCE; Schema: public; Owner: vdbm +-- + +CREATE SEQUENCE public.is_phisher_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +ALTER TABLE public.is_phisher_id_seq OWNER TO vdbm; + +-- +-- Name: is_phisher_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: vdbm +-- + +ALTER SEQUENCE public.is_phisher_id_seq OWNED BY public.is_phisher.id; + + +-- +-- Name: is_revoked; Type: TABLE; Schema: public; Owner: vdbm +-- + +CREATE TABLE public.is_revoked ( + id integer NOT NULL, + block_hash character varying(66) NOT NULL, + block_number integer NOT NULL, + contract_address character varying(42) NOT NULL, + key0 character varying NOT NULL, + value boolean NOT NULL, + proof text +); + + +ALTER TABLE public.is_revoked OWNER TO vdbm; + +-- +-- Name: is_revoked_id_seq; Type: SEQUENCE; Schema: public; Owner: vdbm +-- + +CREATE SEQUENCE public.is_revoked_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +ALTER TABLE public.is_revoked_id_seq OWNER TO vdbm; + +-- +-- Name: is_revoked_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: vdbm +-- + +ALTER SEQUENCE public.is_revoked_id_seq OWNED BY public.is_revoked.id; + + +-- +-- Name: multi_nonce; Type: TABLE; Schema: public; Owner: vdbm +-- + +CREATE TABLE public.multi_nonce ( + id integer NOT NULL, + block_hash character varying(66) NOT NULL, + block_number integer NOT NULL, + contract_address character varying(42) NOT NULL, + key0 character varying(42) NOT NULL, + key1 numeric NOT NULL, + value numeric NOT NULL, + proof text +); + + +ALTER TABLE public.multi_nonce OWNER TO vdbm; + +-- +-- Name: multi_nonce_id_seq; Type: SEQUENCE; Schema: public; Owner: vdbm +-- + +CREATE SEQUENCE public.multi_nonce_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +ALTER TABLE 
public.multi_nonce_id_seq OWNER TO vdbm; + +-- +-- Name: multi_nonce_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: vdbm +-- + +ALTER SEQUENCE public.multi_nonce_id_seq OWNED BY public.multi_nonce.id; + + +-- +-- Name: sync_status; Type: TABLE; Schema: public; Owner: vdbm +-- + +CREATE TABLE public.sync_status ( + id integer NOT NULL, + chain_head_block_hash character varying(66) NOT NULL, + chain_head_block_number integer NOT NULL, + latest_indexed_block_hash character varying(66) NOT NULL, + latest_indexed_block_number integer NOT NULL, + latest_canonical_block_hash character varying(66) NOT NULL, + latest_canonical_block_number integer NOT NULL, + initial_indexed_block_hash character varying(66) NOT NULL, + initial_indexed_block_number integer NOT NULL +); + + +ALTER TABLE public.sync_status OWNER TO vdbm; + +-- +-- Name: sync_status_id_seq; Type: SEQUENCE; Schema: public; Owner: vdbm +-- + +CREATE SEQUENCE public.sync_status_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +ALTER TABLE public.sync_status_id_seq OWNER TO vdbm; + +-- +-- Name: sync_status_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: vdbm +-- + +ALTER SEQUENCE public.sync_status_id_seq OWNED BY public.sync_status.id; + + +-- +-- Name: _owner id; Type: DEFAULT; Schema: public; Owner: vdbm +-- + +ALTER TABLE ONLY public._owner ALTER COLUMN id SET DEFAULT nextval('public._owner_id_seq'::regclass); + + +-- +-- Name: block_progress id; Type: DEFAULT; Schema: public; Owner: vdbm +-- + +ALTER TABLE ONLY public.block_progress ALTER COLUMN id SET DEFAULT nextval('public.block_progress_id_seq'::regclass); + + +-- +-- Name: contract id; Type: DEFAULT; Schema: public; Owner: vdbm +-- + +ALTER TABLE ONLY public.contract ALTER COLUMN id SET DEFAULT nextval('public.contract_id_seq'::regclass); + + +-- +-- Name: domain_hash id; Type: DEFAULT; Schema: public; Owner: vdbm +-- + +ALTER TABLE ONLY public.domain_hash ALTER COLUMN id SET DEFAULT 
nextval('public.domain_hash_id_seq'::regclass); + + +-- +-- Name: event id; Type: DEFAULT; Schema: public; Owner: vdbm +-- + +ALTER TABLE ONLY public.event ALTER COLUMN id SET DEFAULT nextval('public.event_id_seq'::regclass); + + +-- +-- Name: ipld_block id; Type: DEFAULT; Schema: public; Owner: vdbm +-- + +ALTER TABLE ONLY public.ipld_block ALTER COLUMN id SET DEFAULT nextval('public.ipld_block_id_seq'::regclass); + + +-- +-- Name: ipld_status id; Type: DEFAULT; Schema: public; Owner: vdbm +-- + +ALTER TABLE ONLY public.ipld_status ALTER COLUMN id SET DEFAULT nextval('public.ipld_status_id_seq'::regclass); + + +-- +-- Name: is_member id; Type: DEFAULT; Schema: public; Owner: vdbm +-- + +ALTER TABLE ONLY public.is_member ALTER COLUMN id SET DEFAULT nextval('public.is_member_id_seq'::regclass); + + +-- +-- Name: is_phisher id; Type: DEFAULT; Schema: public; Owner: vdbm +-- + +ALTER TABLE ONLY public.is_phisher ALTER COLUMN id SET DEFAULT nextval('public.is_phisher_id_seq'::regclass); + + +-- +-- Name: is_revoked id; Type: DEFAULT; Schema: public; Owner: vdbm +-- + +ALTER TABLE ONLY public.is_revoked ALTER COLUMN id SET DEFAULT nextval('public.is_revoked_id_seq'::regclass); + + +-- +-- Name: multi_nonce id; Type: DEFAULT; Schema: public; Owner: vdbm +-- + +ALTER TABLE ONLY public.multi_nonce ALTER COLUMN id SET DEFAULT nextval('public.multi_nonce_id_seq'::regclass); + + +-- +-- Name: sync_status id; Type: DEFAULT; Schema: public; Owner: vdbm +-- + +ALTER TABLE ONLY public.sync_status ALTER COLUMN id SET DEFAULT nextval('public.sync_status_id_seq'::regclass); + + +-- +-- Data for Name: _owner; Type: TABLE DATA; Schema: public; Owner: vdbm +-- + +COPY public._owner (id, block_hash, block_number, contract_address, value, proof) FROM stdin; +\. 
+ + +-- +-- Data for Name: block_progress; Type: TABLE DATA; Schema: public; Owner: vdbm +-- + +COPY public.block_progress (id, cid, block_hash, parent_hash, block_number, block_timestamp, num_events, num_processed_events, last_processed_event_index, is_complete, is_pruned, created_at) FROM stdin; +1 bagiacgzahk6aqbbp75hft2xvtqnj425qaxj7ze4fspykcs745cyxg34bb3ba 0x3abc08042fff4e59eaf59c1a9e6bb005d3fc938593f0a14bfce8b1736f810ec2 0xafbdc83ac2dc79b5500c67751472eeac76594e4466c367b5f4a2895cd175ed97 14869713 1653872939 1 1 77 t f 2022-07-18 12:34:00.523 +5 bagiacgzav62hayc73buzkf24foyh5vrnt54ndxv76m6of7dprvpqpkpl5sra 0xafb470605fd86995175c2bb07ed62d9f78d1debff33ce2fc6f8d5f07a9ebeca2 0x33283f0fa7702e8c366715738c1d34c9750edd9cf74ae5dfb8d11f262ad69027 14885755 1654099778 2 2 119 t f 2022-07-18 12:34:42.361 +2 bagiacgzafdfrnz2azvox32djx3rjk7tuij4q5hlxjzxhdackm6jty7tcqa4a 0x28cb16e740cd5d7de869bee2957e7442790e9d774e6e71804a67933c7e628038 0xabd4915ed36022a05a9d95f51dc702103a2caab4c2f161321ab12a6bb77f01d1 14875233 1653950619 8 8 440 t f 2022-07-18 12:34:09.416 +3 bagiacgzan6rpxee4tm4gmzgcer3yx4enpvodtpzn2t2bjj72cblkhrng5bxa 0x6fa2fb909c9b386664c224778bf08d7d5c39bf2dd4f414a7fa1056a3c5a6e86e 0x976a8cb34b85994bce2fa5bda884f2a7c8ad68050645cb2dba5519e59cba013d 14876405 1653966919 4 4 274 t f 2022-07-18 12:34:19.014 +4 bagiacgzabrcmklsd5c3egq2hlrypg7opagtvuysqaf5r2q7nue2stozixbaa 0x0c44c52e43e8b64343475c70f37dcf01a75a6250017b1d43eda13529bb28b840 0xe48d7477413de216d3f7f4868b472047b82c8738890d7096f6c0e8398e92e39e 14884873 1654087572 12 12 518 t f 2022-07-18 12:34:33.681 +6 bagiacgzad4pz3x2ugxppkduwmvr2ncx4gavr2q5r5limcwr3gol2c7cff24q 0x1f1f9ddf5435def50e966563a68afc302b1d43b1ead0c15a3b3397a17c452eb9 0xbb8016b536b4f4e8ee93c614d74485a7d7eca814b49132599a932cfd03e324a2 15234194 1659054431 12 12 236 t f 2022-07-29 10:37:48.236 +\. 
+ + +-- +-- Data for Name: contract; Type: TABLE DATA; Schema: public; Owner: vdbm +-- + +COPY public.contract (id, address, kind, checkpoint, starting_block) FROM stdin; +1 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 PhisherRegistry t 14869713 +\. + + +-- +-- Data for Name: domain_hash; Type: TABLE DATA; Schema: public; Owner: vdbm +-- + +COPY public.domain_hash (id, block_hash, block_number, contract_address, value, proof) FROM stdin; +\. + + +-- +-- Data for Name: event; Type: TABLE DATA; Schema: public; Owner: vdbm +-- + +COPY public.event (id, tx_hash, index, contract, event_name, event_info, extra_info, proof, block_id) FROM stdin; +1 0x82f33cec81da44e94ef69924bc7d786d3f7856f06c1ef583d266dd1b7f091b82 77 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 OwnershipTransferred {"previousOwner":"0x0000000000000000000000000000000000000000","newOwner":"0xDdb18b319BE3530560eECFF962032dFAD88212d4"} {"topics":["0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0","0x0000000000000000000000000000000000000000000000000000000000000000","0x000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4"],"data":"0x","tx":{"cid":"bagjqcgzaqlztz3eb3jcostxwtesly7lynu7xqvxqnqppla6sm3orw7yjdoba","txHash":"0x82f33cec81da44e94ef69924bc7d786d3f7856f06c1ef583d266dd1b7f091b82","index":38,"src":"0xDdb18b319BE3530560eECFF962032dFAD88212d4","dst":"","__typename":"EthTransactionCid"},"eventSignature":"OwnershipTransferred(address,address)"} 
{"data":"{\\"blockHash\\":\\"0x3abc08042fff4e59eaf59c1a9e6bb005d3fc938593f0a14bfce8b1736f810ec2\\",\\"receiptCID\\":\\"bagkacgzappvknoiwyepymknt7dbcfh3jlejpscm3frdd66dwvkvmfwuuuota\\",\\"log\\":{\\"cid\\":\\"bagmqcgzak5xa5kdm3sjuvm3un77ll7oz2degukktjargydrj4fayhimdfo3a\\",\\"ipldBlock\\":\\"0xf882822080b87df87b94b06e6db9288324738f04fcaac910f5a60102c1f8f863a08be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0a00000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000ddb18b319be3530560eecff962032dfad88212d480\\"}}"} 1 +2 0x98998f7446c63178ea77d1b184cdfe781b9cbcff27100f03806f482b354f1be9 433 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 DelegationTriggered {"principal":"0xDdb18b319BE3530560eECFF962032dFAD88212d4","agent":"0xDdb18b319BE3530560eECFF962032dFAD88212d4"} {"topics":["0x185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960","0x000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4"],"data":"0x000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4","tx":{"cid":"bagjqcgzatcmy65cgyyyxr2tx2gyyjtp6panzzph7e4ia6a4an5ecwnkpdpuq","txHash":"0x98998f7446c63178ea77d1b184cdfe781b9cbcff27100f03806f482b354f1be9","index":136,"src":"0xFDEa65C8e26263F6d9A1B5de9555D2931A33b825","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"DelegationTriggered(address,address)"} {"data":"{\\"blockHash\\":\\"0x28cb16e740cd5d7de869bee2957e7442790e9d774e6e71804a67933c7e628038\\",\\"receiptCID\\":\\"bagkacgza7njxwiac6p4vxcmw5gnyxs32bum5jeq6k3j7xxyzaqm7gcrw6hwa\\",\\"log\\":{\\"cid\\":\\"bagmqcgzaz22koutltuxcphbuc72dcdt6xuqr2e3mk4w75xksg2zzqaynbmoa\\",\\"ipldBlock\\":\\"0xf87f30b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a0185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960a0000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4a0000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4\\"}}"} 2 +3 
0x98998f7446c63178ea77d1b184cdfe781b9cbcff27100f03806f482b354f1be9 434 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 MemberStatusUpdated {"entity":"0xdd77c46f6a736e44f19d33c56378a607fe3868a8c1a0866951beab5c9abc9aab","isMember":true} {"topics":["0x88e1b1a43f3edcb9afe941dfea296f5bc32fab715b5fc9aa101ec26d87d2e8a2","0xdd77c46f6a736e44f19d33c56378a607fe3868a8c1a0866951beab5c9abc9aab"],"data":"0x0000000000000000000000000000000000000000000000000000000000000001","tx":{"cid":"bagjqcgzatcmy65cgyyyxr2tx2gyyjtp6panzzph7e4ia6a4an5ecwnkpdpuq","txHash":"0x98998f7446c63178ea77d1b184cdfe781b9cbcff27100f03806f482b354f1be9","index":136,"src":"0xFDEa65C8e26263F6d9A1B5de9555D2931A33b825","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"MemberStatusUpdated(string,bool)"} {"data":"{\\"blockHash\\":\\"0x28cb16e740cd5d7de869bee2957e7442790e9d774e6e71804a67933c7e628038\\",\\"receiptCID\\":\\"bagkacgza7njxwiac6p4vxcmw5gnyxs32bum5jeq6k3j7xxyzaqm7gcrw6hwa\\",\\"log\\":{\\"cid\\":\\"bagmqcgzaflsnlinnufdz4ipp7vhrvg4gggvptx7ringzkwjfsrkw5bstou7a\\",\\"ipldBlock\\":\\"0xf87f20b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a088e1b1a43f3edcb9afe941dfea296f5bc32fab715b5fc9aa101ec26d87d2e8a2a0dd77c46f6a736e44f19d33c56378a607fe3868a8c1a0866951beab5c9abc9aaba00000000000000000000000000000000000000000000000000000000000000001\\"}}"} 2 +4 0x98998f7446c63178ea77d1b184cdfe781b9cbcff27100f03806f482b354f1be9 435 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 DelegationTriggered {"principal":"0xDdb18b319BE3530560eECFF962032dFAD88212d4","agent":"0xDdb18b319BE3530560eECFF962032dFAD88212d4"} 
{"topics":["0x185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960","0x000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4"],"data":"0x000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4","tx":{"cid":"bagjqcgzatcmy65cgyyyxr2tx2gyyjtp6panzzph7e4ia6a4an5ecwnkpdpuq","txHash":"0x98998f7446c63178ea77d1b184cdfe781b9cbcff27100f03806f482b354f1be9","index":136,"src":"0xFDEa65C8e26263F6d9A1B5de9555D2931A33b825","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"DelegationTriggered(address,address)"} {"data":"{\\"blockHash\\":\\"0x28cb16e740cd5d7de869bee2957e7442790e9d774e6e71804a67933c7e628038\\",\\"receiptCID\\":\\"bagkacgza7njxwiac6p4vxcmw5gnyxs32bum5jeq6k3j7xxyzaqm7gcrw6hwa\\",\\"log\\":{\\"cid\\":\\"bagmqcgzanj72wfbfvqby3dvz3jnh5nwstmvl3nlm6kxrkgfio7z643s2qesq\\",\\"ipldBlock\\":\\"0xf87f20b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a0185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960a0000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4a0000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4\\"}}"} 2 +5 0x98998f7446c63178ea77d1b184cdfe781b9cbcff27100f03806f482b354f1be9 436 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 MemberStatusUpdated {"entity":"0x501b05f326e247749a9ee05e173a4b32508afcf85ec6dbb26a6cbb2a4f2e8671","isMember":true} {"topics":["0x88e1b1a43f3edcb9afe941dfea296f5bc32fab715b5fc9aa101ec26d87d2e8a2","0x501b05f326e247749a9ee05e173a4b32508afcf85ec6dbb26a6cbb2a4f2e8671"],"data":"0x0000000000000000000000000000000000000000000000000000000000000001","tx":{"cid":"bagjqcgzatcmy65cgyyyxr2tx2gyyjtp6panzzph7e4ia6a4an5ecwnkpdpuq","txHash":"0x98998f7446c63178ea77d1b184cdfe781b9cbcff27100f03806f482b354f1be9","index":136,"src":"0xFDEa65C8e26263F6d9A1B5de9555D2931A33b825","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"MemberStatusUpdated(string,bool)"} 
{"data":"{\\"blockHash\\":\\"0x28cb16e740cd5d7de869bee2957e7442790e9d774e6e71804a67933c7e628038\\",\\"receiptCID\\":\\"bagkacgza7njxwiac6p4vxcmw5gnyxs32bum5jeq6k3j7xxyzaqm7gcrw6hwa\\",\\"log\\":{\\"cid\\":\\"bagmqcgzalcfjovtx7akikb4dhhu3i65pym47rdy3rys6d7trlfdzmr53us2a\\",\\"ipldBlock\\":\\"0xf87f20b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a088e1b1a43f3edcb9afe941dfea296f5bc32fab715b5fc9aa101ec26d87d2e8a2a0501b05f326e247749a9ee05e173a4b32508afcf85ec6dbb26a6cbb2a4f2e8671a00000000000000000000000000000000000000000000000000000000000000001\\"}}"} 2 +6 0x98998f7446c63178ea77d1b184cdfe781b9cbcff27100f03806f482b354f1be9 437 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 DelegationTriggered {"principal":"0xDdb18b319BE3530560eECFF962032dFAD88212d4","agent":"0xDdb18b319BE3530560eECFF962032dFAD88212d4"} {"topics":["0x185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960","0x000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4"],"data":"0x000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4","tx":{"cid":"bagjqcgzatcmy65cgyyyxr2tx2gyyjtp6panzzph7e4ia6a4an5ecwnkpdpuq","txHash":"0x98998f7446c63178ea77d1b184cdfe781b9cbcff27100f03806f482b354f1be9","index":136,"src":"0xFDEa65C8e26263F6d9A1B5de9555D2931A33b825","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"DelegationTriggered(address,address)"} {"data":"{\\"blockHash\\":\\"0x28cb16e740cd5d7de869bee2957e7442790e9d774e6e71804a67933c7e628038\\",\\"receiptCID\\":\\"bagkacgza7njxwiac6p4vxcmw5gnyxs32bum5jeq6k3j7xxyzaqm7gcrw6hwa\\",\\"log\\":{\\"cid\\":\\"bagmqcgzanj72wfbfvqby3dvz3jnh5nwstmvl3nlm6kxrkgfio7z643s2qesq\\",\\"ipldBlock\\":\\"0xf87f20b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a0185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960a0000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4a0000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4\\"}}"} 2 +7 
0x98998f7446c63178ea77d1b184cdfe781b9cbcff27100f03806f482b354f1be9 438 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 MemberStatusUpdated {"entity":"0x0b73fffe472959ca14f2bfa56de755ad570d80daaf8eb935ac5e60578d9cdf6e","isMember":true} {"topics":["0x88e1b1a43f3edcb9afe941dfea296f5bc32fab715b5fc9aa101ec26d87d2e8a2","0x0b73fffe472959ca14f2bfa56de755ad570d80daaf8eb935ac5e60578d9cdf6e"],"data":"0x0000000000000000000000000000000000000000000000000000000000000001","tx":{"cid":"bagjqcgzatcmy65cgyyyxr2tx2gyyjtp6panzzph7e4ia6a4an5ecwnkpdpuq","txHash":"0x98998f7446c63178ea77d1b184cdfe781b9cbcff27100f03806f482b354f1be9","index":136,"src":"0xFDEa65C8e26263F6d9A1B5de9555D2931A33b825","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"MemberStatusUpdated(string,bool)"} {"data":"{\\"blockHash\\":\\"0x28cb16e740cd5d7de869bee2957e7442790e9d774e6e71804a67933c7e628038\\",\\"receiptCID\\":\\"bagkacgza7njxwiac6p4vxcmw5gnyxs32bum5jeq6k3j7xxyzaqm7gcrw6hwa\\",\\"log\\":{\\"cid\\":\\"bagmqcgzaehy7vjkfidari3wc72kp3baac2w5zjfcmt4wvz6bs4mgkpjrlnta\\",\\"ipldBlock\\":\\"0xf87f20b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a088e1b1a43f3edcb9afe941dfea296f5bc32fab715b5fc9aa101ec26d87d2e8a2a00b73fffe472959ca14f2bfa56de755ad570d80daaf8eb935ac5e60578d9cdf6ea00000000000000000000000000000000000000000000000000000000000000001\\"}}"} 2 +8 0x98998f7446c63178ea77d1b184cdfe781b9cbcff27100f03806f482b354f1be9 439 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 DelegationTriggered {"principal":"0xDdb18b319BE3530560eECFF962032dFAD88212d4","agent":"0xDdb18b319BE3530560eECFF962032dFAD88212d4"} 
{"topics":["0x185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960","0x000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4"],"data":"0x000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4","tx":{"cid":"bagjqcgzatcmy65cgyyyxr2tx2gyyjtp6panzzph7e4ia6a4an5ecwnkpdpuq","txHash":"0x98998f7446c63178ea77d1b184cdfe781b9cbcff27100f03806f482b354f1be9","index":136,"src":"0xFDEa65C8e26263F6d9A1B5de9555D2931A33b825","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"DelegationTriggered(address,address)"} {"data":"{\\"blockHash\\":\\"0x28cb16e740cd5d7de869bee2957e7442790e9d774e6e71804a67933c7e628038\\",\\"receiptCID\\":\\"bagkacgza7njxwiac6p4vxcmw5gnyxs32bum5jeq6k3j7xxyzaqm7gcrw6hwa\\",\\"log\\":{\\"cid\\":\\"bagmqcgzanj72wfbfvqby3dvz3jnh5nwstmvl3nlm6kxrkgfio7z643s2qesq\\",\\"ipldBlock\\":\\"0xf87f20b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a0185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960a0000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4a0000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4\\"}}"} 2 +9 0x98998f7446c63178ea77d1b184cdfe781b9cbcff27100f03806f482b354f1be9 440 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 MemberStatusUpdated {"entity":"0x8276afdf1db4e6957dd6e50fb3e6ddb56594c9adcff5403706515b9eab719f27","isMember":true} {"topics":["0x88e1b1a43f3edcb9afe941dfea296f5bc32fab715b5fc9aa101ec26d87d2e8a2","0x8276afdf1db4e6957dd6e50fb3e6ddb56594c9adcff5403706515b9eab719f27"],"data":"0x0000000000000000000000000000000000000000000000000000000000000001","tx":{"cid":"bagjqcgzatcmy65cgyyyxr2tx2gyyjtp6panzzph7e4ia6a4an5ecwnkpdpuq","txHash":"0x98998f7446c63178ea77d1b184cdfe781b9cbcff27100f03806f482b354f1be9","index":136,"src":"0xFDEa65C8e26263F6d9A1B5de9555D2931A33b825","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"MemberStatusUpdated(string,bool)"} 
{"data":"{\\"blockHash\\":\\"0x28cb16e740cd5d7de869bee2957e7442790e9d774e6e71804a67933c7e628038\\",\\"receiptCID\\":\\"bagkacgza7njxwiac6p4vxcmw5gnyxs32bum5jeq6k3j7xxyzaqm7gcrw6hwa\\",\\"log\\":{\\"cid\\":\\"bagmqcgzaqbsfupctztrjxngvfcntxi5c4pdee5sh46wmtlbs5sbbqbplcoiq\\",\\"ipldBlock\\":\\"0xf87f20b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a088e1b1a43f3edcb9afe941dfea296f5bc32fab715b5fc9aa101ec26d87d2e8a2a08276afdf1db4e6957dd6e50fb3e6ddb56594c9adcff5403706515b9eab719f27a00000000000000000000000000000000000000000000000000000000000000001\\"}}"} 2 +10 0x930191eb049b1ce18e58b2c0017a1c3213bb509bd5469acd3b2b6c1ffc8859ff 271 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 DelegationTriggered {"principal":"0xDdb18b319BE3530560eECFF962032dFAD88212d4","agent":"0x50f01432A375DcDEa074957154e4F8d1aEB4177d"} {"topics":["0x185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960","0x00000000000000000000000050f01432a375dcdea074957154e4f8d1aeb4177d"],"data":"0x000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4","tx":{"cid":"bagjqcgzasmazd2yetmooddsywlaac6q4gij3wue32vdjvtj3fnwb77eilh7q","txHash":"0x930191eb049b1ce18e58b2c0017a1c3213bb509bd5469acd3b2b6c1ffc8859ff","index":296,"src":"0xFDEa65C8e26263F6d9A1B5de9555D2931A33b825","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"DelegationTriggered(address,address)"} {"data":"{\\"blockHash\\":\\"0x6fa2fb909c9b386664c224778bf08d7d5c39bf2dd4f414a7fa1056a3c5a6e86e\\",\\"receiptCID\\":\\"bagkacgzaiwyyw2llnh3rwbyep42qkqyftchkkppb5qj5f4u6ltdz2cl5kcaa\\",\\"log\\":{\\"cid\\":\\"bagmqcgzadn3fcrvtf5wwsqprt4qjdxll76kn7teshumu3rmosxai55l3qysq\\",\\"ipldBlock\\":\\"0xf87f30b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a0185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960a000000000000000000000000050f01432a375dcdea074957154e4f8d1aeb4177da0000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4\\"}}"} 3 +11 
0x930191eb049b1ce18e58b2c0017a1c3213bb509bd5469acd3b2b6c1ffc8859ff 272 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 MemberStatusUpdated {"entity":"0x5be61e7fb5d5175135aaa6b232f13d9b22a229113638cdc0bac78221ff9c9aa0","isMember":true} {"topics":["0x88e1b1a43f3edcb9afe941dfea296f5bc32fab715b5fc9aa101ec26d87d2e8a2","0x5be61e7fb5d5175135aaa6b232f13d9b22a229113638cdc0bac78221ff9c9aa0"],"data":"0x0000000000000000000000000000000000000000000000000000000000000001","tx":{"cid":"bagjqcgzasmazd2yetmooddsywlaac6q4gij3wue32vdjvtj3fnwb77eilh7q","txHash":"0x930191eb049b1ce18e58b2c0017a1c3213bb509bd5469acd3b2b6c1ffc8859ff","index":296,"src":"0xFDEa65C8e26263F6d9A1B5de9555D2931A33b825","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"MemberStatusUpdated(string,bool)"} {"data":"{\\"blockHash\\":\\"0x6fa2fb909c9b386664c224778bf08d7d5c39bf2dd4f414a7fa1056a3c5a6e86e\\",\\"receiptCID\\":\\"bagkacgzaiwyyw2llnh3rwbyep42qkqyftchkkppb5qj5f4u6ltdz2cl5kcaa\\",\\"log\\":{\\"cid\\":\\"bagmqcgzaq5l7ow4vbidbo3p2djy5qy4mprqyir4dmol2uqeyvxc7fxfl4kvq\\",\\"ipldBlock\\":\\"0xf87f20b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a088e1b1a43f3edcb9afe941dfea296f5bc32fab715b5fc9aa101ec26d87d2e8a2a05be61e7fb5d5175135aaa6b232f13d9b22a229113638cdc0bac78221ff9c9aa0a00000000000000000000000000000000000000000000000000000000000000001\\"}}"} 3 +12 0x930191eb049b1ce18e58b2c0017a1c3213bb509bd5469acd3b2b6c1ffc8859ff 273 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 DelegationTriggered {"principal":"0xDdb18b319BE3530560eECFF962032dFAD88212d4","agent":"0x50f01432A375DcDEa074957154e4F8d1aEB4177d"} 
{"topics":["0x185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960","0x00000000000000000000000050f01432a375dcdea074957154e4f8d1aeb4177d"],"data":"0x000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4","tx":{"cid":"bagjqcgzasmazd2yetmooddsywlaac6q4gij3wue32vdjvtj3fnwb77eilh7q","txHash":"0x930191eb049b1ce18e58b2c0017a1c3213bb509bd5469acd3b2b6c1ffc8859ff","index":296,"src":"0xFDEa65C8e26263F6d9A1B5de9555D2931A33b825","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"DelegationTriggered(address,address)"} {"data":"{\\"blockHash\\":\\"0x6fa2fb909c9b386664c224778bf08d7d5c39bf2dd4f414a7fa1056a3c5a6e86e\\",\\"receiptCID\\":\\"bagkacgzaiwyyw2llnh3rwbyep42qkqyftchkkppb5qj5f4u6ltdz2cl5kcaa\\",\\"log\\":{\\"cid\\":\\"bagmqcgzaas5munc2du7d2ipgyxqsa7reeueczkcfyrh5zjjesllsxatj3mgq\\",\\"ipldBlock\\":\\"0xf87f20b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a0185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960a000000000000000000000000050f01432a375dcdea074957154e4f8d1aeb4177da0000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4\\"}}"} 3 +13 0x930191eb049b1ce18e58b2c0017a1c3213bb509bd5469acd3b2b6c1ffc8859ff 274 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 MemberStatusUpdated {"entity":"0x956e5681abbafa25458057b0abaa1a3cec4108d2289954836d0c7f5b37fd6580","isMember":true} {"topics":["0x88e1b1a43f3edcb9afe941dfea296f5bc32fab715b5fc9aa101ec26d87d2e8a2","0x956e5681abbafa25458057b0abaa1a3cec4108d2289954836d0c7f5b37fd6580"],"data":"0x0000000000000000000000000000000000000000000000000000000000000001","tx":{"cid":"bagjqcgzasmazd2yetmooddsywlaac6q4gij3wue32vdjvtj3fnwb77eilh7q","txHash":"0x930191eb049b1ce18e58b2c0017a1c3213bb509bd5469acd3b2b6c1ffc8859ff","index":296,"src":"0xFDEa65C8e26263F6d9A1B5de9555D2931A33b825","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"MemberStatusUpdated(string,bool)"} 
{"data":"{\\"blockHash\\":\\"0x6fa2fb909c9b386664c224778bf08d7d5c39bf2dd4f414a7fa1056a3c5a6e86e\\",\\"receiptCID\\":\\"bagkacgzaiwyyw2llnh3rwbyep42qkqyftchkkppb5qj5f4u6ltdz2cl5kcaa\\",\\"log\\":{\\"cid\\":\\"bagmqcgzagp47k6p3tgrom3adpx6jvr45vne2edtejaenqggtxjjfqramcmea\\",\\"ipldBlock\\":\\"0xf87f20b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a088e1b1a43f3edcb9afe941dfea296f5bc32fab715b5fc9aa101ec26d87d2e8a2a0956e5681abbafa25458057b0abaa1a3cec4108d2289954836d0c7f5b37fd6580a00000000000000000000000000000000000000000000000000000000000000001\\"}}"} 3 +14 0x88f8ea85a66f8bcf4be780449661883534f12cbb54e69c587617b4c28d1ae0ed 507 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 DelegationTriggered {"principal":"0xDdb18b319BE3530560eECFF962032dFAD88212d4","agent":"0xBc89f39d47BF0f67CA1e0C7aBBE3236F454f748a"} {"topics":["0x185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960","0x000000000000000000000000bc89f39d47bf0f67ca1e0c7abbe3236f454f748a"],"data":"0x000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4","tx":{"cid":"bagjqcgzard4ovbngn6f46s7hqbcjmymigu2pclf3kttjywdwc62mfdi24dwq","txHash":"0x88f8ea85a66f8bcf4be780449661883534f12cbb54e69c587617b4c28d1ae0ed","index":193,"src":"0x19c49117a8167296cAF5D23Ab48e355ec1c8bE8B","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"DelegationTriggered(address,address)"} {"data":"{\\"blockHash\\":\\"0x0c44c52e43e8b64343475c70f37dcf01a75a6250017b1d43eda13529bb28b840\\",\\"receiptCID\\":\\"bagkacgza756voltxaaftxraxkdjhuh6jh57zla6mqkunpiajivf477kkoleq\\",\\"log\\":{\\"cid\\":\\"bagmqcgzagf7jx3lguaponolmnsjyxm2mhpkaroghk26roi7okwglucjtjs4q\\",\\"ipldBlock\\":\\"0xf87f30b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a0185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960a0000000000000000000000000bc89f39d47bf0f67ca1e0c7abbe3236f454f748aa0000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4\\"}}"} 4 +15 
0x88f8ea85a66f8bcf4be780449661883534f12cbb54e69c587617b4c28d1ae0ed 508 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 MemberStatusUpdated {"entity":"0xdb00d9ee49d48ca5077597917bf50d84d2671b16a94c95fa4fa5be69bc50c03a","isMember":true} {"topics":["0x88e1b1a43f3edcb9afe941dfea296f5bc32fab715b5fc9aa101ec26d87d2e8a2","0xdb00d9ee49d48ca5077597917bf50d84d2671b16a94c95fa4fa5be69bc50c03a"],"data":"0x0000000000000000000000000000000000000000000000000000000000000001","tx":{"cid":"bagjqcgzard4ovbngn6f46s7hqbcjmymigu2pclf3kttjywdwc62mfdi24dwq","txHash":"0x88f8ea85a66f8bcf4be780449661883534f12cbb54e69c587617b4c28d1ae0ed","index":193,"src":"0x19c49117a8167296cAF5D23Ab48e355ec1c8bE8B","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"MemberStatusUpdated(string,bool)"} {"data":"{\\"blockHash\\":\\"0x0c44c52e43e8b64343475c70f37dcf01a75a6250017b1d43eda13529bb28b840\\",\\"receiptCID\\":\\"bagkacgza756voltxaaftxraxkdjhuh6jh57zla6mqkunpiajivf477kkoleq\\",\\"log\\":{\\"cid\\":\\"bagmqcgzatttg7cjphkpc46klxy32jr4vfj6lxo7573nz3rob6dvnq7magsoa\\",\\"ipldBlock\\":\\"0xf87f20b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a088e1b1a43f3edcb9afe941dfea296f5bc32fab715b5fc9aa101ec26d87d2e8a2a0db00d9ee49d48ca5077597917bf50d84d2671b16a94c95fa4fa5be69bc50c03aa00000000000000000000000000000000000000000000000000000000000000001\\"}}"} 4 +16 0x88f8ea85a66f8bcf4be780449661883534f12cbb54e69c587617b4c28d1ae0ed 509 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 DelegationTriggered {"principal":"0xDdb18b319BE3530560eECFF962032dFAD88212d4","agent":"0xBc89f39d47BF0f67CA1e0C7aBBE3236F454f748a"} 
{"topics":["0x185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960","0x000000000000000000000000bc89f39d47bf0f67ca1e0c7abbe3236f454f748a"],"data":"0x000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4","tx":{"cid":"bagjqcgzard4ovbngn6f46s7hqbcjmymigu2pclf3kttjywdwc62mfdi24dwq","txHash":"0x88f8ea85a66f8bcf4be780449661883534f12cbb54e69c587617b4c28d1ae0ed","index":193,"src":"0x19c49117a8167296cAF5D23Ab48e355ec1c8bE8B","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"DelegationTriggered(address,address)"} {"data":"{\\"blockHash\\":\\"0x0c44c52e43e8b64343475c70f37dcf01a75a6250017b1d43eda13529bb28b840\\",\\"receiptCID\\":\\"bagkacgza756voltxaaftxraxkdjhuh6jh57zla6mqkunpiajivf477kkoleq\\",\\"log\\":{\\"cid\\":\\"bagmqcgza2g5np2s2ffmppacclx3gwmrjeumoi5c44l6lt64ekctavu5f356a\\",\\"ipldBlock\\":\\"0xf87f20b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a0185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960a0000000000000000000000000bc89f39d47bf0f67ca1e0c7abbe3236f454f748aa0000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4\\"}}"} 4 +17 0x88f8ea85a66f8bcf4be780449661883534f12cbb54e69c587617b4c28d1ae0ed 510 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 MemberStatusUpdated {"entity":"0x33dc7a4e6362711b3cbdc90edcb9a621ed5c2ba73eb4adbf3e90cc21764d550d","isMember":true} {"topics":["0x88e1b1a43f3edcb9afe941dfea296f5bc32fab715b5fc9aa101ec26d87d2e8a2","0x33dc7a4e6362711b3cbdc90edcb9a621ed5c2ba73eb4adbf3e90cc21764d550d"],"data":"0x0000000000000000000000000000000000000000000000000000000000000001","tx":{"cid":"bagjqcgzard4ovbngn6f46s7hqbcjmymigu2pclf3kttjywdwc62mfdi24dwq","txHash":"0x88f8ea85a66f8bcf4be780449661883534f12cbb54e69c587617b4c28d1ae0ed","index":193,"src":"0x19c49117a8167296cAF5D23Ab48e355ec1c8bE8B","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"MemberStatusUpdated(string,bool)"} 
{"data":"{\\"blockHash\\":\\"0x0c44c52e43e8b64343475c70f37dcf01a75a6250017b1d43eda13529bb28b840\\",\\"receiptCID\\":\\"bagkacgza756voltxaaftxraxkdjhuh6jh57zla6mqkunpiajivf477kkoleq\\",\\"log\\":{\\"cid\\":\\"bagmqcgzamjreuppb5xkmjdelhahazmb54mzykjufxj4fvo42u26iqxuxpzdq\\",\\"ipldBlock\\":\\"0xf87f20b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a088e1b1a43f3edcb9afe941dfea296f5bc32fab715b5fc9aa101ec26d87d2e8a2a033dc7a4e6362711b3cbdc90edcb9a621ed5c2ba73eb4adbf3e90cc21764d550da00000000000000000000000000000000000000000000000000000000000000001\\"}}"} 4 +18 0x88f8ea85a66f8bcf4be780449661883534f12cbb54e69c587617b4c28d1ae0ed 511 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 DelegationTriggered {"principal":"0xDdb18b319BE3530560eECFF962032dFAD88212d4","agent":"0xBc89f39d47BF0f67CA1e0C7aBBE3236F454f748a"} {"topics":["0x185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960","0x000000000000000000000000bc89f39d47bf0f67ca1e0c7abbe3236f454f748a"],"data":"0x000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4","tx":{"cid":"bagjqcgzard4ovbngn6f46s7hqbcjmymigu2pclf3kttjywdwc62mfdi24dwq","txHash":"0x88f8ea85a66f8bcf4be780449661883534f12cbb54e69c587617b4c28d1ae0ed","index":193,"src":"0x19c49117a8167296cAF5D23Ab48e355ec1c8bE8B","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"DelegationTriggered(address,address)"} {"data":"{\\"blockHash\\":\\"0x0c44c52e43e8b64343475c70f37dcf01a75a6250017b1d43eda13529bb28b840\\",\\"receiptCID\\":\\"bagkacgza756voltxaaftxraxkdjhuh6jh57zla6mqkunpiajivf477kkoleq\\",\\"log\\":{\\"cid\\":\\"bagmqcgza2g5np2s2ffmppacclx3gwmrjeumoi5c44l6lt64ekctavu5f356a\\",\\"ipldBlock\\":\\"0xf87f20b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a0185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960a0000000000000000000000000bc89f39d47bf0f67ca1e0c7abbe3236f454f748aa0000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4\\"}}"} 4 +19 
0x88f8ea85a66f8bcf4be780449661883534f12cbb54e69c587617b4c28d1ae0ed 512 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 MemberStatusUpdated {"entity":"0xdef5c249e7975deeacae0568ccd7ad10f4b482c4ef3476bf448ff9bb6167731f","isMember":true} {"topics":["0x88e1b1a43f3edcb9afe941dfea296f5bc32fab715b5fc9aa101ec26d87d2e8a2","0xdef5c249e7975deeacae0568ccd7ad10f4b482c4ef3476bf448ff9bb6167731f"],"data":"0x0000000000000000000000000000000000000000000000000000000000000001","tx":{"cid":"bagjqcgzard4ovbngn6f46s7hqbcjmymigu2pclf3kttjywdwc62mfdi24dwq","txHash":"0x88f8ea85a66f8bcf4be780449661883534f12cbb54e69c587617b4c28d1ae0ed","index":193,"src":"0x19c49117a8167296cAF5D23Ab48e355ec1c8bE8B","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"MemberStatusUpdated(string,bool)"} {"data":"{\\"blockHash\\":\\"0x0c44c52e43e8b64343475c70f37dcf01a75a6250017b1d43eda13529bb28b840\\",\\"receiptCID\\":\\"bagkacgza756voltxaaftxraxkdjhuh6jh57zla6mqkunpiajivf477kkoleq\\",\\"log\\":{\\"cid\\":\\"bagmqcgzaeokcjndceushmyfhdkag7fwkg25knbwoxjxqlqhjlrkgmhjj27hq\\",\\"ipldBlock\\":\\"0xf87f20b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a088e1b1a43f3edcb9afe941dfea296f5bc32fab715b5fc9aa101ec26d87d2e8a2a0def5c249e7975deeacae0568ccd7ad10f4b482c4ef3476bf448ff9bb6167731fa00000000000000000000000000000000000000000000000000000000000000001\\"}}"} 4 +20 0x88f8ea85a66f8bcf4be780449661883534f12cbb54e69c587617b4c28d1ae0ed 513 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 DelegationTriggered {"principal":"0xDdb18b319BE3530560eECFF962032dFAD88212d4","agent":"0xBc89f39d47BF0f67CA1e0C7aBBE3236F454f748a"} 
{"topics":["0x185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960","0x000000000000000000000000bc89f39d47bf0f67ca1e0c7abbe3236f454f748a"],"data":"0x000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4","tx":{"cid":"bagjqcgzard4ovbngn6f46s7hqbcjmymigu2pclf3kttjywdwc62mfdi24dwq","txHash":"0x88f8ea85a66f8bcf4be780449661883534f12cbb54e69c587617b4c28d1ae0ed","index":193,"src":"0x19c49117a8167296cAF5D23Ab48e355ec1c8bE8B","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"DelegationTriggered(address,address)"} {"data":"{\\"blockHash\\":\\"0x0c44c52e43e8b64343475c70f37dcf01a75a6250017b1d43eda13529bb28b840\\",\\"receiptCID\\":\\"bagkacgza756voltxaaftxraxkdjhuh6jh57zla6mqkunpiajivf477kkoleq\\",\\"log\\":{\\"cid\\":\\"bagmqcgza2g5np2s2ffmppacclx3gwmrjeumoi5c44l6lt64ekctavu5f356a\\",\\"ipldBlock\\":\\"0xf87f20b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a0185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960a0000000000000000000000000bc89f39d47bf0f67ca1e0c7abbe3236f454f748aa0000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4\\"}}"} 4 +21 0x88f8ea85a66f8bcf4be780449661883534f12cbb54e69c587617b4c28d1ae0ed 514 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 MemberStatusUpdated {"entity":"0x165892f97103f95276884abea5e604985437687a8e5b35ac4428098f69c66a9f","isMember":true} {"topics":["0x88e1b1a43f3edcb9afe941dfea296f5bc32fab715b5fc9aa101ec26d87d2e8a2","0x165892f97103f95276884abea5e604985437687a8e5b35ac4428098f69c66a9f"],"data":"0x0000000000000000000000000000000000000000000000000000000000000001","tx":{"cid":"bagjqcgzard4ovbngn6f46s7hqbcjmymigu2pclf3kttjywdwc62mfdi24dwq","txHash":"0x88f8ea85a66f8bcf4be780449661883534f12cbb54e69c587617b4c28d1ae0ed","index":193,"src":"0x19c49117a8167296cAF5D23Ab48e355ec1c8bE8B","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"MemberStatusUpdated(string,bool)"} 
{"data":"{\\"blockHash\\":\\"0x0c44c52e43e8b64343475c70f37dcf01a75a6250017b1d43eda13529bb28b840\\",\\"receiptCID\\":\\"bagkacgza756voltxaaftxraxkdjhuh6jh57zla6mqkunpiajivf477kkoleq\\",\\"log\\":{\\"cid\\":\\"bagmqcgzanwfms4swgcarbwfosr7uhmyxsefofusyj6m2oyoxy54zldewkeda\\",\\"ipldBlock\\":\\"0xf87f20b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a088e1b1a43f3edcb9afe941dfea296f5bc32fab715b5fc9aa101ec26d87d2e8a2a0165892f97103f95276884abea5e604985437687a8e5b35ac4428098f69c66a9fa00000000000000000000000000000000000000000000000000000000000000001\\"}}"} 4 +22 0x88f8ea85a66f8bcf4be780449661883534f12cbb54e69c587617b4c28d1ae0ed 515 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 DelegationTriggered {"principal":"0xDdb18b319BE3530560eECFF962032dFAD88212d4","agent":"0xBc89f39d47BF0f67CA1e0C7aBBE3236F454f748a"} {"topics":["0x185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960","0x000000000000000000000000bc89f39d47bf0f67ca1e0c7abbe3236f454f748a"],"data":"0x000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4","tx":{"cid":"bagjqcgzard4ovbngn6f46s7hqbcjmymigu2pclf3kttjywdwc62mfdi24dwq","txHash":"0x88f8ea85a66f8bcf4be780449661883534f12cbb54e69c587617b4c28d1ae0ed","index":193,"src":"0x19c49117a8167296cAF5D23Ab48e355ec1c8bE8B","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"DelegationTriggered(address,address)"} {"data":"{\\"blockHash\\":\\"0x0c44c52e43e8b64343475c70f37dcf01a75a6250017b1d43eda13529bb28b840\\",\\"receiptCID\\":\\"bagkacgza756voltxaaftxraxkdjhuh6jh57zla6mqkunpiajivf477kkoleq\\",\\"log\\":{\\"cid\\":\\"bagmqcgza2g5np2s2ffmppacclx3gwmrjeumoi5c44l6lt64ekctavu5f356a\\",\\"ipldBlock\\":\\"0xf87f20b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a0185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960a0000000000000000000000000bc89f39d47bf0f67ca1e0c7abbe3236f454f748aa0000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4\\"}}"} 4 +23 
0x88f8ea85a66f8bcf4be780449661883534f12cbb54e69c587617b4c28d1ae0ed 516 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 MemberStatusUpdated {"entity":"0x4e47d3592c7c70485bf59f3aae389fbc82455da11000f53ac0665c5e343c8e14","isMember":true} {"topics":["0x88e1b1a43f3edcb9afe941dfea296f5bc32fab715b5fc9aa101ec26d87d2e8a2","0x4e47d3592c7c70485bf59f3aae389fbc82455da11000f53ac0665c5e343c8e14"],"data":"0x0000000000000000000000000000000000000000000000000000000000000001","tx":{"cid":"bagjqcgzard4ovbngn6f46s7hqbcjmymigu2pclf3kttjywdwc62mfdi24dwq","txHash":"0x88f8ea85a66f8bcf4be780449661883534f12cbb54e69c587617b4c28d1ae0ed","index":193,"src":"0x19c49117a8167296cAF5D23Ab48e355ec1c8bE8B","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"MemberStatusUpdated(string,bool)"} {"data":"{\\"blockHash\\":\\"0x0c44c52e43e8b64343475c70f37dcf01a75a6250017b1d43eda13529bb28b840\\",\\"receiptCID\\":\\"bagkacgza756voltxaaftxraxkdjhuh6jh57zla6mqkunpiajivf477kkoleq\\",\\"log\\":{\\"cid\\":\\"bagmqcgza5yzqveeeqvq4wabjxyulanz6ynqe2vhjhwplkff4xjlwkjve3cta\\",\\"ipldBlock\\":\\"0xf87f20b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a088e1b1a43f3edcb9afe941dfea296f5bc32fab715b5fc9aa101ec26d87d2e8a2a04e47d3592c7c70485bf59f3aae389fbc82455da11000f53ac0665c5e343c8e14a00000000000000000000000000000000000000000000000000000000000000001\\"}}"} 4 +24 0x88f8ea85a66f8bcf4be780449661883534f12cbb54e69c587617b4c28d1ae0ed 517 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 DelegationTriggered {"principal":"0xDdb18b319BE3530560eECFF962032dFAD88212d4","agent":"0xBc89f39d47BF0f67CA1e0C7aBBE3236F454f748a"} 
{"topics":["0x185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960","0x000000000000000000000000bc89f39d47bf0f67ca1e0c7abbe3236f454f748a"],"data":"0x000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4","tx":{"cid":"bagjqcgzard4ovbngn6f46s7hqbcjmymigu2pclf3kttjywdwc62mfdi24dwq","txHash":"0x88f8ea85a66f8bcf4be780449661883534f12cbb54e69c587617b4c28d1ae0ed","index":193,"src":"0x19c49117a8167296cAF5D23Ab48e355ec1c8bE8B","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"DelegationTriggered(address,address)"} {"data":"{\\"blockHash\\":\\"0x0c44c52e43e8b64343475c70f37dcf01a75a6250017b1d43eda13529bb28b840\\",\\"receiptCID\\":\\"bagkacgza756voltxaaftxraxkdjhuh6jh57zla6mqkunpiajivf477kkoleq\\",\\"log\\":{\\"cid\\":\\"bagmqcgza2g5np2s2ffmppacclx3gwmrjeumoi5c44l6lt64ekctavu5f356a\\",\\"ipldBlock\\":\\"0xf87f20b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a0185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960a0000000000000000000000000bc89f39d47bf0f67ca1e0c7abbe3236f454f748aa0000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4\\"}}"} 4 +25 0x88f8ea85a66f8bcf4be780449661883534f12cbb54e69c587617b4c28d1ae0ed 518 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 MemberStatusUpdated {"entity":"0x16a1ef186d11b33d747c8c44fc8bf3445db567cd5ab29d9e2c1c81781a51647a","isMember":true} {"topics":["0x88e1b1a43f3edcb9afe941dfea296f5bc32fab715b5fc9aa101ec26d87d2e8a2","0x16a1ef186d11b33d747c8c44fc8bf3445db567cd5ab29d9e2c1c81781a51647a"],"data":"0x0000000000000000000000000000000000000000000000000000000000000001","tx":{"cid":"bagjqcgzard4ovbngn6f46s7hqbcjmymigu2pclf3kttjywdwc62mfdi24dwq","txHash":"0x88f8ea85a66f8bcf4be780449661883534f12cbb54e69c587617b4c28d1ae0ed","index":193,"src":"0x19c49117a8167296cAF5D23Ab48e355ec1c8bE8B","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"MemberStatusUpdated(string,bool)"} 
{"data":"{\\"blockHash\\":\\"0x0c44c52e43e8b64343475c70f37dcf01a75a6250017b1d43eda13529bb28b840\\",\\"receiptCID\\":\\"bagkacgza756voltxaaftxraxkdjhuh6jh57zla6mqkunpiajivf477kkoleq\\",\\"log\\":{\\"cid\\":\\"bagmqcgzagwxahowqxuwrld5k2yr5vzhkoelh7hu46dpvctllikaodxbn5yyq\\",\\"ipldBlock\\":\\"0xf87f20b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a088e1b1a43f3edcb9afe941dfea296f5bc32fab715b5fc9aa101ec26d87d2e8a2a016a1ef186d11b33d747c8c44fc8bf3445db567cd5ab29d9e2c1c81781a51647aa00000000000000000000000000000000000000000000000000000000000000001\\"}}"} 4 +26 0x6e2401fdf1301a0700ab604be31485a5a2e76b1a781ec3a4eff1e8100db80719 118 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 DelegationTriggered {"principal":"0xDdb18b319BE3530560eECFF962032dFAD88212d4","agent":"0x8C38B6212D6A78EB7a2DA7E204fBfe003903CF47"} {"topics":["0x185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960","0x0000000000000000000000008c38b6212d6a78eb7a2da7e204fbfe003903cf47"],"data":"0x000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4","tx":{"cid":"bagjqcgzanysad7prganaoaflmbf6gfefuwroo2y2papmhjhp6hubadnya4mq","txHash":"0x6e2401fdf1301a0700ab604be31485a5a2e76b1a781ec3a4eff1e8100db80719","index":56,"src":"0xE8D848debB3A3e12AA815b15900c8E020B863F31","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"DelegationTriggered(address,address)"} {"data":"{\\"blockHash\\":\\"0xafb470605fd86995175c2bb07ed62d9f78d1debff33ce2fc6f8d5f07a9ebeca2\\",\\"receiptCID\\":\\"bagkacgzaklu3ddgwwsmemfw5b2wgfs6c62euf233o3tslufku4u2v4bdt7za\\",\\"log\\":{\\"cid\\":\\"bagmqcgzaja54iaazd37cfk6pkqnkidfyguloff4er2e57oavnessaunweyma\\",\\"ipldBlock\\":\\"0xf87f30b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a0185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960a00000000000000000000000008c38b6212d6a78eb7a2da7e204fbfe003903cf47a0000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4\\"}}"} 5 +27 
0x6e2401fdf1301a0700ab604be31485a5a2e76b1a781ec3a4eff1e8100db80719 119 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 MemberStatusUpdated {"entity":"0x1c27f716f8d8b62fd373e4f08eb48277c22fbb3b3d146ba67313ab3b6d046fd0","isMember":true} {"topics":["0x88e1b1a43f3edcb9afe941dfea296f5bc32fab715b5fc9aa101ec26d87d2e8a2","0x1c27f716f8d8b62fd373e4f08eb48277c22fbb3b3d146ba67313ab3b6d046fd0"],"data":"0x0000000000000000000000000000000000000000000000000000000000000001","tx":{"cid":"bagjqcgzanysad7prganaoaflmbf6gfefuwroo2y2papmhjhp6hubadnya4mq","txHash":"0x6e2401fdf1301a0700ab604be31485a5a2e76b1a781ec3a4eff1e8100db80719","index":56,"src":"0xE8D848debB3A3e12AA815b15900c8E020B863F31","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"MemberStatusUpdated(string,bool)"} {"data":"{\\"blockHash\\":\\"0xafb470605fd86995175c2bb07ed62d9f78d1debff33ce2fc6f8d5f07a9ebeca2\\",\\"receiptCID\\":\\"bagkacgzaklu3ddgwwsmemfw5b2wgfs6c62euf233o3tslufku4u2v4bdt7za\\",\\"log\\":{\\"cid\\":\\"bagmqcgzagx4cimqpipdrqbxwlw44tfvzedutvmqk4euok6d4n3ge77r2xloq\\",\\"ipldBlock\\":\\"0xf87f31b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a088e1b1a43f3edcb9afe941dfea296f5bc32fab715b5fc9aa101ec26d87d2e8a2a01c27f716f8d8b62fd373e4f08eb48277c22fbb3b3d146ba67313ab3b6d046fd0a00000000000000000000000000000000000000000000000000000000000000001\\"}}"} 5 +28 0x66ae1a7e87d6fb8b791fd98a7b5f001692626feec93f7607e945fd8450c8f9e3 225 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 DelegationTriggered {"principal":"0xDdb18b319BE3530560eECFF962032dFAD88212d4","agent":"0xDdb18b319BE3530560eECFF962032dFAD88212d4"} 
{"topics":["0x185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960","0x000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4"],"data":"0x000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4","tx":{"cid":"bagjqcgzam2xbu7uh235yw6i73gfhwxyac2jge37oze7xmb7jix6yiugi7hrq","txHash":"0x66ae1a7e87d6fb8b791fd98a7b5f001692626feec93f7607e945fd8450c8f9e3","index":438,"src":"0xFDEa65C8e26263F6d9A1B5de9555D2931A33b825","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"DelegationTriggered(address,address)"} {"data":"{\\"blockHash\\":\\"0x1f1f9ddf5435def50e966563a68afc302b1d43b1ead0c15a3b3397a17c452eb9\\",\\"receiptCID\\":\\"bagkacgzaickyui2bivfkwglvhlgs3dzbgzllvitvssccwsyg6evimm4hfaga\\",\\"log\\":{\\"cid\\":\\"bagmqcgzaz22koutltuxcphbuc72dcdt6xuqr2e3mk4w75xksg2zzqaynbmoa\\",\\"ipldBlock\\":\\"0xf87f30b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a0185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960a0000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4a0000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4\\"}}"} 6 +29 0x66ae1a7e87d6fb8b791fd98a7b5f001692626feec93f7607e945fd8450c8f9e3 226 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 PhisherStatusUpdated {"entity":"0xd03b69864961ea513339c2896c365ffde0e6620a1ab832d93c6656f8ce6f988e","isPhisher":true} {"topics":["0x9d3712f4978fc20b17a1dfbcd563f9aded75d05b6019427a9eca23245220138b","0xd03b69864961ea513339c2896c365ffde0e6620a1ab832d93c6656f8ce6f988e"],"data":"0x0000000000000000000000000000000000000000000000000000000000000001","tx":{"cid":"bagjqcgzam2xbu7uh235yw6i73gfhwxyac2jge37oze7xmb7jix6yiugi7hrq","txHash":"0x66ae1a7e87d6fb8b791fd98a7b5f001692626feec93f7607e945fd8450c8f9e3","index":438,"src":"0xFDEa65C8e26263F6d9A1B5de9555D2931A33b825","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"PhisherStatusUpdated(string,bool)"} 
{"data":"{\\"blockHash\\":\\"0x1f1f9ddf5435def50e966563a68afc302b1d43b1ead0c15a3b3397a17c452eb9\\",\\"receiptCID\\":\\"bagkacgzaickyui2bivfkwglvhlgs3dzbgzllvitvssccwsyg6evimm4hfaga\\",\\"log\\":{\\"cid\\":\\"bagmqcgza2uylmeipltns5rcegmzev2dtcpm3yf7exr7azelvmmc45p7en3na\\",\\"ipldBlock\\":\\"0xf87f20b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a09d3712f4978fc20b17a1dfbcd563f9aded75d05b6019427a9eca23245220138ba0d03b69864961ea513339c2896c365ffde0e6620a1ab832d93c6656f8ce6f988ea00000000000000000000000000000000000000000000000000000000000000001\\"}}"} 6 +30 0x66ae1a7e87d6fb8b791fd98a7b5f001692626feec93f7607e945fd8450c8f9e3 227 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 DelegationTriggered {"principal":"0xDdb18b319BE3530560eECFF962032dFAD88212d4","agent":"0xDdb18b319BE3530560eECFF962032dFAD88212d4"} {"topics":["0x185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960","0x000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4"],"data":"0x000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4","tx":{"cid":"bagjqcgzam2xbu7uh235yw6i73gfhwxyac2jge37oze7xmb7jix6yiugi7hrq","txHash":"0x66ae1a7e87d6fb8b791fd98a7b5f001692626feec93f7607e945fd8450c8f9e3","index":438,"src":"0xFDEa65C8e26263F6d9A1B5de9555D2931A33b825","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"DelegationTriggered(address,address)"} {"data":"{\\"blockHash\\":\\"0x1f1f9ddf5435def50e966563a68afc302b1d43b1ead0c15a3b3397a17c452eb9\\",\\"receiptCID\\":\\"bagkacgzaickyui2bivfkwglvhlgs3dzbgzllvitvssccwsyg6evimm4hfaga\\",\\"log\\":{\\"cid\\":\\"bagmqcgzanj72wfbfvqby3dvz3jnh5nwstmvl3nlm6kxrkgfio7z643s2qesq\\",\\"ipldBlock\\":\\"0xf87f20b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a0185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960a0000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4a0000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4\\"}}"} 6 +31 
0x66ae1a7e87d6fb8b791fd98a7b5f001692626feec93f7607e945fd8450c8f9e3 228 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 PhisherStatusUpdated {"entity":"0xb3beb6867a4bef1f11b65e036b831cd3b81e74898005c13110e0539fc74e8183","isPhisher":true} {"topics":["0x9d3712f4978fc20b17a1dfbcd563f9aded75d05b6019427a9eca23245220138b","0xb3beb6867a4bef1f11b65e036b831cd3b81e74898005c13110e0539fc74e8183"],"data":"0x0000000000000000000000000000000000000000000000000000000000000001","tx":{"cid":"bagjqcgzam2xbu7uh235yw6i73gfhwxyac2jge37oze7xmb7jix6yiugi7hrq","txHash":"0x66ae1a7e87d6fb8b791fd98a7b5f001692626feec93f7607e945fd8450c8f9e3","index":438,"src":"0xFDEa65C8e26263F6d9A1B5de9555D2931A33b825","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"PhisherStatusUpdated(string,bool)"} {"data":"{\\"blockHash\\":\\"0x1f1f9ddf5435def50e966563a68afc302b1d43b1ead0c15a3b3397a17c452eb9\\",\\"receiptCID\\":\\"bagkacgzaickyui2bivfkwglvhlgs3dzbgzllvitvssccwsyg6evimm4hfaga\\",\\"log\\":{\\"cid\\":\\"bagmqcgza23km44tuxt7uhtvhagfn4imaoctdxsvobpdgqtjpunsd7gk3owwq\\",\\"ipldBlock\\":\\"0xf87f20b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a09d3712f4978fc20b17a1dfbcd563f9aded75d05b6019427a9eca23245220138ba0b3beb6867a4bef1f11b65e036b831cd3b81e74898005c13110e0539fc74e8183a00000000000000000000000000000000000000000000000000000000000000001\\"}}"} 6 +32 0x66ae1a7e87d6fb8b791fd98a7b5f001692626feec93f7607e945fd8450c8f9e3 229 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 DelegationTriggered {"principal":"0xDdb18b319BE3530560eECFF962032dFAD88212d4","agent":"0xDdb18b319BE3530560eECFF962032dFAD88212d4"} 
{"topics":["0x185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960","0x000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4"],"data":"0x000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4","tx":{"cid":"bagjqcgzam2xbu7uh235yw6i73gfhwxyac2jge37oze7xmb7jix6yiugi7hrq","txHash":"0x66ae1a7e87d6fb8b791fd98a7b5f001692626feec93f7607e945fd8450c8f9e3","index":438,"src":"0xFDEa65C8e26263F6d9A1B5de9555D2931A33b825","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"DelegationTriggered(address,address)"} {"data":"{\\"blockHash\\":\\"0x1f1f9ddf5435def50e966563a68afc302b1d43b1ead0c15a3b3397a17c452eb9\\",\\"receiptCID\\":\\"bagkacgzaickyui2bivfkwglvhlgs3dzbgzllvitvssccwsyg6evimm4hfaga\\",\\"log\\":{\\"cid\\":\\"bagmqcgzanj72wfbfvqby3dvz3jnh5nwstmvl3nlm6kxrkgfio7z643s2qesq\\",\\"ipldBlock\\":\\"0xf87f20b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a0185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960a0000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4a0000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4\\"}}"} 6 +33 0x66ae1a7e87d6fb8b791fd98a7b5f001692626feec93f7607e945fd8450c8f9e3 230 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 PhisherStatusUpdated {"entity":"0xed6ad0a79ec0ad3e559cf0f958d9e28c6e6bf6be025a8249a975c9a8e2180acf","isPhisher":true} {"topics":["0x9d3712f4978fc20b17a1dfbcd563f9aded75d05b6019427a9eca23245220138b","0xed6ad0a79ec0ad3e559cf0f958d9e28c6e6bf6be025a8249a975c9a8e2180acf"],"data":"0x0000000000000000000000000000000000000000000000000000000000000001","tx":{"cid":"bagjqcgzam2xbu7uh235yw6i73gfhwxyac2jge37oze7xmb7jix6yiugi7hrq","txHash":"0x66ae1a7e87d6fb8b791fd98a7b5f001692626feec93f7607e945fd8450c8f9e3","index":438,"src":"0xFDEa65C8e26263F6d9A1B5de9555D2931A33b825","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"PhisherStatusUpdated(string,bool)"} 
{"data":"{\\"blockHash\\":\\"0x1f1f9ddf5435def50e966563a68afc302b1d43b1ead0c15a3b3397a17c452eb9\\",\\"receiptCID\\":\\"bagkacgzaickyui2bivfkwglvhlgs3dzbgzllvitvssccwsyg6evimm4hfaga\\",\\"log\\":{\\"cid\\":\\"bagmqcgzatns5jnxezocu52ibouvcladwphpkervyibz35llxy4kxra5kqrxq\\",\\"ipldBlock\\":\\"0xf87f20b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a09d3712f4978fc20b17a1dfbcd563f9aded75d05b6019427a9eca23245220138ba0ed6ad0a79ec0ad3e559cf0f958d9e28c6e6bf6be025a8249a975c9a8e2180acfa00000000000000000000000000000000000000000000000000000000000000001\\"}}"} 6 +34 0x66ae1a7e87d6fb8b791fd98a7b5f001692626feec93f7607e945fd8450c8f9e3 231 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 DelegationTriggered {"principal":"0xDdb18b319BE3530560eECFF962032dFAD88212d4","agent":"0xDdb18b319BE3530560eECFF962032dFAD88212d4"} {"topics":["0x185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960","0x000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4"],"data":"0x000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4","tx":{"cid":"bagjqcgzam2xbu7uh235yw6i73gfhwxyac2jge37oze7xmb7jix6yiugi7hrq","txHash":"0x66ae1a7e87d6fb8b791fd98a7b5f001692626feec93f7607e945fd8450c8f9e3","index":438,"src":"0xFDEa65C8e26263F6d9A1B5de9555D2931A33b825","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"DelegationTriggered(address,address)"} {"data":"{\\"blockHash\\":\\"0x1f1f9ddf5435def50e966563a68afc302b1d43b1ead0c15a3b3397a17c452eb9\\",\\"receiptCID\\":\\"bagkacgzaickyui2bivfkwglvhlgs3dzbgzllvitvssccwsyg6evimm4hfaga\\",\\"log\\":{\\"cid\\":\\"bagmqcgzanj72wfbfvqby3dvz3jnh5nwstmvl3nlm6kxrkgfio7z643s2qesq\\",\\"ipldBlock\\":\\"0xf87f20b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a0185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960a0000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4a0000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4\\"}}"} 6 +35 
0x66ae1a7e87d6fb8b791fd98a7b5f001692626feec93f7607e945fd8450c8f9e3 232 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 PhisherStatusUpdated {"entity":"0x8f9e6c0c3630ec9bccfb22c903753257d2352a9800255daafcf1665ed3d4be45","isPhisher":true} {"topics":["0x9d3712f4978fc20b17a1dfbcd563f9aded75d05b6019427a9eca23245220138b","0x8f9e6c0c3630ec9bccfb22c903753257d2352a9800255daafcf1665ed3d4be45"],"data":"0x0000000000000000000000000000000000000000000000000000000000000001","tx":{"cid":"bagjqcgzam2xbu7uh235yw6i73gfhwxyac2jge37oze7xmb7jix6yiugi7hrq","txHash":"0x66ae1a7e87d6fb8b791fd98a7b5f001692626feec93f7607e945fd8450c8f9e3","index":438,"src":"0xFDEa65C8e26263F6d9A1B5de9555D2931A33b825","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"PhisherStatusUpdated(string,bool)"} {"data":"{\\"blockHash\\":\\"0x1f1f9ddf5435def50e966563a68afc302b1d43b1ead0c15a3b3397a17c452eb9\\",\\"receiptCID\\":\\"bagkacgzaickyui2bivfkwglvhlgs3dzbgzllvitvssccwsyg6evimm4hfaga\\",\\"log\\":{\\"cid\\":\\"bagmqcgzaeb4dn6y2qmnizhopkyr7poewd66gm2brx76cskal6kv5pn55hukq\\",\\"ipldBlock\\":\\"0xf87f20b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a09d3712f4978fc20b17a1dfbcd563f9aded75d05b6019427a9eca23245220138ba08f9e6c0c3630ec9bccfb22c903753257d2352a9800255daafcf1665ed3d4be45a00000000000000000000000000000000000000000000000000000000000000001\\"}}"} 6 +36 0x66ae1a7e87d6fb8b791fd98a7b5f001692626feec93f7607e945fd8450c8f9e3 233 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 DelegationTriggered {"principal":"0xDdb18b319BE3530560eECFF962032dFAD88212d4","agent":"0xDdb18b319BE3530560eECFF962032dFAD88212d4"} 
{"topics":["0x185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960","0x000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4"],"data":"0x000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4","tx":{"cid":"bagjqcgzam2xbu7uh235yw6i73gfhwxyac2jge37oze7xmb7jix6yiugi7hrq","txHash":"0x66ae1a7e87d6fb8b791fd98a7b5f001692626feec93f7607e945fd8450c8f9e3","index":438,"src":"0xFDEa65C8e26263F6d9A1B5de9555D2931A33b825","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"DelegationTriggered(address,address)"} {"data":"{\\"blockHash\\":\\"0x1f1f9ddf5435def50e966563a68afc302b1d43b1ead0c15a3b3397a17c452eb9\\",\\"receiptCID\\":\\"bagkacgzaickyui2bivfkwglvhlgs3dzbgzllvitvssccwsyg6evimm4hfaga\\",\\"log\\":{\\"cid\\":\\"bagmqcgzanj72wfbfvqby3dvz3jnh5nwstmvl3nlm6kxrkgfio7z643s2qesq\\",\\"ipldBlock\\":\\"0xf87f20b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a0185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960a0000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4a0000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4\\"}}"} 6 +37 0x66ae1a7e87d6fb8b791fd98a7b5f001692626feec93f7607e945fd8450c8f9e3 234 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 PhisherStatusUpdated {"entity":"0x895499123a28e797f284b94560fcc346a421533cb3ed9d4373293d533849e523","isPhisher":true} {"topics":["0x9d3712f4978fc20b17a1dfbcd563f9aded75d05b6019427a9eca23245220138b","0x895499123a28e797f284b94560fcc346a421533cb3ed9d4373293d533849e523"],"data":"0x0000000000000000000000000000000000000000000000000000000000000001","tx":{"cid":"bagjqcgzam2xbu7uh235yw6i73gfhwxyac2jge37oze7xmb7jix6yiugi7hrq","txHash":"0x66ae1a7e87d6fb8b791fd98a7b5f001692626feec93f7607e945fd8450c8f9e3","index":438,"src":"0xFDEa65C8e26263F6d9A1B5de9555D2931A33b825","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"PhisherStatusUpdated(string,bool)"} 
{"data":"{\\"blockHash\\":\\"0x1f1f9ddf5435def50e966563a68afc302b1d43b1ead0c15a3b3397a17c452eb9\\",\\"receiptCID\\":\\"bagkacgzaickyui2bivfkwglvhlgs3dzbgzllvitvssccwsyg6evimm4hfaga\\",\\"log\\":{\\"cid\\":\\"bagmqcgzapdolzcaiqir2ankq2of4kdts5spg7ov5ofkgqora47u6kmpijwza\\",\\"ipldBlock\\":\\"0xf87f20b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a09d3712f4978fc20b17a1dfbcd563f9aded75d05b6019427a9eca23245220138ba0895499123a28e797f284b94560fcc346a421533cb3ed9d4373293d533849e523a00000000000000000000000000000000000000000000000000000000000000001\\"}}"} 6 +38 0x66ae1a7e87d6fb8b791fd98a7b5f001692626feec93f7607e945fd8450c8f9e3 235 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 DelegationTriggered {"principal":"0xDdb18b319BE3530560eECFF962032dFAD88212d4","agent":"0xDdb18b319BE3530560eECFF962032dFAD88212d4"} {"topics":["0x185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960","0x000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4"],"data":"0x000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4","tx":{"cid":"bagjqcgzam2xbu7uh235yw6i73gfhwxyac2jge37oze7xmb7jix6yiugi7hrq","txHash":"0x66ae1a7e87d6fb8b791fd98a7b5f001692626feec93f7607e945fd8450c8f9e3","index":438,"src":"0xFDEa65C8e26263F6d9A1B5de9555D2931A33b825","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"DelegationTriggered(address,address)"} {"data":"{\\"blockHash\\":\\"0x1f1f9ddf5435def50e966563a68afc302b1d43b1ead0c15a3b3397a17c452eb9\\",\\"receiptCID\\":\\"bagkacgzaickyui2bivfkwglvhlgs3dzbgzllvitvssccwsyg6evimm4hfaga\\",\\"log\\":{\\"cid\\":\\"bagmqcgzanj72wfbfvqby3dvz3jnh5nwstmvl3nlm6kxrkgfio7z643s2qesq\\",\\"ipldBlock\\":\\"0xf87f20b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a0185d11175440fcb6458fbc1889b02953452539ed80ad1da781a5449500f6d960a0000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4a0000000000000000000000000ddb18b319be3530560eecff962032dfad88212d4\\"}}"} 6 +39 
0x66ae1a7e87d6fb8b791fd98a7b5f001692626feec93f7607e945fd8450c8f9e3 236 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 PhisherStatusUpdated {"entity":"0x6d99b9b8f38c764f028cc564a69e4aa3c0d94fd4df0a9b0aab23cec3cfa03426","isPhisher":true} {"topics":["0x9d3712f4978fc20b17a1dfbcd563f9aded75d05b6019427a9eca23245220138b","0x6d99b9b8f38c764f028cc564a69e4aa3c0d94fd4df0a9b0aab23cec3cfa03426"],"data":"0x0000000000000000000000000000000000000000000000000000000000000001","tx":{"cid":"bagjqcgzam2xbu7uh235yw6i73gfhwxyac2jge37oze7xmb7jix6yiugi7hrq","txHash":"0x66ae1a7e87d6fb8b791fd98a7b5f001692626feec93f7607e945fd8450c8f9e3","index":438,"src":"0xFDEa65C8e26263F6d9A1B5de9555D2931A33b825","dst":"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8","__typename":"EthTransactionCid"},"eventSignature":"PhisherStatusUpdated(string,bool)"} {"data":"{\\"blockHash\\":\\"0x1f1f9ddf5435def50e966563a68afc302b1d43b1ead0c15a3b3397a17c452eb9\\",\\"receiptCID\\":\\"bagkacgzaickyui2bivfkwglvhlgs3dzbgzllvitvssccwsyg6evimm4hfaga\\",\\"log\\":{\\"cid\\":\\"bagmqcgzak4f2sns3dh6lmajwdimphm2h6rj4lqobhu2hrjndtzrkabhywuha\\",\\"ipldBlock\\":\\"0xf87f20b87cf87a94b06e6db9288324738f04fcaac910f5a60102c1f8f842a09d3712f4978fc20b17a1dfbcd563f9aded75d05b6019427a9eca23245220138ba06d99b9b8f38c764f028cc564a69e4aa3c0d94fd4df0a9b0aab23cec3cfa03426a00000000000000000000000000000000000000000000000000000000000000001\\"}}"} 6 +\. 
+ + +-- +-- Data for Name: ipld_block; Type: TABLE DATA; Schema: public; Owner: vdbm +-- + +COPY public.ipld_block (id, contract_address, cid, kind, data, block_id) FROM stdin; +1 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 bafyreigxcduvu2npfat2zunf2su63vmksekmqw6hlq7ijz7kfwvsbjolwe init \\xa2646d657461a4626964782a307842303645364442393238383332343733386630346643414163393130663541363031303243314638646b696e6464696e697466706172656e74a1612ff668657468426c6f636ba263636964a1612f783d626167696163677a61686b366171626270373568667432787674716e6a3432357161786a377a6534667370796b63733734356379786733346262336261636e756d1a00e2e4d1657374617465a0 1 +2 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 bafyreihshcncfaozkbpybok4scslmi4ogkdsmoo5guctkl3ov5ij4e7ena diff_staged \\xa2646d657461a4626964782a307842303645364442393238383332343733386630346643414163393130663541363031303243314638646b696e646b646966665f73746167656466706172656e74a1612f783b6261667972656967786364757675326e70666174327a756e663273753633766d6b73656b6d717736686c7137696a7a376b66777673626a6f6c776568657468426c6f636ba263636964a1612f783d626167696163677a61666466726e7a32617a766f783332646a7833726a6b377475696a347135686c786a7a78686461636b6d366a747937746371613461636e756d1a00e2fa61657374617465a16869734d656d626572a46c5457543a6b756d617669735f64747275656c5457543a6d6574616d61736b64747275656c5457543a74617976616e6f5f64747275656d5457543a64616e66696e6c61796474727565 2 +3 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 bafyreidnohfh3z2rgge2z6amrdn33ce66gdusrcwar2kfoig5ijozqo6he diff_staged 
\\xa2646d657461a4626964782a307842303645364442393238383332343733386630346643414163393130663541363031303243314638646b696e646b646966665f73746167656466706172656e74a1612f783b6261667972656967786364757675326e70666174327a756e663273753633766d6b73656b6d717736686c7137696a7a376b66777673626a6f6c776568657468426c6f636ba263636964a1612f783d626167696163677a616e36727078656534746d34676d7a6763657233797834656e70766f6474707a6e327432626a6a373263626c6b68726e6735627861636e756d1a00e2fef5657374617465a16869734d656d626572a26c5457543a72656b6d61726b736474727565715457543a6f6d6e61746f73686e6977616c6474727565 3 +4 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 bafyreidhsglp25dozbewxekeb5hueh5q4tu5kupwbn6q7tejtpmnk66qsu diff_staged \\xa2646d657461a4626964782a307842303645364442393238383332343733386630346643414163393130663541363031303243314638646b696e646b646966665f73746167656466706172656e74a1612f783b6261667972656967786364757675326e70666174327a756e663273753633766d6b73656b6d717736686c7137696a7a376b66777673626a6f6c776568657468426c6f636ba263636964a1612f783d626167696163677a616272636d6b6c736435633365677132686c72797067376f706167747675797371616635723271376e75653273746f7a6978626161636e756d1a00e32009657374617465a16869734d656d626572a66d5457543a61666475646c65793064747275656d5457543a666f616d737061636564747275656d5457543a66726f74686369747964747275656f5457543a76756c63616e697a65696f6474727565715457543a6d696b6567757368616e736b796474727565725457543a6c61636f6e69636e6574776f726b6474727565 4 +5 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 bafyreifocrnaxaj4qod3atzj4ipq3ocjztlydl3gcgmxiilbi4dbd2o2be diff_staged 
\\xa2646d657461a4626964782a307842303645364442393238383332343733386630346643414163393130663541363031303243314638646b696e646b646966665f73746167656466706172656e74a1612f783b6261667972656967786364757675326e70666174327a756e663273753633766d6b73656b6d717736686c7137696a7a376b66777673626a6f6c776568657468426c6f636ba263636964a1612f783d626167696163677a6176363268617963373362757a6b663234666f79683576726e7435346e64787637366d366f6637647072767071706b706c35737261636e756d1a00e3237b657374617465a16869734d656d626572a1735457543a64656e6e69736f6e6265727472616d6474727565 5 +6 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 bafyreicls2qpsocxj6yqwb2ujvrchi7zxeynh5qpevfy6o4un4qapwuwdy diff_staged \\xa2646d657461a4626964782a307842303645364442393238383332343733386630346643414163393130663541363031303243314638646b696e646b646966665f73746167656466706172656e74a1612f783b6261667972656967786364757675326e70666174327a756e663273753633766d6b73656b6d717736686c7137696a7a376b66777673626a6f6c776568657468426c6f636ba263636964a1612f783d626167696163677a616434707a33783275677870706b6475776d7672326e6378346761767232713572356c696d63777233676f6c326337636666323471636e756d1a00e87492657374617465a169697350686973686572a66e5457543a6a67686f7374323031306474727565715457543a6a6164656e37323434303030316474727565735457543a6261647361736b39323539333438396474727565735457543a6361737369647930363131343136356474727565735457543a65737472656c6c33313136333633316474727565735457543a6b696e6762656e37313335333833376474727565 6 +\. + + +-- +-- Data for Name: ipld_status; Type: TABLE DATA; Schema: public; Owner: vdbm +-- + +COPY public.ipld_status (id, latest_hooks_block_number, latest_checkpoint_block_number, latest_ipfs_block_number) FROM stdin; +\. 
+ + +-- +-- Data for Name: is_member; Type: TABLE DATA; Schema: public; Owner: vdbm +-- + +COPY public.is_member (id, block_hash, block_number, contract_address, key0, value, proof) FROM stdin; +1 0x28cb16e740cd5d7de869bee2957e7442790e9d774e6e71804a67933c7e628038 14875233 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 TWT:danfinlay t {"data":"{\\"blockHash\\":\\"0x28cb16e740cd5d7de869bee2957e7442790e9d774e6e71804a67933c7e628038\\",\\"account\\":{\\"address\\":\\"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8\\",\\"storage\\":{\\"cid\\":\\"bagmacgzajz2idgp3mppl3xecw2jiyrdtpqxdsks3l2vayyrhylj2ddrsvf2q\\",\\"ipldBlock\\":\\"0xe2a0203d41e15b233c6d8a6221399699ffc64b2cca7ada26b947d7642b930362ca2001\\"}}}"} +2 0x28cb16e740cd5d7de869bee2957e7442790e9d774e6e71804a67933c7e628038 14875233 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 TWT:metamask t {"data":"{\\"blockHash\\":\\"0x28cb16e740cd5d7de869bee2957e7442790e9d774e6e71804a67933c7e628038\\",\\"account\\":{\\"address\\":\\"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8\\",\\"storage\\":{\\"cid\\":\\"bagmacgzayklkqlq7oyerf7d46p2bnccsqgnj24z5ey5iwnn3nesl5b6t2bba\\",\\"ipldBlock\\":\\"0xe2a0208bb17e9a3a883c386024f8e1a6976a71526c4598fd5577bde1e8e78dc5cceb01\\"}}}"} +3 0x28cb16e740cd5d7de869bee2957e7442790e9d774e6e71804a67933c7e628038 14875233 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 TWT:kumavis_ t {"data":"{\\"blockHash\\":\\"0x28cb16e740cd5d7de869bee2957e7442790e9d774e6e71804a67933c7e628038\\",\\"account\\":{\\"address\\":\\"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8\\",\\"storage\\":{\\"cid\\":\\"bagmacgzac4qmw47e5joqwqb62grydulsl62z6auzi3bpimezqowvedyqfz4a\\",\\"ipldBlock\\":\\"0xe2a020c4db4f66db1cb7f05bfa6518607749beab650a765c80492a458fbef069d21d01\\"}}}"} +4 0x28cb16e740cd5d7de869bee2957e7442790e9d774e6e71804a67933c7e628038 14875233 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 TWT:tayvano_ t 
{"data":"{\\"blockHash\\":\\"0x28cb16e740cd5d7de869bee2957e7442790e9d774e6e71804a67933c7e628038\\",\\"account\\":{\\"address\\":\\"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8\\",\\"storage\\":{\\"cid\\":\\"bagmacgzau2pcjzqad7bvet5tqprkvo75uyfiuiewle3rzgka65xb4msinxxq\\",\\"ipldBlock\\":\\"0xe2a0325a534478c2e78913d54d916517598739b2920691f3cdaa47dd025f4718492401\\"}}}"} +5 0x6fa2fb909c9b386664c224778bf08d7d5c39bf2dd4f414a7fa1056a3c5a6e86e 14876405 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 TWT:rekmarks t {"data":"{\\"blockHash\\":\\"0x6fa2fb909c9b386664c224778bf08d7d5c39bf2dd4f414a7fa1056a3c5a6e86e\\",\\"account\\":{\\"address\\":\\"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8\\",\\"storage\\":{\\"cid\\":\\"bagmacgza6bl5chphg5sp2hbmakf3m3hf5i2aqpwniit7fquldl4cyz6rcjyq\\",\\"ipldBlock\\":\\"0xe2a0370e3dd0b59d081149bd02578f68bc8b82b38d83a65eab9c0039330f2f44b1be01\\"}}}"} +6 0x6fa2fb909c9b386664c224778bf08d7d5c39bf2dd4f414a7fa1056a3c5a6e86e 14876405 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 TWT:omnatoshniwal t {"data":"{\\"blockHash\\":\\"0x6fa2fb909c9b386664c224778bf08d7d5c39bf2dd4f414a7fa1056a3c5a6e86e\\",\\"account\\":{\\"address\\":\\"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8\\",\\"storage\\":{\\"cid\\":\\"bagmacgzaevw2g7ldqq7u2cifx625hj2mtgpthw2gxo55hi3kfhmirlco27kq\\",\\"ipldBlock\\":\\"0xe2a020099e064c465e189f524b4ea5e1e1f880cc2404d54a5c3820cae1426406e3eb01\\"}}}"} +7 0x0c44c52e43e8b64343475c70f37dcf01a75a6250017b1d43eda13529bb28b840 14884873 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 TWT:afdudley0 t {"data":"{\\"blockHash\\":\\"0x0c44c52e43e8b64343475c70f37dcf01a75a6250017b1d43eda13529bb28b840\\",\\"account\\":{\\"address\\":\\"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8\\",\\"storage\\":{\\"cid\\":\\"bagmacgza4yb2o77os2exgj7ao2gmcycrktszfccus2pgiqayoyesbyv36yuq\\",\\"ipldBlock\\":\\"0xe2a0206f8288d5713c0319b22d7d7871ea9f79da0e2a69c4810045f7f9d8b513c97701\\"}}}"} +8 0x0c44c52e43e8b64343475c70f37dcf01a75a6250017b1d43eda13529bb28b840 14884873 
0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 TWT:vulcanizeio t {"data":"{\\"blockHash\\":\\"0x0c44c52e43e8b64343475c70f37dcf01a75a6250017b1d43eda13529bb28b840\\",\\"account\\":{\\"address\\":\\"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8\\",\\"storage\\":{\\"cid\\":\\"bagmacgza43rlyrrrvwbxuo4jwrk2aibx2yau2jwubvtkmufdu62ndxti5pla\\",\\"ipldBlock\\":\\"0xe2a020a206b39b5245e291b83d5b8bcad50fdca5196cedf7e717b87ab79b8d983f0701\\"}}}"} +9 0x0c44c52e43e8b64343475c70f37dcf01a75a6250017b1d43eda13529bb28b840 14884873 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 TWT:laconicnetwork t {"data":"{\\"blockHash\\":\\"0x0c44c52e43e8b64343475c70f37dcf01a75a6250017b1d43eda13529bb28b840\\",\\"account\\":{\\"address\\":\\"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8\\",\\"storage\\":{\\"cid\\":\\"bagmacgzaqlzj74qpi46z4lepfew43klj5jmyoiuzlhma6o6jozkjybc2lsvq\\",\\"ipldBlock\\":\\"0xe2a020ecd3a96a9329551758da7fdf41b5816885e29b184c3939c13c6ea20206fd2901\\"}}}"} +10 0x0c44c52e43e8b64343475c70f37dcf01a75a6250017b1d43eda13529bb28b840 14884873 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 TWT:mikegushansky t {"data":"{\\"blockHash\\":\\"0x0c44c52e43e8b64343475c70f37dcf01a75a6250017b1d43eda13529bb28b840\\",\\"account\\":{\\"address\\":\\"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8\\",\\"storage\\":{\\"cid\\":\\"bagmacgzalgavzfjocdkshzxwlpqmf3azofoz67rvulr5xxuqsvmmuvadwzdq\\",\\"ipldBlock\\":\\"0xe2a0202951bc50ed50810c883cc3f755dabb64394375acece9ea4be99e5a584fe6c901\\"}}}"} +11 0x0c44c52e43e8b64343475c70f37dcf01a75a6250017b1d43eda13529bb28b840 14884873 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 TWT:foamspace t {"data":"{\\"blockHash\\":\\"0x0c44c52e43e8b64343475c70f37dcf01a75a6250017b1d43eda13529bb28b840\\",\\"account\\":{\\"address\\":\\"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8\\",\\"storage\\":{\\"cid\\":\\"bagmacgzasy7at57g5wewqtzjlkh6vudbs7wbwx5qw7637fwi5b3nunw54usq\\",\\"ipldBlock\\":\\"0xe2a02029d04f9e7b98346aa9c447decb17659db9af23890b9c70f579a029cdcf593c01\\"}}}"} +12 
0x0c44c52e43e8b64343475c70f37dcf01a75a6250017b1d43eda13529bb28b840 14884873 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 TWT:frothcity t {"data":"{\\"blockHash\\":\\"0x0c44c52e43e8b64343475c70f37dcf01a75a6250017b1d43eda13529bb28b840\\",\\"account\\":{\\"address\\":\\"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8\\",\\"storage\\":{\\"cid\\":\\"bagmacgzawvqcds52in2gemhyszayvrl5zfc66up6hcuchxmi3ce4kzi5pweq\\",\\"ipldBlock\\":\\"0xe2a02034ac30337c5c70d2540bb4434e35ce4532a4eab91c852dca23deaacb0e275201\\"}}}"} +13 0xafb470605fd86995175c2bb07ed62d9f78d1debff33ce2fc6f8d5f07a9ebeca2 14885755 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 TWT:dennisonbertram t {"data":"{\\"blockHash\\":\\"0xafb470605fd86995175c2bb07ed62d9f78d1debff33ce2fc6f8d5f07a9ebeca2\\",\\"account\\":{\\"address\\":\\"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8\\",\\"storage\\":{\\"cid\\":\\"bagmacgzaeogthongsys3jydz4jw2sj5t7mqeqbor2qnaium4c5h5v74fqbta\\",\\"ipldBlock\\":\\"0xe19f3fea74c522a79f7db606c382429e0cb363617f45d6fd59cc02a2857144f18801\\"}}}"} +\. 
+ + +-- +-- Data for Name: is_phisher; Type: TABLE DATA; Schema: public; Owner: vdbm +-- + +COPY public.is_phisher (id, block_hash, block_number, contract_address, key0, value, proof) FROM stdin; +1 0x1f1f9ddf5435def50e966563a68afc302b1d43b1ead0c15a3b3397a17c452eb9 15234194 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 TWT:cassidy06114165 t {"data":"{\\"blockHash\\":\\"0x1f1f9ddf5435def50e966563a68afc302b1d43b1ead0c15a3b3397a17c452eb9\\",\\"account\\":{\\"address\\":\\"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8\\",\\"storage\\":{\\"cid\\":\\"bagmacgzadyh6cl32cgz3rnd65247arv3fnjw7p6uqfcfysof4dksd2illf6q\\",\\"ipldBlock\\":\\"0xe2a0203c2016b922ff7b5efb562ade4ce1790eac49e191d0d6230b261475b1c2eb9b01\\"}}}"} +2 0x1f1f9ddf5435def50e966563a68afc302b1d43b1ead0c15a3b3397a17c452eb9 15234194 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 TWT:badsask92593489 t {"data":"{\\"blockHash\\":\\"0x1f1f9ddf5435def50e966563a68afc302b1d43b1ead0c15a3b3397a17c452eb9\\",\\"account\\":{\\"address\\":\\"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8\\",\\"storage\\":{\\"cid\\":\\"bagmacgzaqrrqdxcwdv654m3vpbiafzvjrhrvs7wv5wncbncb665dprx4cnzq\\",\\"ipldBlock\\":\\"0xe2a0204243b96ea0ada3c3ca9668be1e1ab841ee01999a18d1ebebae8ba2d24aa53101\\"}}}"} +3 0x1f1f9ddf5435def50e966563a68afc302b1d43b1ead0c15a3b3397a17c452eb9 15234194 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 TWT:estrell31163631 t {"data":"{\\"blockHash\\":\\"0x1f1f9ddf5435def50e966563a68afc302b1d43b1ead0c15a3b3397a17c452eb9\\",\\"account\\":{\\"address\\":\\"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8\\",\\"storage\\":{\\"cid\\":\\"bagmacgzaq25w3xcn7ahsaclw7lvbhv6wmuft6fwll6gs26pfure52vak2oea\\",\\"ipldBlock\\":\\"0xe2a020e7f0d045adaf03aaca32f26b20a70af72062abbdca72eca237efe7fe297a6a01\\"}}}"} +4 0x1f1f9ddf5435def50e966563a68afc302b1d43b1ead0c15a3b3397a17c452eb9 15234194 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 TWT:kingben71353837 t 
{"data":"{\\"blockHash\\":\\"0x1f1f9ddf5435def50e966563a68afc302b1d43b1ead0c15a3b3397a17c452eb9\\",\\"account\\":{\\"address\\":\\"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8\\",\\"storage\\":{\\"cid\\":\\"bagmacgzal4unm5r3ut4fsolqkibsowhada5aixdmjfaubaxamlrxes2t3eza\\",\\"ipldBlock\\":\\"0xe2a0347aeddef1702483d61eca78b85ff35caff4917a18acef04923858e206c58da401\\"}}}"} +5 0x1f1f9ddf5435def50e966563a68afc302b1d43b1ead0c15a3b3397a17c452eb9 15234194 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 TWT:jaden72440001 t {"data":"{\\"blockHash\\":\\"0x1f1f9ddf5435def50e966563a68afc302b1d43b1ead0c15a3b3397a17c452eb9\\",\\"account\\":{\\"address\\":\\"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8\\",\\"storage\\":{\\"cid\\":\\"bagmacgzadfup5fbucciy32alz4upntikcijqiqvwcjszkmuuugna26raioca\\",\\"ipldBlock\\":\\"0xe2a03c76ec48ccf04032d7c8463b37c68e68de9a2602967327c3c70f1a15a11f117b01\\"}}}"} +6 0x1f1f9ddf5435def50e966563a68afc302b1d43b1ead0c15a3b3397a17c452eb9 15234194 0xB06E6DB9288324738f04fCAAc910f5A60102C1F8 TWT:jghost2010 t {"data":"{\\"blockHash\\":\\"0x1f1f9ddf5435def50e966563a68afc302b1d43b1ead0c15a3b3397a17c452eb9\\",\\"account\\":{\\"address\\":\\"0xB06E6DB9288324738f04fCAAc910f5A60102C1F8\\",\\"storage\\":{\\"cid\\":\\"bagmacgzab5h56mqwe45hy3labtlq5tp7hsquoimrfgx3c2eycghukydumcoq\\",\\"ipldBlock\\":\\"0xe2a03da5b9c90f8be3d46373dc4c983ff2427d64c22470e858e62e5b25dd53ff8c7e01\\"}}}"} +\. + + +-- +-- Data for Name: is_revoked; Type: TABLE DATA; Schema: public; Owner: vdbm +-- + +COPY public.is_revoked (id, block_hash, block_number, contract_address, key0, value, proof) FROM stdin; +\. + + +-- +-- Data for Name: multi_nonce; Type: TABLE DATA; Schema: public; Owner: vdbm +-- + +COPY public.multi_nonce (id, block_hash, block_number, contract_address, key0, key1, value, proof) FROM stdin; +\. 
+ + +-- +-- Data for Name: sync_status; Type: TABLE DATA; Schema: public; Owner: vdbm +-- + +COPY public.sync_status (id, chain_head_block_hash, chain_head_block_number, latest_indexed_block_hash, latest_indexed_block_number, latest_canonical_block_hash, latest_canonical_block_number, initial_indexed_block_hash, initial_indexed_block_number) FROM stdin; +\. + + +-- +-- Name: _owner_id_seq; Type: SEQUENCE SET; Schema: public; Owner: vdbm +-- + +SELECT pg_catalog.setval('public._owner_id_seq', 1, false); + + +-- +-- Name: block_progress_id_seq; Type: SEQUENCE SET; Schema: public; Owner: vdbm +-- + +SELECT pg_catalog.setval('public.block_progress_id_seq', 6, true); + + +-- +-- Name: contract_id_seq; Type: SEQUENCE SET; Schema: public; Owner: vdbm +-- + +SELECT pg_catalog.setval('public.contract_id_seq', 1, true); + + +-- +-- Name: domain_hash_id_seq; Type: SEQUENCE SET; Schema: public; Owner: vdbm +-- + +SELECT pg_catalog.setval('public.domain_hash_id_seq', 1, false); + + +-- +-- Name: event_id_seq; Type: SEQUENCE SET; Schema: public; Owner: vdbm +-- + +SELECT pg_catalog.setval('public.event_id_seq', 39, true); + + +-- +-- Name: ipld_block_id_seq; Type: SEQUENCE SET; Schema: public; Owner: vdbm +-- + +SELECT pg_catalog.setval('public.ipld_block_id_seq', 6, true); + + +-- +-- Name: ipld_status_id_seq; Type: SEQUENCE SET; Schema: public; Owner: vdbm +-- + +SELECT pg_catalog.setval('public.ipld_status_id_seq', 1, false); + + +-- +-- Name: is_member_id_seq; Type: SEQUENCE SET; Schema: public; Owner: vdbm +-- + +SELECT pg_catalog.setval('public.is_member_id_seq', 13, true); + + +-- +-- Name: is_phisher_id_seq; Type: SEQUENCE SET; Schema: public; Owner: vdbm +-- + +SELECT pg_catalog.setval('public.is_phisher_id_seq', 6, true); + + +-- +-- Name: is_revoked_id_seq; Type: SEQUENCE SET; Schema: public; Owner: vdbm +-- + +SELECT pg_catalog.setval('public.is_revoked_id_seq', 1, false); + + +-- +-- Name: multi_nonce_id_seq; Type: SEQUENCE SET; Schema: public; Owner: vdbm +-- + 
+SELECT pg_catalog.setval('public.multi_nonce_id_seq', 1, false); + + +-- +-- Name: sync_status_id_seq; Type: SEQUENCE SET; Schema: public; Owner: vdbm +-- + +SELECT pg_catalog.setval('public.sync_status_id_seq', 1, false); + + +-- +-- Name: contract PK_17c3a89f58a2997276084e706e8; Type: CONSTRAINT; Schema: public; Owner: vdbm +-- + +ALTER TABLE ONLY public.contract + ADD CONSTRAINT "PK_17c3a89f58a2997276084e706e8" PRIMARY KEY (id); + + +-- +-- Name: domain_hash PK_1b2fb63b534a5a1034c9de4af2d; Type: CONSTRAINT; Schema: public; Owner: vdbm +-- + +ALTER TABLE ONLY public.domain_hash + ADD CONSTRAINT "PK_1b2fb63b534a5a1034c9de4af2d" PRIMARY KEY (id); + + +-- +-- Name: event PK_30c2f3bbaf6d34a55f8ae6e4614; Type: CONSTRAINT; Schema: public; Owner: vdbm +-- + +ALTER TABLE ONLY public.event + ADD CONSTRAINT "PK_30c2f3bbaf6d34a55f8ae6e4614" PRIMARY KEY (id); + + +-- +-- Name: multi_nonce PK_31dab24db96d04fbf687ae28b00; Type: CONSTRAINT; Schema: public; Owner: vdbm +-- + +ALTER TABLE ONLY public.multi_nonce + ADD CONSTRAINT "PK_31dab24db96d04fbf687ae28b00" PRIMARY KEY (id); + + +-- +-- Name: ipld_block PK_35d483f7d0917b68494f40066ac; Type: CONSTRAINT; Schema: public; Owner: vdbm +-- + +ALTER TABLE ONLY public.ipld_block + ADD CONSTRAINT "PK_35d483f7d0917b68494f40066ac" PRIMARY KEY (id); + + +-- +-- Name: _owner PK_3ecb7a5aa92511dde29aa90a070; Type: CONSTRAINT; Schema: public; Owner: vdbm +-- + +ALTER TABLE ONLY public._owner + ADD CONSTRAINT "PK_3ecb7a5aa92511dde29aa90a070" PRIMARY KEY (id); + + +-- +-- Name: is_revoked PK_578b81f9905005c7113f7bed9a3; Type: CONSTRAINT; Schema: public; Owner: vdbm +-- + +ALTER TABLE ONLY public.is_revoked + ADD CONSTRAINT "PK_578b81f9905005c7113f7bed9a3" PRIMARY KEY (id); + + +-- +-- Name: is_phisher PK_753c1da426677f67c51cd98d35e; Type: CONSTRAINT; Schema: public; Owner: vdbm +-- + +ALTER TABLE ONLY public.is_phisher + ADD CONSTRAINT "PK_753c1da426677f67c51cd98d35e" PRIMARY KEY (id); + + +-- +-- Name: sync_status 
PK_86336482262ab8d5b548a4a71b7; Type: CONSTRAINT; Schema: public; Owner: vdbm +-- + +ALTER TABLE ONLY public.sync_status + ADD CONSTRAINT "PK_86336482262ab8d5b548a4a71b7" PRIMARY KEY (id); + + +-- +-- Name: is_member PK_ab8bdc3ccfa64e2876d744e2e36; Type: CONSTRAINT; Schema: public; Owner: vdbm +-- + +ALTER TABLE ONLY public.is_member + ADD CONSTRAINT "PK_ab8bdc3ccfa64e2876d744e2e36" PRIMARY KEY (id); + + +-- +-- Name: block_progress PK_c01eea7890543f34821c499e874; Type: CONSTRAINT; Schema: public; Owner: vdbm +-- + +ALTER TABLE ONLY public.block_progress + ADD CONSTRAINT "PK_c01eea7890543f34821c499e874" PRIMARY KEY (id); + + +-- +-- Name: ipld_status PK_fda882aed0a0c022b9f4fccdb1c; Type: CONSTRAINT; Schema: public; Owner: vdbm +-- + +ALTER TABLE ONLY public.ipld_status + ADD CONSTRAINT "PK_fda882aed0a0c022b9f4fccdb1c" PRIMARY KEY (id); + + +-- +-- Name: IDX_00a8ca7940094d8552d67c3b72; Type: INDEX; Schema: public; Owner: vdbm +-- + +CREATE UNIQUE INDEX "IDX_00a8ca7940094d8552d67c3b72" ON public.block_progress USING btree (block_hash); + + +-- +-- Name: IDX_15ddaa8b6552f12be383fcec4e; Type: INDEX; Schema: public; Owner: vdbm +-- + +CREATE UNIQUE INDEX "IDX_15ddaa8b6552f12be383fcec4e" ON public.is_revoked USING btree (block_hash, contract_address, key0); + + +-- +-- Name: IDX_3da3a5ba019cd88f366213e48f; Type: INDEX; Schema: public; Owner: vdbm +-- + +CREATE UNIQUE INDEX "IDX_3da3a5ba019cd88f366213e48f" ON public._owner USING btree (block_hash, contract_address); + + +-- +-- Name: IDX_4bbe5fb40812718baf74cc9a79; Type: INDEX; Schema: public; Owner: vdbm +-- + +CREATE UNIQUE INDEX "IDX_4bbe5fb40812718baf74cc9a79" ON public.contract USING btree (address); + + +-- +-- Name: IDX_4c753e21652bf260667b3c1fd7; Type: INDEX; Schema: public; Owner: vdbm +-- + +CREATE UNIQUE INDEX "IDX_4c753e21652bf260667b3c1fd7" ON public.multi_nonce USING btree (block_hash, contract_address, key0, key1); + + +-- +-- Name: IDX_53e551bea07ca0f43c6a7a4cbb; Type: INDEX; Schema: public; Owner: vdbm 
+-- + +CREATE INDEX "IDX_53e551bea07ca0f43c6a7a4cbb" ON public.block_progress USING btree (block_number); + + +-- +-- Name: IDX_560b81b666276c48e0b330c22c; Type: INDEX; Schema: public; Owner: vdbm +-- + +CREATE UNIQUE INDEX "IDX_560b81b666276c48e0b330c22c" ON public.domain_hash USING btree (block_hash, contract_address); + + +-- +-- Name: IDX_679fe4cab2565b7be29dcd60c7; Type: INDEX; Schema: public; Owner: vdbm +-- + +CREATE INDEX "IDX_679fe4cab2565b7be29dcd60c7" ON public.ipld_block USING btree (block_id, contract_address); + + +-- +-- Name: IDX_9b12e478c35b95a248a04a8fbb; Type: INDEX; Schema: public; Owner: vdbm +-- + +CREATE INDEX "IDX_9b12e478c35b95a248a04a8fbb" ON public.block_progress USING btree (parent_hash); + + +-- +-- Name: IDX_a6953a5fcd777425c6001c1898; Type: INDEX; Schema: public; Owner: vdbm +-- + +CREATE UNIQUE INDEX "IDX_a6953a5fcd777425c6001c1898" ON public.ipld_block USING btree (cid); + + +-- +-- Name: IDX_ad541e3a5a00acd4d422c16ada; Type: INDEX; Schema: public; Owner: vdbm +-- + +CREATE INDEX "IDX_ad541e3a5a00acd4d422c16ada" ON public.event USING btree (block_id, contract); + + +-- +-- Name: IDX_b776a4314e7a73aa666ab272d7; Type: INDEX; Schema: public; Owner: vdbm +-- + +CREATE UNIQUE INDEX "IDX_b776a4314e7a73aa666ab272d7" ON public.ipld_block USING btree (block_id, contract_address, kind); + + +-- +-- Name: IDX_c86bf8a9f1c566350c422b7d3a; Type: INDEX; Schema: public; Owner: vdbm +-- + +CREATE UNIQUE INDEX "IDX_c86bf8a9f1c566350c422b7d3a" ON public.is_member USING btree (block_hash, contract_address, key0); + + +-- +-- Name: IDX_d3855d762b0f9fcf9e8a707ef7; Type: INDEX; Schema: public; Owner: vdbm +-- + +CREATE INDEX "IDX_d3855d762b0f9fcf9e8a707ef7" ON public.event USING btree (block_id, contract, event_name); + + +-- +-- Name: IDX_d67dffa77e472e6163e619f423; Type: INDEX; Schema: public; Owner: vdbm +-- + +CREATE UNIQUE INDEX "IDX_d67dffa77e472e6163e619f423" ON public.is_phisher USING btree (block_hash, contract_address, key0); + + +-- +-- Name: 
event FK_2b0d35d675c4f99751855c45021; Type: FK CONSTRAINT; Schema: public; Owner: vdbm +-- + +ALTER TABLE ONLY public.event + ADD CONSTRAINT "FK_2b0d35d675c4f99751855c45021" FOREIGN KEY (block_id) REFERENCES public.block_progress(id) ON DELETE CASCADE; + + +-- +-- Name: ipld_block FK_6fe551100c8a6d305b9c22ac6f3; Type: FK CONSTRAINT; Schema: public; Owner: vdbm +-- + +ALTER TABLE ONLY public.ipld_block + ADD CONSTRAINT "FK_6fe551100c8a6d305b9c22ac6f3" FOREIGN KEY (block_id) REFERENCES public.block_progress(id) ON DELETE CASCADE; + + +-- +-- PostgreSQL database dump complete +-- + diff --git a/build/lib/app/data/config/watcher-mobymask/mobymask-watcher.toml b/build/lib/app/data/config/watcher-mobymask/mobymask-watcher.toml new file mode 100644 index 00000000..dd91cf8f --- /dev/null +++ b/build/lib/app/data/config/watcher-mobymask/mobymask-watcher.toml @@ -0,0 +1,53 @@ +[server] + host = "0.0.0.0" + port = 3001 + kind = "active" + + # Checkpointing state. + checkpointing = true + + # Checkpoint interval in number of blocks. + checkpointInterval = 2000 + + # IPFS API address (can be taken from the output on running the IPFS daemon). + # ipfsApiAddr = "/ip4/127.0.0.1/tcp/5001" + + # Boolean to filter logs by contract. + filterLogs = true + + # Max block range for which to return events in eventsInRange GQL query. + # Use -1 for skipping check on block range. 
+ maxEventsBlockRange = -1 + +[metrics] + host = "0.0.0.0" + port = 9000 + [metrics.gql] + port = 9001 + +[database] + type = "postgres" + host = "mobymask-watcher-db" + port = 5432 + database = "mobymask-watcher" + username = "vdbm" + password = "password" + synchronize = true + logging = false + +[upstream] + [upstream.ethServer] + gqlApiEndpoint = "http://ipld-eth-server:8083/graphql" + rpcProviderEndpoint = "http://ipld-eth-server:8082" + blockDelayInMilliSecs = 60000 + + [upstream.cache] + name = "requests" + enabled = false + deleteOnStart = false + +[jobQueue] + dbConnectionString = "postgres://vdbm:password@mobymask-watcher-db/mobymask-watcher-job-queue" + maxCompletionLagInSecs = 300 + jobDelayInMilliSecs = 100 + eventsInBatch = 50 diff --git a/build/lib/app/data/config/watcher-uniswap-v3/erc20-watcher.toml b/build/lib/app/data/config/watcher-uniswap-v3/erc20-watcher.toml new file mode 100644 index 00000000..3e258eeb --- /dev/null +++ b/build/lib/app/data/config/watcher-uniswap-v3/erc20-watcher.toml @@ -0,0 +1,39 @@ +[server] + host = "0.0.0.0" + port = 3001 + mode = "eth_call" + kind = "lazy" + +[metrics] + host = "127.0.0.1" + port = 9000 + [metrics.gql] + port = 9001 + +[database] + type = "postgres" + host = "uniswap-watcher-db" + port = 5432 + database = "erc20-watcher" + username = "vdbm" + password = "password" + synchronize = true + logging = false + maxQueryExecutionTime = 100 + +[upstream] + [upstream.ethServer] + gqlApiEndpoint = "http://ipld-eth-server.example.com:8083/graphql" + rpcProviderEndpoint = "http://ipld-eth-server.example.com:8082" + + [upstream.cache] + name = "requests" + enabled = false + deleteOnStart = false + +[jobQueue] + dbConnectionString = "postgres://vdbm:password@uniswap-watcher-db:5432/erc20-watcher-job-queue" + maxCompletionLagInSecs = 300 + jobDelayInMilliSecs = 100 + eventsInBatch = 50 + blockDelayInMilliSecs = 2000 diff --git a/build/lib/app/data/config/watcher-uniswap-v3/run.sh 
b/build/lib/app/data/config/watcher-uniswap-v3/run.sh new file mode 100755 index 00000000..d8dcffb8 --- /dev/null +++ b/build/lib/app/data/config/watcher-uniswap-v3/run.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +set -e +set -u + +echo "Initializing watcher..." +yarn fill --start-block $UNISWAP_START_BLOCK --end-block $((UNISWAP_START_BLOCK + 1)) + +echo "Running active server" +DEBUG=vulcanize:* exec node --enable-source-maps dist/server.js diff --git a/build/lib/app/data/config/watcher-uniswap-v3/uni-info-watcher.toml b/build/lib/app/data/config/watcher-uniswap-v3/uni-info-watcher.toml new file mode 100644 index 00000000..2f092845 --- /dev/null +++ b/build/lib/app/data/config/watcher-uniswap-v3/uni-info-watcher.toml @@ -0,0 +1,90 @@ +[server] + host = "0.0.0.0" + port = 3004 + mode = "prod" + kind = "active" + + # Checkpointing state. + checkpointing = true + + # Checkpoint interval in number of blocks. + checkpointInterval = 50000 + + # Enable state creation + enableState = false + + # Max block range for which to return events in eventsInRange GQL query. + # Use -1 for skipping check on block range. + maxEventsBlockRange = 1000 + + # Interval in number of blocks at which to clear entities cache. + clearEntitiesCacheInterval = 1000 + + # Boolean to skip updating entity fields required in state creation and not required in the frontend. + skipStateFieldsUpdate = false + + # Boolean to load GQL query nested entity relations sequentially. + loadRelationsSequential = false + + # Max GQL API requests to process simultaneously (defaults to 1). 
+ maxSimultaneousRequests = 1 + + # GQL cache settings + [server.gqlCache] + enabled = true + + # Max in-memory cache size (in bytes) (default 8 MB) + # maxCacheSize + + # GQL cache-control max-age settings (in seconds) + maxAge = 15 + timeTravelMaxAge = 86400 # 1 day + +[metrics] + host = "0.0.0.0" + port = 9002 + [metrics.gql] + port = 9003 + +[database] + type = "postgres" + host = "uniswap-watcher-db" + port = 5432 + database = "uni-info-watcher" + username = "vdbm" + password = "password" + synchronize = true + logging = false + maxQueryExecutionTime = 100 + + [database.extra] + # maximum number of clients the pool should contain + max = 20 + +[upstream] + [upstream.ethServer] + gqlApiEndpoint = "http://ipld-eth-server.example.com:8083/graphql" + rpcProviderEndpoint = "http://ipld-eth-server.example.com:8082" + + [upstream.cache] + name = "requests" + enabled = false + deleteOnStart = false + + [upstream.uniWatcher] + gqlEndpoint = "http://uni-watcher-server:3003/graphql" + gqlSubscriptionEndpoint = "ws://uni-watcher-server:3003/graphql" + + [upstream.tokenWatcher] + gqlEndpoint = "http://erc20-watcher-server:3001/graphql" + gqlSubscriptionEndpoint = "ws://erc20-watcher-server:3001/graphql" + +[jobQueue] + dbConnectionString = "postgres://vdbm:password@uniswap-watcher-db:5432/uni-info-watcher-job-queue" + maxCompletionLagInSecs = 300 + jobDelayInMilliSecs = 1000 + eventsInBatch = 50 + subgraphEventsOrder = true + blockDelayInMilliSecs = 2000 + prefetchBlocksInMem = true + prefetchBlockCount = 10 diff --git a/build/lib/app/data/config/watcher-uniswap-v3/uni-watcher.toml b/build/lib/app/data/config/watcher-uniswap-v3/uni-watcher.toml new file mode 100644 index 00000000..bc583e9b --- /dev/null +++ b/build/lib/app/data/config/watcher-uniswap-v3/uni-watcher.toml @@ -0,0 +1,41 @@ +[server] + host = "0.0.0.0" + port = 3003 + kind = "active" + +[metrics] + host = "0.0.0.0" + port = 9000 + [metrics.gql] + port = 9001 + +[database] + type = "postgres" + host = 
"uniswap-watcher-db" + port = 5432 + database = "uni-watcher" + username = "vdbm" + password = "password" + synchronize = true + logging = false + maxQueryExecutionTime = 100 + +[upstream] + [upstream.ethServer] + gqlApiEndpoint = "http://ipld-eth-server.example.com:8083/graphql" + rpcProviderEndpoint = "http://ipld-eth-server.example.com:8082" + + [upstream.cache] + name = "requests" + enabled = false + deleteOnStart = false + +[jobQueue] + dbConnectionString = "postgres://vdbm:password@uniswap-watcher-db:5432/uni-watcher-job-queue" + maxCompletionLagInSecs = 300 + jobDelayInMilliSecs = 0 + eventsInBatch = 50 + lazyUpdateBlockProgress = true + blockDelayInMilliSecs = 2000 + prefetchBlocksInMem = true + prefetchBlockCount = 10 diff --git a/build/lib/app/data/config/watcher-uniswap-v3/watch-contract.sh b/build/lib/app/data/config/watcher-uniswap-v3/watch-contract.sh new file mode 100755 index 00000000..aaed7e1d --- /dev/null +++ b/build/lib/app/data/config/watcher-uniswap-v3/watch-contract.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +set -e +set -u + +echo "Watching factory contract 0x1F98431c8aD98523631AE4a59f267346ea31F984" +yarn watch:contract --address 0x1F98431c8aD98523631AE4a59f267346ea31F984 --kind factory --startingBlock 12369621 --checkpoint + +echo "Watching nfpm contract 0xC36442b4a4522E871399CD717aBDD847Ab11FE88" +yarn watch:contract --address 0xC36442b4a4522E871399CD717aBDD847Ab11FE88 --kind nfpm --startingBlock 12369651 --checkpoint diff --git a/build/lib/app/data/container-build/build-base.sh b/build/lib/app/data/container-build/build-base.sh new file mode 100755 index 00000000..61705b79 --- /dev/null +++ b/build/lib/app/data/container-build/build-base.sh @@ -0,0 +1,13 @@ +# source'ed into container build scripts to do generic command setup +if [[ -n "$CERC_SCRIPT_DEBUG" ]]; then + set -x + echo "Build environment variables:" + env +fi +build_command_args="" +if [[ ${CERC_FORCE_REBUILD} == "true" ]]; then + build_command_args="${build_command_args} --no-cache" 
+fi +if [[ -n "$CERC_CONTAINER_EXTRA_BUILD_ARGS" ]]; then + build_command_args="${build_command_args} ${CERC_CONTAINER_EXTRA_BUILD_ARGS}" +fi diff --git a/build/lib/app/data/container-build/cerc-act-runner-task-executor/build.sh b/build/lib/app/data/container-build/cerc-act-runner-task-executor/build.sh new file mode 100755 index 00000000..25620a53 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-act-runner-task-executor/build.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash +# Build a local version of the task executor for act-runner +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +docker build -t cerc/act-runner-task-executor:local -f ${CERC_REPO_BASE_DIR}/hosting/gitea/Dockerfile.task-executor ${build_command_args} ${SCRIPT_DIR} diff --git a/build/lib/app/data/container-build/cerc-act-runner/build.sh b/build/lib/app/data/container-build/cerc-act-runner/build.sh new file mode 100755 index 00000000..89a6e74f --- /dev/null +++ b/build/lib/app/data/container-build/cerc-act-runner/build.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh +# Build a local version of the act-runner image +# TODO: enhance the default build code path to cope with this container (repo has an _ which needs to be converted to - in the image tag) +docker build -t cerc/act-runner:local -f ${CERC_REPO_BASE_DIR}/act_runner/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/act_runner diff --git a/build/lib/app/data/container-build/cerc-builder-gerbil/Dockerfile b/build/lib/app/data/container-build/cerc-builder-gerbil/Dockerfile new file mode 100644 index 00000000..a18dfa72 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-builder-gerbil/Dockerfile @@ -0,0 +1,31 @@ +# From: https://github.com/vyzo/gerbil/blob/master/docker/Dockerfile +FROM gerbil/ubuntu + +# Install the Solidity compiler (latest stable version) +# and guile +# and libsecp256k1-dev +RUN apt-get 
update && export DEBIAN_FRONTEND=noninteractive && export DEBCONF_NOWARNINGS="yes" && \ + apt-get install -y software-properties-common && \ + add-apt-repository ppa:ethereum/ethereum && \ + apt-get update && \ + apt-get install -y solc && \ + apt-get install -y guile-3.0 && \ + apt-get install -y libsecp256k1-dev && \ + apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + +RUN mkdir /scripts +COPY install-dependencies.sh /scripts + +# Override the definition of GERBIL_PATH in the base image, but +# is safe because (at present) no gerbil packages are installed in the base image +# We do this in order to allow a set of pre-installed packages from the container +# to be used with an arbitrary, potentially different set of projects bind mounted +# at /src +ENV GERBIL_PATH=/.gerbil +RUN bash /scripts/install-dependencies.sh + +# Needed to prevent git from raging about /src +RUN git config --global --add safe.directory /src + +COPY entrypoint.sh /scripts +ENTRYPOINT ["/scripts/entrypoint.sh"] diff --git a/build/lib/app/data/container-build/cerc-builder-gerbil/README.md b/build/lib/app/data/container-build/cerc-builder-gerbil/README.md new file mode 100644 index 00000000..854139b9 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-builder-gerbil/README.md @@ -0,0 +1,21 @@ +## Gerbil Scheme Builder + +This container is designed to be used as a simple "build runner" environment for building and running Scheme projects using Gerbil and gerbil-ethereum. Its primary purpose is to allow build/test/run of gerbil code without the need to install and configure all the necessary prerequisites and dependencies on the host system. 
+ +### Usage + +First build the container with: + +``` +$ laconic-so build-containers --include cerc/builder-gerbil +``` + +Now, assuming a gerbil project located at `~/projects/my-project`, run bash in the container mounting the project with: + +``` +$ docker run -it -v $HOME/projects/my-project:/src cerc/builder-gerbil:latest bash +root@7c4124bb09e3:/src# +``` + +Now gerbil commands can be run. + diff --git a/build/lib/app/data/container-build/cerc-builder-gerbil/entrypoint.sh b/build/lib/app/data/container-build/cerc-builder-gerbil/entrypoint.sh new file mode 100755 index 00000000..311cb8cb --- /dev/null +++ b/build/lib/app/data/container-build/cerc-builder-gerbil/entrypoint.sh @@ -0,0 +1,2 @@ +#!/bin/sh +exec "$@" diff --git a/build/lib/app/data/container-build/cerc-builder-gerbil/install-dependencies.sh b/build/lib/app/data/container-build/cerc-builder-gerbil/install-dependencies.sh new file mode 100755 index 00000000..36855a9b --- /dev/null +++ b/build/lib/app/data/container-build/cerc-builder-gerbil/install-dependencies.sh @@ -0,0 +1,16 @@ +DEPS=(github.com/fare/gerbil-utils + github.com/fare/gerbil-poo + github.com/fare/gerbil-crypto + github.com/fare/gerbil-persist + github.com/fare/gerbil-ethereum + github.com/drewc/gerbil-swank + github.com/drewc/drewc-r7rs-swank + github.com/drewc/smug-gerbil + github.com/drewc/ftw + github.com/vyzo/gerbil-libp2p + ) ; +for i in ${DEPS[@]} ; do + echo "Installing gerbil package: $i" + gxpkg install $i + gxpkg build $i +done diff --git a/build/lib/app/data/container-build/cerc-builder-js/Dockerfile b/build/lib/app/data/container-build/cerc-builder-js/Dockerfile new file mode 100644 index 00000000..9e02327e --- /dev/null +++ b/build/lib/app/data/container-build/cerc-builder-js/Dockerfile @@ -0,0 +1,72 @@ +# Originally from: https://github.com/devcontainers/images/blob/main/src/javascript-node/.devcontainer/Dockerfile +# Which depends on: https://github.com/nodejs/docker-node/blob/main/Dockerfile-debian.template +# 
[Choice] Node.js version (use -bullseye variants on local arm64/Apple Silicon): 18, 16, 14, 18-bullseye, 16-bullseye, 14-bullseye, 18-buster, 16-buster, 14-buster +ARG VARIANT=18-bullseye +FROM node:${VARIANT} + +# Set these args to change the uid/gid for the base container's "node" user to match that of the host user (so bind mounts work as expected). +ARG CERC_HOST_UID=1000 +ARG CERC_HOST_GID=1000 +# Make these values available at runtime to allow a consistency check. +ENV HOST_UID=${CERC_HOST_UID} +ENV HOST_GID=${CERC_HOST_GID} + +ARG USERNAME=node +ARG NPM_GLOBAL=/usr/local/share/npm-global + +# Add NPM global to PATH. +ENV PATH=${NPM_GLOBAL}/bin:${PATH} + +SHELL ["/bin/bash", "-c"] + +RUN \ + # Don't switch container uid/gid if the host uid/gid is 1000 (which means it's already correct), + # or root (which won't work anyway) or <= 100 (which also won't work). + if [[ ${CERC_HOST_GID} -ne 1000 && ${CERC_HOST_GID} -ne 0 && ${CERC_HOST_GID} -gt 100 ]]; then \ + groupmod -g ${CERC_HOST_GID} ${USERNAME}; \ + fi \ + && if [[ ${CERC_HOST_UID} -ne 1000 && ${CERC_HOST_UID} -ne 0 && ${CERC_HOST_UID} -gt 100 ]]; then \ + usermod -u ${CERC_HOST_UID} -g ${CERC_HOST_GID} ${USERNAME} && chown ${CERC_HOST_UID}:${CERC_HOST_GID} /home/${USERNAME}; \ + fi + +# Prevents npm from printing version warnings +ENV NPM_CONFIG_UPDATE_NOTIFIER=false + +RUN \ + # Configure global npm install location, use group to adapt to UID/GID changes + if ! 
cat /etc/group | grep -e "^npm:" > /dev/null 2>&1; then groupadd -r npm; fi \ + && usermod -a -G npm ${USERNAME} \ + && umask 0002 \ + && mkdir -p ${NPM_GLOBAL} \ + && touch /usr/local/etc/npmrc \ + && chown ${USERNAME}:npm ${NPM_GLOBAL} /usr/local/etc/npmrc \ + && chmod g+s ${NPM_GLOBAL} \ + && npm config -g set prefix ${NPM_GLOBAL} \ + && su ${USERNAME} -c "npm config -g set prefix ${NPM_GLOBAL}" \ + # Install eslint + && su ${USERNAME} -c "umask 0002 && npm install -g eslint" \ + && npm cache clean --force > /dev/null 2>&1 + +# [Optional] Uncomment this section to install additional OS packages. +RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ + && apt-get -y install --no-install-recommends jq + +# [Optional] Uncomment if you want to install an additional version of node using nvm +# ARG EXTRA_NODE_VERSION=10 +# RUN su node -c "source /usr/local/share/nvm/nvm.sh && nvm install ${EXTRA_NODE_VERSION}" + +# [Optional] Uncomment if you want to install more global node modules +# RUN su node -c "npm install -g " + +RUN mkdir /scripts +COPY build-npm-package.sh /scripts +COPY yarn-local-registry-fixup.sh /scripts +COPY build-npm-package-local-dependencies.sh /scripts +COPY check-uid.sh /scripts +ENV PATH="${PATH}:/scripts" + +COPY entrypoint.sh . +ENTRYPOINT ["./entrypoint.sh"] +# Placeholder CMD : generally this will be overridden at run time like : +# docker run -it -v /home/builder/cerc/laconic-sdk:/workspace cerc/builder-js sh -c 'cd /workspace && yarn && yarn build' +CMD node --version diff --git a/build/lib/app/data/container-build/cerc-builder-js/README.md b/build/lib/app/data/container-build/cerc-builder-js/README.md new file mode 100644 index 00000000..e77136f8 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-builder-js/README.md @@ -0,0 +1,17 @@ +## JS/TS Package Builder + +This container is designed to be used as a simple "build runner" environment for building and publishing JS/TS projects +using `yarn`. 
+ +### Running a build + +As a temporary measure while the necessary functionality is being added to Stack Orchestrator, +it is possible to build packages manually by invoking `docker run` , for example as follows: + + +``` +$ docker run --rm -it --add-host gitea.local:host-gateway \ + -v ${HOME}/cerc/laconic-registry-cli:/workspace cerc/builder-js \ + sh -c 'cd /workspace && CERC_NPM_AUTH_TOKEN=6613572a28ebebaee20ccd90064251fa8c2b94f6 \ + build-npm-package-local-dependencies.sh http://gitea.local:3000/api/packages/cerc-io/npm/ 0.1.8' +``` diff --git a/build/lib/app/data/container-build/cerc-builder-js/build-npm-package-local-dependencies.sh b/build/lib/app/data/container-build/cerc-builder-js/build-npm-package-local-dependencies.sh new file mode 100755 index 00000000..5ae0e073 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-builder-js/build-npm-package-local-dependencies.sh @@ -0,0 +1,37 @@ +#!/bin/bash +# Usage: build-npm-package-local-dependencies.sh +# Runs build-npm-package.sh after first fixing up yarn.lock to use a local +# npm registry for all packages in a specific scope (currently @cerc-io, @lirewine and @muknsys) +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi +if ! 
[[ $# -eq 1 || $# -eq 2 ]]; then + echo "Illegal number of parameters" >&2 + exit 1 +fi +if [[ -z "${CERC_NPM_AUTH_TOKEN}" ]]; then + echo "CERC_NPM_AUTH_TOKEN is not set" >&2 + exit 1 +fi +# Exit on error +set -e +local_npm_registry_url=$1 +package_publish_version=$2 +# If we need to handle an additional scope, add it to the list below: +npm_scopes_to_handle=("@cerc-io" "@lirewine" "@muknsys") +for npm_scope_for_local in ${npm_scopes_to_handle[@]} +do + # We need to configure the local registry + npm config set ${npm_scope_for_local}:registry ${local_npm_registry_url} + npm config set -- ${local_npm_registry_url}:_authToken ${CERC_NPM_AUTH_TOKEN} + # Find the set of dependencies from the specified scope + mapfile -t dependencies_from_scope < <(cat package.json | jq -r '.dependencies | with_entries(if (.key|test("^'${npm_scope_for_local}'/.*$")) then ( {key: .key, value: .value } ) else empty end ) | keys[]') + echo "Fixing up dependencies in scope ${npm_scope_for_local}" + for package in "${dependencies_from_scope[@]}" + do + echo "Fixing up package ${package}" + yarn-local-registry-fixup.sh $package ${local_npm_registry_url} + done +done +echo "Running build" +build-npm-package.sh ${local_npm_registry_url} ${package_publish_version} diff --git a/build/lib/app/data/container-build/cerc-builder-js/build-npm-package.sh b/build/lib/app/data/container-build/cerc-builder-js/build-npm-package.sh new file mode 100755 index 00000000..db27955c --- /dev/null +++ b/build/lib/app/data/container-build/cerc-builder-js/build-npm-package.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# Usage: build-npm-package.sh +# Note: supply the registry auth token in CERC_NPM_AUTH_TOKEN +if [[ -n "$CERC_SCRIPT_DEBUG" ]]; then + set -x +fi +if ! 
[[ $# -eq 1 || $# -eq 2 ]]; then + echo "Illegal number of parameters" >&2 + exit 1 +fi +if [[ -z "${CERC_NPM_AUTH_TOKEN}" ]]; then + echo "CERC_NPM_AUTH_TOKEN is not set" >&2 + exit 1 +fi +if [[ $# -eq 2 ]]; then + package_publish_version=$2 +else + package_publish_version=$( cat package.json | jq -r .version ) +fi +# Exit on error +set -e +# Get the name of this package from package.json since we weren't passed that +package_name=$( cat package.json | jq -r .name ) +local_npm_registry_url=$1 +npm config set @cerc-io:registry ${local_npm_registry_url} +npm config set @lirewine:registry ${local_npm_registry_url} +npm config set @muknsys:registry ${local_npm_registry_url} +# Workaround bug in npm unpublish where it needs the url to be of the form // and not http:// +local_npm_registry_url_fixed=$( echo ${local_npm_registry_url} | sed -e 's/^http[s]\{0,1\}://') +npm config set -- ${local_npm_registry_url_fixed}:_authToken ${CERC_NPM_AUTH_TOKEN} +# First check if the version of this package we're trying to build already exists in the registry +package_exists=$( yarn info --json ${package_name}@${package_publish_version} 2>/dev/null | jq -r .data.dist.tarball ) +if [[ ! 
-z "$package_exists" && "$package_exists" != "null" ]]; then + echo "${package_publish_version} of ${package_name} already exists in the registry" + if [[ ${CERC_FORCE_REBUILD} == "true" ]]; then + # Attempt to unpublish the existing package + echo "NOTE: unpublishing existing package version since force rebuild is enabled" + npm unpublish --force ${package_name}@${package_publish_version} + else + echo "skipping build since target version already exists" + exit 0 + fi +fi +echo "Build and publish ${package_name} version ${package_publish_version}" +yarn install +yarn build +yarn publish --non-interactive --new-version ${package_publish_version} --no-git-tag-version diff --git a/build/lib/app/data/container-build/cerc-builder-js/check-uid.sh b/build/lib/app/data/container-build/cerc-builder-js/check-uid.sh new file mode 100755 index 00000000..a8cbc324 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-builder-js/check-uid.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# Make the container usable for uid/gid != 1000 +if [[ -n "$CERC_SCRIPT_DEBUG" ]]; then + set -x +fi +current_uid=$(id -u) +current_gid=$(id -g) +# Don't check if running as root +if [[ ${current_uid} == 0 ]]; then + exit 0 +fi +# Check the current uid/gid vs the uid/gid used to build the container. +# We do this because both bind mounts and npm tooling require the uid/gid to match. 
+if [[ ${current_gid} != ${HOST_GID} ]]; then + echo "Warning: running with gid: ${current_gid} which is not the gid for which this container was built (${HOST_GID})" + exit 0 +fi +if [[ ${current_uid} != ${HOST_UID} ]]; then + echo "Warning: running with uid: ${current_uid} which is not the uid for which this container was built (${HOST_UID})" + exit 0 +fi diff --git a/build/lib/app/data/container-build/cerc-builder-js/entrypoint.sh b/build/lib/app/data/container-build/cerc-builder-js/entrypoint.sh new file mode 100755 index 00000000..ab80737a --- /dev/null +++ b/build/lib/app/data/container-build/cerc-builder-js/entrypoint.sh @@ -0,0 +1,3 @@ +#!/bin/sh +/scripts/check-uid.sh +exec "$@" diff --git a/build/lib/app/data/container-build/cerc-builder-js/yarn-local-registry-fixup.sh b/build/lib/app/data/container-build/cerc-builder-js/yarn-local-registry-fixup.sh new file mode 100755 index 00000000..ad11ce80 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-builder-js/yarn-local-registry-fixup.sh @@ -0,0 +1,58 @@ +#!/bin/bash +# Usage: yarn-local-registry-fixup.sh +# Assumes package.json and yarn.lock are in the cwd +# The purpose of this script is to take a project cloned from git +# and "fixup" its yarn.lock file such that specified dependency +# will be fetched from a registry other than the one used when +# yarn.lock was generated. It updates all checksums using data +# from the "new" registry (because due to embedded timestamps etc +# the same source code re-built later will not have the same checksum). 
+if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi +if [[ $# -ne 2 ]]; then + echo "Illegal number of parameters" >&2 + exit 1 +fi +# Exit on error +set -e +target_package=$1 +local_npm_registry_url=$2 +# Extract the actual version pinned in yarn.lock +# See: https://stackoverflow.com/questions/60454251/how-to-know-the-version-of-currently-installed-package-from-yarn-lock +versioned_target_package=$(yarn list --pattern ${target_package} --depth=0 --json --non-interactive --no-progress | jq -r '.data.trees[].name') +# Use yarn info to get URL checksums etc from the new registry +yarn_info_output=$(yarn info --json $versioned_target_package 2>/dev/null) +# First check if the target version actually exists. +# If it doesn't exist there will be no .data.dist.tarball element, +# and jq will output the string "null" +package_tarball=$(echo $yarn_info_output | jq -r .data.dist.tarball) +if [[ $package_tarball == "null" ]]; then + echo "FATAL: Target package version ($versioned_target_package) not found" >&2 + exit 1 +fi +# Code below parses out the values we need +# When running inside a container, the registry can return a URL with the wrong host name due to proxying +# so we need to check if that has happened and fix the URL if so. +if ! [[ "${package_tarball}" =~ ^${local_npm_registry_url}.* ]]; then + # HACK: I've hard-wired the host names below. 
Replace with proper implementation + # TODO: remove the hack when proven no longer necessary + package_tarball=$( echo ${package_tarball} | sed -e 's/localhost/gitea.local/g' ) +fi +package_integrity=$(echo $yarn_info_output | jq -r .data.dist.integrity) +package_shasum=$(echo $yarn_info_output | jq -r .data.dist.shasum) +package_resolved=${package_tarball}#${package_shasum} +# Some strings need to be escaped so they work when passed to sed later +escaped_package_integrity=$(printf '%s\n' "$package_integrity" | sed -e 's/[\/&]/\\&/g') +escaped_package_resolved=$(printf '%s\n' "$package_resolved" | sed -e 's/[\/&]/\\&/g') +escaped_target_package=$(printf '%s\n' "$target_package" | sed -e 's/[\/&]/\\&/g') +if [ -n "$CERC_SCRIPT_VERBOSE" ]; then + echo "Tarball: ${package_tarball}" + echo "Integrity: ${package_integrity}" + echo "Shasum: ${package_shasum}" + echo "Resolved: ${package_resolved}" +fi +# Use magic sed regex to replace the values in yarn.lock +# Note: yarn.lock is not json so we can not use jq for this +sed -i -e '/^\"'${escaped_target_package}'.*\":$/ , /^\".*$/ s/^\([[:space:]]\{1,\}resolved \).*$/\1'\"${escaped_package_resolved}\"'/' yarn.lock +sed -i -e '/^\"'${escaped_target_package}'.*\":$/ , /^\".*$/ s/^\([[:space:]]\{1,\}integrity \).*$/\1'${escaped_package_integrity}'/' yarn.lock diff --git a/build/lib/app/data/container-build/cerc-eth-probe/build.sh b/build/lib/app/data/container-build/cerc-eth-probe/build.sh new file mode 100755 index 00000000..c90e0627 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-eth-probe/build.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +# Build cerc/eth-probe +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh +docker build -t cerc/eth-probe:local ${build_command_args} ${CERC_REPO_BASE_DIR}/eth-probe diff --git a/build/lib/app/data/container-build/cerc-eth-statediff-fill-service/build.sh b/build/lib/app/data/container-build/cerc-eth-statediff-fill-service/build.sh new file mode 100755 index 00000000..03896052 --- 
/dev/null +++ b/build/lib/app/data/container-build/cerc-eth-statediff-fill-service/build.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +# Build cerc/eth-statediff-fill-service +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh +docker build -t cerc/eth-statediff-fill-service:local ${build_command_args} ${CERC_REPO_BASE_DIR}/eth-statediff-fill-service diff --git a/build/lib/app/data/container-build/cerc-eth-statediff-service/build.sh b/build/lib/app/data/container-build/cerc-eth-statediff-service/build.sh new file mode 100755 index 00000000..07c2d2ef --- /dev/null +++ b/build/lib/app/data/container-build/cerc-eth-statediff-service/build.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +# Build cerc/eth-statediff-service +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh +docker build -t cerc/eth-statediff-service:local ${build_command_args} ${CERC_REPO_BASE_DIR}/eth-statediff-service diff --git a/build/lib/app/data/container-build/cerc-fixturenet-eth-geth/Dockerfile b/build/lib/app/data/container-build/cerc-fixturenet-eth-geth/Dockerfile new file mode 100644 index 00000000..63c3c0a7 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-fixturenet-eth-geth/Dockerfile @@ -0,0 +1,27 @@ +FROM skylenet/ethereum-genesis-generator@sha256:210353ce7c898686bc5092f16c61220a76d357f51eff9c451e9ad1b9ad03d4d3 AS ethgen + +# Using the same golang image as used to build geth: https://github.com/cerc-io/go-ethereum/blob/HEAD/Dockerfile +FROM golang:1.18-alpine as delve +RUN go install github.com/go-delve/delve/cmd/dlv@latest + +FROM cerc/go-ethereum:local as geth + +FROM alpine:latest +RUN apk add --no-cache python3 python3-dev py3-pip curl wget jq build-base gettext libintl openssl bash bind-tools postgresql-client + +COPY --from=delve /go/bin/dlv /usr/local/bin/ +COPY --from=ethgen /usr/local/bin/eth2-testnet-genesis /usr/local/bin/ +COPY --from=ethgen /usr/local/bin/eth2-val-tools /usr/local/bin/ +COPY --from=ethgen /apps /apps + +RUN cd /apps/el-gen && pip3 install -r requirements.txt + +COPY 
genesis /opt/testnet +COPY run-el.sh /opt/testnet/run.sh + +RUN cd /opt/testnet && make genesis-el + +COPY --from=geth /usr/local/bin/geth /usr/local/bin/ +RUN geth --datadir ~/ethdata init /opt/testnet/build/el/geth.json && rm -f ~/ethdata/geth/nodekey + +ENTRYPOINT ["/opt/testnet/run.sh"] diff --git a/build/lib/app/data/container-build/cerc-fixturenet-eth-geth/build.sh b/build/lib/app/data/container-build/cerc-fixturenet-eth-geth/build.sh new file mode 100755 index 00000000..a00a3f17 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-fixturenet-eth-geth/build.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +# Build cerc/fixturenet-eth-geth + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +docker build -t cerc/fixturenet-eth-geth:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} $SCRIPT_DIR diff --git a/build/lib/app/data/container-build/cerc-fixturenet-eth-geth/genesis/Makefile b/build/lib/app/data/container-build/cerc-fixturenet-eth-geth/genesis/Makefile new file mode 100644 index 00000000..a1eef575 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-fixturenet-eth-geth/genesis/Makefile @@ -0,0 +1,13 @@ +.PHONY: build +build: genesis + +.PHONY: genesis +genesis: genesis-el + +.PHONY: genesis-el +genesis-el: + cd el; ./build_el.sh + +.PHONY: clean +clean: + rm -rf build diff --git a/build/lib/app/data/container-build/cerc-fixturenet-eth-geth/genesis/accounts/import_keys.sh b/build/lib/app/data/container-build/cerc-fixturenet-eth-geth/genesis/accounts/import_keys.sh new file mode 100755 index 00000000..e8dce0ea --- /dev/null +++ b/build/lib/app/data/container-build/cerc-fixturenet-eth-geth/genesis/accounts/import_keys.sh @@ -0,0 +1,17 @@ +#!/bin/sh + +ACCOUNT_PASSWORD=${ACCOUNT_PASSWORD:-secret1212} + +for line in `cat ../build/el/accounts.csv`; do + BIP44_PATH="`echo "$line" | cut -d',' -f1`" + ADDRESS="`echo "$line" | cut -d',' -f2`" + PRIVATE_KEY="`echo "$line" 
| cut -d',' -f3`" + + echo "$ACCOUNT_PASSWORD" > .pw.$$ + echo "$PRIVATE_KEY" | sed 's/0x//' > .key.$$ + + echo "" + echo "$ADDRESS" + geth account import --datadir=~/ethdata --password .pw.$$ .key.$$ + rm -f .pw.$$ .key.$$ +done diff --git a/build/lib/app/data/container-build/cerc-fixturenet-eth-geth/genesis/accounts/mnemonic_to_csv.py b/build/lib/app/data/container-build/cerc-fixturenet-eth-geth/genesis/accounts/mnemonic_to_csv.py new file mode 100644 index 00000000..365c3775 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-fixturenet-eth-geth/genesis/accounts/mnemonic_to_csv.py @@ -0,0 +1,17 @@ +from web3.auto import w3 +import json +import ruamel.yaml as yaml +import sys + +w3.eth.account.enable_unaudited_hdwallet_features() + +testnet_config_path = "genesis-config.yaml" +if len(sys.argv) > 1: + testnet_config_path = sys.argv[1] + +with open(testnet_config_path) as stream: + data = yaml.safe_load(stream) + +for key, value in data['el_premine'].items(): + acct = w3.eth.account.from_mnemonic(data['mnemonic'], account_path=key, passphrase='') + print("%s,%s,%s" % (key, acct.address, acct.key.hex())) diff --git a/build/lib/app/data/container-build/cerc-fixturenet-eth-geth/genesis/el/build_el.sh b/build/lib/app/data/container-build/cerc-fixturenet-eth-geth/genesis/el/build_el.sh new file mode 100755 index 00000000..76b43daf --- /dev/null +++ b/build/lib/app/data/container-build/cerc-fixturenet-eth-geth/genesis/el/build_el.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash +set -e + +# See: https://github.com/skylenet/ethereum-genesis-generator/blob/master/entrypoint.sh + +rm -rf ../build/el +mkdir -p ../build/el + +tmp_dir=$(mktemp -d -t ci-XXXXXXXXXX) +envsubst < el-config.yaml > $tmp_dir/genesis-config.yaml + +ttd=`cat $tmp_dir/genesis-config.yaml | grep terminal_total_difficulty | awk '{ print $2 }'` +homestead_block=`cat $tmp_dir/genesis-config.yaml | grep homestead_block | awk '{ print $2 }'` +eip150_block=`cat $tmp_dir/genesis-config.yaml | grep eip150_block 
| awk '{ print $2 }'` +eip155_block=`cat $tmp_dir/genesis-config.yaml | grep eip155_block | awk '{ print $2 }'` +eip158_block=`cat $tmp_dir/genesis-config.yaml | grep eip158_block | awk '{ print $2 }'` +byzantium_block=`cat $tmp_dir/genesis-config.yaml | grep byzantium_block | awk '{ print $2 }'` +constantinople_block=`cat $tmp_dir/genesis-config.yaml | grep constantinople_block | awk '{ print $2 }'` +petersburg_block=`cat $tmp_dir/genesis-config.yaml | grep petersburg_block | awk '{ print $2 }'` +istanbul_block=`cat $tmp_dir/genesis-config.yaml | grep istanbul_block | awk '{ print $2 }'` +berlin_block=`cat $tmp_dir/genesis-config.yaml | grep berlin_block | awk '{ print $2 }'` +london_block=`cat $tmp_dir/genesis-config.yaml | grep london_block | awk '{ print $2 }'` +merge_fork_block=`cat $tmp_dir/genesis-config.yaml | grep merge_fork_block | awk '{ print $2 }'` + +python3 /apps/el-gen/genesis_geth.py $tmp_dir/genesis-config.yaml | \ + jq ".config.terminalTotalDifficulty=$ttd" | \ + jq ".config.homesteadBlock=$homestead_block" | \ + jq ".config.eip150Block=$eip150_block" | \ + jq ".config.eip155Block=$eip155_block" | \ + jq ".config.eip158Block=$eip158_block" | \ + jq ".config.byzantiumBlock=$byzantium_block" | \ + jq ".config.constantinopleBlock=$constantinople_block" | \ + jq ".config.petersburgBlock=$petersburg_block" | \ + jq ".config.istanbulBlock=$istanbul_block" | \ + jq ".config.berlinBlock=$berlin_block" | \ + jq ".config.londonBlock=$london_block" | \ + jq ".config.mergeForkBlock=$merge_fork_block" > ../build/el/geth.json +python3 ../accounts/mnemonic_to_csv.py $tmp_dir/genesis-config.yaml > ../build/el/accounts.csv diff --git a/build/lib/app/data/container-build/cerc-fixturenet-eth-geth/genesis/el/el-config.yaml b/build/lib/app/data/container-build/cerc-fixturenet-eth-geth/genesis/el/el-config.yaml new file mode 100644 index 00000000..8bcb0fac --- /dev/null +++ b/build/lib/app/data/container-build/cerc-fixturenet-eth-geth/genesis/el/el-config.yaml @@ -0,0 
+1,31 @@ +mnemonic: "viable ketchup woman library opinion copy rhythm attend rose knock penalty practice photo bundle budget dentist enter round bind holiday useful arch danger lobster" +el_premine: + "m/44'/60'/0'/0/0": 10000000ETH + "m/44'/60'/0'/0/1": 10000000ETH + "m/44'/60'/0'/0/2": 10000000ETH + "m/44'/60'/0'/0/3": 10000000ETH + "m/44'/60'/0'/0/4": 10000000ETH + "m/44'/60'/0'/0/5": 10000000ETH +el_premine_addrs: {} +chain_id: 1212 +deposit_contract_address: "0x1212121212121212121212121212121212121212" +genesis_timestamp: 0 +terminal_total_difficulty: 1000 +homestead_block: 1 +eip150_block: 1 +eip155_block: 1 +eip158_block: 1 +byzantium_block: 1 +constantinople_block: 1 +petersburg_block: 1 +istanbul_block: 1 +berlin_block: 1 +london_block: 1 +merge_fork_block: 1 + +clique: + enabled: false + signers: + - 36d56343bc308d4ffaac2f793d121aba905fa6cc + - 5e762d4a3847cadaf40a4b0c39574b0ff6698c78 + - 15d7acc1019fdf8ab4f0f7bd31ec1487ecb5a2bd diff --git a/build/lib/app/data/container-build/cerc-fixturenet-eth-geth/run-el.sh b/build/lib/app/data/container-build/cerc-fixturenet-eth-geth/run-el.sh new file mode 100755 index 00000000..526c76d7 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-fixturenet-eth-geth/run-el.sh @@ -0,0 +1,129 @@ +#!/bin/bash + +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +ETHERBASE=`cat /opt/testnet/build/el/accounts.csv | head -1 | cut -d',' -f2` +NETWORK_ID=`cat /opt/testnet/el/el-config.yaml | grep 'chain_id' | awk '{ print $2 }'` +NETRESTRICT=`ip addr | grep inet | grep -v '127.0' | awk '{print $2}'` + +HOME_DIR=`pwd` +cd /opt/testnet/build/el +python3 -m http.server 9898 & +cd $HOME_DIR + +START_CMD="geth" +if [ "true" == "$CERC_REMOTE_DEBUG" ] && [ -x "/usr/local/bin/dlv" ]; then + START_CMD="/usr/local/bin/dlv --listen=:40000 --headless=true --api-version=2 --accept-multiclient exec /usr/local/bin/geth --continue --" +fi + +# See https://linuxconfig.org/how-to-propagate-a-signal-to-child-processes-from-a-bash-script 
+cleanup() { + echo "Signal received, cleaning up..." + + # Kill the child process first (CERC_REMOTE_DEBUG=true uses dlv which starts geth as a child process) + pkill -P ${geth_pid} + sleep 2 + kill $(jobs -p) + + wait + echo "Done" +} +trap 'cleanup' SIGINT SIGTERM + +if [ "true" == "$RUN_BOOTNODE" ]; then + $START_CMD \ + --datadir=~/ethdata \ + --nodekeyhex="${BOOTNODE_KEY}" \ + --nodiscover \ + --ipcdisable \ + --networkid=${NETWORK_ID} \ + --netrestrict="${NETRESTRICT}" \ + & + + geth_pid=$! +else + cd /opt/testnet/accounts + ./import_keys.sh + + echo -n "$JWT" > /opt/testnet/build/el/jwtsecret + + if [ "$CERC_RUN_STATEDIFF" == "detect" ] && [ -n "$CERC_STATEDIFF_DB_HOST" ]; then + dig_result=$(dig $CERC_STATEDIFF_DB_HOST +short) + dig_status_code=$? + if [[ $dig_status_code = 0 && -n $dig_result ]]; then + echo "Statediff DB at $CERC_STATEDIFF_DB_HOST" + CERC_RUN_STATEDIFF="true" + else + echo "No statediff DB available." + CERC_RUN_STATEDIFF="false" + fi + fi + + STATEDIFF_OPTS="" + if [ "$CERC_RUN_STATEDIFF" == "true" ]; then + ready=0 + while [ $ready -eq 0 ]; do + echo "Waiting for statediff DB..." + sleep 1 + export PGPASSWORD="$CERC_STATEDIFF_DB_PASSWORD" + result=$(psql -h "$CERC_STATEDIFF_DB_HOST" \ + -p "$CERC_STATEDIFF_DB_PORT" \ + -U "$CERC_STATEDIFF_DB_USER" \ + -d "$CERC_STATEDIFF_DB_NAME" \ + -t -c 'select max(version_id) from goose_db_version;' 2>/dev/null | awk '{ print $1 }') + if [ -n "$result" ] && [ $result -ge $CERC_STATEDIFF_DB_GOOSE_MIN_VER ]; then + echo "DB ready..." 
+ ready=1 + fi + done + STATEDIFF_OPTS="--statediff=true \ + --statediff.db.host=$CERC_STATEDIFF_DB_HOST \ + --statediff.db.name=$CERC_STATEDIFF_DB_NAME \ + --statediff.db.nodeid=$CERC_STATEDIFF_DB_NODE_ID \ + --statediff.db.password=$CERC_STATEDIFF_DB_PASSWORD \ + --statediff.db.port=$CERC_STATEDIFF_DB_PORT \ + --statediff.db.user=$CERC_STATEDIFF_DB_USER \ + --statediff.db.logstatements=${CERC_STATEDIFF_DB_LOG_STATEMENTS:-false} \ + --statediff.db.copyfrom=${CERC_STATEDIFF_DB_COPY_FROM:-true} \ + --statediff.waitforsync=true \ + --statediff.writing=true" + fi + + $START_CMD \ + --datadir=~/ethdata \ + --bootnodes="${ENODE}" \ + --allow-insecure-unlock \ + --http \ + --http.addr="0.0.0.0" \ + --http.vhosts="*" \ + --http.api="${CERC_GETH_HTTP_APIS:-eth,web3,net,admin,personal,debug,statediff}" \ + --http.corsdomain="*" \ + --authrpc.addr="0.0.0.0" \ + --authrpc.vhosts="*" \ + --authrpc.jwtsecret="/opt/testnet/build/el/jwtsecret" \ + --ws \ + --ws.addr="0.0.0.0" \ + --ws.origins="*" \ + --ws.api="${CERC_GETH_WS_APIS:-eth,web3,net,admin,personal,debug,statediff}" \ + --http.corsdomain="*" \ + --networkid="${NETWORK_ID}" \ + --netrestrict="${NETRESTRICT}" \ + --gcmode archive \ + --txlookuplimit=0 \ + --cache.preimages \ + --syncmode=full \ + --mine \ + --miner.threads=1 \ + --metrics \ + --metrics.addr="0.0.0.0" \ + --verbosity=${CERC_GETH_VERBOSITY:-3} \ + --vmodule="${CERC_GETH_VMODULE:-statediff/*=5}" \ + --miner.etherbase="${ETHERBASE}" ${STATEDIFF_OPTS} \ + & + + geth_pid=$! 
+fi + +wait $geth_pid diff --git a/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/Dockerfile b/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/Dockerfile new file mode 100644 index 00000000..2295262d --- /dev/null +++ b/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/Dockerfile @@ -0,0 +1,34 @@ +FROM sigp/lcli:v3.2.1 AS lcli +FROM skylenet/ethereum-genesis-generator@sha256:210353ce7c898686bc5092f16c61220a76d357f51eff9c451e9ad1b9ad03d4d3 AS ethgen +FROM cerc/fixturenet-eth-geth:local AS fnetgeth + +FROM cerc/lighthouse:local + +# cerc/lighthouse is based on Ubuntu +RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-recommends \ + libssl-dev ca-certificates \ + curl socat iproute2 telnet wget jq \ + build-essential python3 python3-dev python3-pip gettext-base \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +COPY genesis /opt/testnet +COPY run-cl.sh /opt/testnet/run.sh + +COPY --from=lcli /usr/local/bin/lcli /usr/local/bin/lcli +COPY --from=ethgen /usr/local/bin/eth2-testnet-genesis /usr/local/bin/eth2-testnet-genesis +COPY --from=ethgen /usr/local/bin/eth2-val-tools /usr/local/bin/eth2-val-tools +COPY --from=ethgen /apps /apps +COPY --from=fnetgeth /opt/testnet/el /opt/testnet/el +COPY --from=fnetgeth /opt/testnet/build/el /opt/testnet/build/el + +RUN cd /opt/testnet && make genesis-cl + +# Work around some bugs in lcli where the default path is always used. 
+RUN mkdir -p /root/.lighthouse && cd /root/.lighthouse && ln -s /opt/testnet/build/cl/testnet + +RUN mkdir -p /scripts +COPY scripts/status-internal.sh /scripts +COPY scripts/status.sh /scripts + +ENTRYPOINT ["/opt/testnet/run.sh"] diff --git a/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/build.sh b/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/build.sh new file mode 100755 index 00000000..e7e06039 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/build.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +# Build cerc/fixturenet-eth-lighthouse + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +docker build -t cerc/fixturenet-eth-lighthouse:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} $SCRIPT_DIR diff --git a/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/Makefile b/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/Makefile new file mode 100644 index 00000000..5a91fbb1 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/Makefile @@ -0,0 +1,13 @@ +.PHONY: build +build: genesis + +.PHONY: genesis +genesis: genesis-cl + +.PHONY: genesis-cl +genesis-cl: + cd cl; ./build_cl.sh + +.PHONY: clean +clean: + rm -rf build diff --git a/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/beacon_node.sh b/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/beacon_node.sh new file mode 100755 index 00000000..1f906155 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/beacon_node.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash + +# See: https://github.com/sigp/lighthouse/blob/stable/scripts/local_testnet/beacon_node.sh +# +# Starts a beacon node based upon a genesis state created by `./setup.sh`. 
+# + +set -Eeuo pipefail + +source ./vars.env + +SUBSCRIBE_ALL_SUBNETS= +DEBUG_LEVEL=${DEBUG_LEVEL:-debug} + +# Get positional arguments +data_dir=$DATADIR/node_${NODE_NUMBER} +network_port=9001 +http_port=8001 +authrpc_port=8551 + +exec lighthouse \ + bn \ + $SUBSCRIBE_ALL_SUBNETS \ + --debug-level $DEBUG_LEVEL \ + --boot-nodes "$ENR" \ + --datadir $data_dir \ + --testnet-dir $TESTNET_DIR \ + --enable-private-discovery \ + --staking \ + --enr-address $ENR_IP \ + --enr-udp-port $network_port \ + --enr-tcp-port $network_port \ + --port $network_port \ + --http-address 0.0.0.0 \ + --http-port $http_port \ + --disable-packet-filter \ + --execution-endpoint $EXECUTION_ENDPOINT \ + --execution-jwt $JWTSECRET \ + --terminal-total-difficulty-override $ETH1_TTD \ + --suggested-fee-recipient $SUGGESTED_FEE_RECIPIENT \ + --target-peers $((BN_COUNT - 1)) \ + --http-allow-sync-stalled \ diff --git a/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/bootnode.sh b/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/bootnode.sh new file mode 100755 index 00000000..a395f41a --- /dev/null +++ b/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/bootnode.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash + +# See: https://github.com/sigp/lighthouse/blob/stable/scripts/local_testnet/bootnode.sh +# +# Starts a bootnode from the generated enr. +# + +set -Eeuo pipefail + +source ./vars.env + +DEBUG_LEVEL=${1:-info} + +echo "Starting bootnode" + +if [ ! 
-f "$DATADIR/bootnode/enr.dat" ]; then + echo "Generating bootnode enr" + lcli \ + generate-bootnode-enr \ + --ip $ENR_IP \ + --udp-port $BOOTNODE_PORT \ + --tcp-port $BOOTNODE_PORT \ + --genesis-fork-version $GENESIS_FORK_VERSION \ + --output-dir $DATADIR/bootnode-temp + + # Output ENR to a temp dir and mv as "lcli generate-bootnode-enr" will not overwrite an empty dir (mounted volume) + mkdir -p $DATADIR/bootnode + mv $DATADIR/bootnode-temp/* $DATADIR/bootnode + rm -r $DATADIR/bootnode-temp + + echo "Generated bootnode enr" +else + echo "Found existing bootnode enr" +fi + +bootnode_enr=`cat $DATADIR/bootnode/enr.dat` +echo "- $bootnode_enr" > $TESTNET_DIR/boot_enr.yaml +echo "Written bootnode enr to $TESTNET_DIR/boot_enr.yaml" + +exec lighthouse boot_node \ + --testnet-dir $TESTNET_DIR \ + --port $BOOTNODE_PORT \ + --listen-address 0.0.0.0 \ + --disable-packet-filter \ + --network-dir $DATADIR/bootnode \ diff --git a/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/build_cl.sh b/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/build_cl.sh new file mode 100755 index 00000000..ab3ad2af --- /dev/null +++ b/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/build_cl.sh @@ -0,0 +1,60 @@ +#!/usr/bin/env bash + +# +# Deploys the deposit contract and makes deposits for $VALIDATOR_COUNT insecure deterministic validators. +# Produces a testnet specification and a genesis state where the genesis time +# is now + $GENESIS_DELAY. +# +# Generates datadirs for multiple validator keys according to the +# $VALIDATOR_COUNT and $BN_COUNT variables. +# + +set -o nounset -o errexit -o pipefail + +source ./vars.env + +rm -rf $DATADIR +mkdir -p $DATADIR + +NOW=`date +%s` +GENESIS_TIME=`expr $NOW + $GENESIS_DELAY` + +echo "Creating testnet ..." +echo "(Note: errors of the form 'WARN: Scrypt parameters are too weak...' 
below can be safely ignored)" +lcli \ + new-testnet \ + --spec $SPEC_PRESET \ + --deposit-contract-address $ETH1_DEPOSIT_CONTRACT_ADDRESS \ + --testnet-dir $TESTNET_DIR \ + --min-genesis-active-validator-count $GENESIS_VALIDATOR_COUNT \ + --min-genesis-time $GENESIS_TIME \ + --genesis-delay $GENESIS_DELAY \ + --genesis-fork-version $GENESIS_FORK_VERSION \ + --altair-fork-epoch $ALTAIR_FORK_EPOCH \ + --merge-fork-epoch $MERGE_FORK_EPOCH \ + --eth1-id $ETH1_CHAIN_ID \ + --eth1-follow-distance 1 \ + --seconds-per-slot $SECONDS_PER_SLOT \ + --seconds-per-eth1-block $SECONDS_PER_ETH1_BLOCK \ + --force + +echo Specification generated at $TESTNET_DIR. +echo "Generating $VALIDATOR_COUNT validators concurrently... (this may take a while)" + +lcli \ + insecure-validators \ + --count $VALIDATOR_COUNT \ + --base-dir $DATADIR \ + --node-count $BN_COUNT + +echo Validators generated with keystore passwords at $DATADIR. +echo "Building genesis state... (this might take a while)" + +lcli \ + interop-genesis \ + --spec $SPEC_PRESET \ + --genesis-time $GENESIS_TIME \ + --testnet-dir $TESTNET_DIR \ + $GENESIS_VALIDATOR_COUNT + +echo Created genesis state in $TESTNET_DIR diff --git a/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/ready.sh b/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/ready.sh new file mode 100755 index 00000000..ef0a184d --- /dev/null +++ b/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/ready.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +LIGHTHOUSE_BASE_URL=http://localhost:8001 + +result=`wget --no-check-certificate --quiet -O - "$LIGHTHOUSE_BASE_URL/eth/v2/beacon/blocks/head" | jq -r '.data.message.body.execution_payload.block_number'` +if [ ! 
-z "$result" ] && [ $result -gt 0 ]; then + exit 0 +fi + +exit 1 diff --git a/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/reset_genesis_time.sh b/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/reset_genesis_time.sh new file mode 100755 index 00000000..f21a8ee0 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/reset_genesis_time.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +# See: https://github.com/sigp/lighthouse/blob/stable/scripts/local_testnet/reset_genesis_time.sh +# +# Resets the beacon state genesis time to now. +# + +set -Eeuo pipefail + +source ./vars.env + +NOW=${1:-`date +%s`} + +lcli \ + change-genesis-time \ + $TESTNET_DIR/genesis.ssz \ + $NOW + +echo "Reset genesis time to ($NOW)" diff --git a/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/validator_client.sh b/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/validator_client.sh new file mode 100755 index 00000000..30168f80 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/validator_client.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +# See: https://github.com/sigp/lighthouse/blob/stable/scripts/local_testnet/validator_client.sh +# +# Usage: ./validator_client.sh + +set -Eeuo pipefail + +source ./vars.env + +DEBUG_LEVEL=info + +BUILDER_PROPOSALS= + +# Get options +while getopts "pd:" flag; do + case "${flag}" in + p) BUILDER_PROPOSALS="--builder-proposals";; + d) DEBUG_LEVEL=${OPTARG};; + esac +done + +exec lighthouse \ + vc \ + $BUILDER_PROPOSALS \ + --debug-level $DEBUG_LEVEL \ + --validators-dir $DATADIR/node_$NODE_NUMBER/validators \ + --secrets-dir $DATADIR/node_$NODE_NUMBER/secrets \ + --testnet-dir $TESTNET_DIR \ + --init-slashing-protection \ + --beacon-nodes http://localhost:8001 \ + --suggested-fee-recipient $SUGGESTED_FEE_RECIPIENT \ + $VC_ARGS diff --git 
a/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/vars.env b/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/vars.env new file mode 100644 index 00000000..d68cbdcc --- /dev/null +++ b/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/genesis/cl/vars.env @@ -0,0 +1,54 @@ +# Base directories for the validator keys and secrets +DATADIR=${DATADIR:-../build/cl} + +# Directory for the eth2 config +TESTNET_DIR=${TESTNET_DIR:-$DATADIR/testnet} +JWTSECRET=${JWTSECRET:-$DATADIR/jwtsecret} +ENR=${ENR:="SET_AT_RUNTIME"} +ENR_IP=`ip addr | grep inet | grep -v '127.0.0.1' | sort | head -1 | awk '{print $2}' | cut -d '/' -f1` + +GENESIS_FORK_VERSION=${GENESIS_FORK_VERSION:-0x12121212} + +VALIDATOR_COUNT=${VALIDATOR_COUNT:-80} +GENESIS_VALIDATOR_COUNT=${GENESIS_VALIDATOR_COUNT:-80} + +# Number of beacon_node instances that you intend to run +BN_COUNT=${BN_COUNT:-2} + +# Number of validator clients +VC_COUNT=${VC_COUNT:-$BN_COUNT} + +# Number of seconds to delay to start genesis block. +# If started by a script this can be 0, if starting by hand +# use something like 180. 
+GENESIS_DELAY=${GENESIS_DELAY:-0} + +# Port for P2P communication with bootnode +BOOTNODE_PORT=${BOOTNODE_PORT:-4242} + +# Hard fork configuration +ALTAIR_FORK_EPOCH=${ALTAIR_FORK_EPOCH:-0} +MERGE_FORK_EPOCH=${MERGE_FORK_EPOCH:-0} + +# Spec version (mainnet or minimal) +SPEC_PRESET=${SPEC_PRESET:-mainnet} + +# Seconds per Eth2 slot +SECONDS_PER_SLOT=${SECONDS_PER_SLOT:-3} + +# Seconds per Eth1 block +SECONDS_PER_ETH1_BLOCK=${SECONDS_PER_ETH1_BLOCK:-1} + +# Command line arguments for validator client +VC_ARGS=${VC_ARGS:-""} + +EXECUTION_ENDPOINT=${EXECUTION_ENDPOINT:-http://localhost:8551} + +ETH1_GENESIS_JSON=${ETH1_GENESIS_JSON:-"../build/el/geth.json"} +ETH1_CONFIG_YAML=${ETH1_CONFIG_YAML:-"../el/el-config.yaml"} + +ETH1_CHAIN_ID=${ETH1_CHAIN_ID:-`cat $ETH1_GENESIS_JSON | jq -r '.config.chainId'`} +ETH1_TTD=${ETH1_TTD:-`cat $ETH1_GENESIS_JSON | jq -r '.config.terminalTotalDifficulty'`} +ETH1_DEPOSIT_CONTRACT_ADDRESS=${ETH1_DEPOSIT_CONTRACT_ADDRESS:-`cat $ETH1_CONFIG_YAML | grep 'deposit_contract_address' | awk '{ print $2 }' | sed 's/"//g'`} +ETH1_DEPOSIT_CONTRACT_BLOCK=${ETH1_DEPOSIT_CONTRACT_BLOCK:-0x0} +SUGGESTED_FEE_RECIPIENT=`cat ../build/el/accounts.csv | head -1 | cut -d',' -f2` diff --git a/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/run-cl.sh b/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/run-cl.sh new file mode 100755 index 00000000..ec0e9aeb --- /dev/null +++ b/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/run-cl.sh @@ -0,0 +1,80 @@ +#!/bin/bash + +# See https://linuxconfig.org/how-to-propagate-a-signal-to-child-processes-from-a-bash-script +cleanup() { + echo "Signal received, cleaning up..." + kill $(jobs -p) + + wait + echo "Done" +} +trap 'cleanup' SIGINT SIGTERM + +if [ "true" == "$RUN_BOOTNODE" ]; then + cd /opt/testnet/build/cl + python3 -m http.server 3000 & + + + cd /opt/testnet/cl + ./bootnode.sh 2>&1 | tee /var/log/lighthouse_bootnode.log & + bootnode_pid=$! 
+ + wait $bootnode_pid +else + while [ 1 -eq 1 ]; do + echo "Waiting on geth ..." + sleep 5 + result=`wget --no-check-certificate --quiet \ + -O - \ + --method POST \ + --timeout=0 \ + --header 'Content-Type: application/json' \ + --body-data '{ "jsonrpc": "2.0", "id": 1, "method": "eth_blockNumber", "params": [] }' "${ETH1_ENDPOINT:-localhost:8545}" | jq -r '.result'` + if [ ! -z "$result" ] && [ "null" != "$result" ]; then + break + fi + done + + cd /opt/testnet/cl + + if [ -z "$LIGHTHOUSE_GENESIS_STATE_URL" ]; then + # Check if beacon node data exists to avoid resetting genesis time on a restart + if [ -d /opt/testnet/build/cl/node_"$NODE_NUMBER"/beacon ]; then + echo "Skipping genesis time reset" + else + ./reset_genesis_time.sh + fi + else + while [ 1 -eq 1 ]; do + echo "Waiting on Genesis time ..." + sleep 5 + result=`wget --no-check-certificate --quiet -O - --timeout=0 $LIGHTHOUSE_GENESIS_STATE_URL | jq -r '.data.genesis_time'` + if [ ! -z "$result" ]; then + ./reset_genesis_time.sh $result + break; + fi + done + fi + + if [ ! -z "$ENR_URL" ]; then + while [ 1 -eq 1 ]; do + echo "Waiting on ENR for boot node..." + sleep 5 + result=`wget --no-check-certificate --quiet -O - --timeout=0 $ENR_URL` + if [ ! -z "$result" ]; then + export ENR="$result" + break; + fi + done + fi + + export JWTSECRET="/opt/testnet/build/cl/jwtsecret" + echo -n "$JWT" > $JWTSECRET + + ./beacon_node.sh 2>&1 | tee /var/log/lighthouse_bn.log & + beacon_pid=$! + ./validator_client.sh 2>&1 | tee /var/log/lighthouse_vc.log & + validator_pid=$! 
+ + wait $beacon_pid $validator_pid +fi diff --git a/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/status-internal.sh b/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/status-internal.sh new file mode 100755 index 00000000..628d7d48 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/status-internal.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +# Wrapper to facilitate using status.sh inside the container +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi +export LIGHTHOUSE_BASE_URL="http://fixturenet-eth-lighthouse-1:8001" +export GETH_BASE_URL="http://fixturenet-eth-geth-1:8545" +# See: https://stackoverflow.com/a/246128/1701505 +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +$SCRIPT_DIR/status.sh diff --git a/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/status.sh b/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/status.sh new file mode 100755 index 00000000..c6e65a68 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-fixturenet-eth-lighthouse/scripts/status.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi +STATUSES=("geth to generate DAG" "beacon phase0" "beacon altair" "beacon bellatrix pre-merge" "beacon bellatrix merge") +STATUS=0 + + +LIGHTHOUSE_BASE_URL=${LIGHTHOUSE_BASE_URL} +GETH_BASE_URL=${GETH_BASE_URL} + +# TODO: Docker commands below should be replaced by some interface into stack orchestrator +# or some execution environment-neutral mechanism. 
+if [ -z "$LIGHTHOUSE_BASE_URL" ]; then + LIGHTHOUSE_CONTAINER=`docker ps -q -f "name=fixturenet-eth-lighthouse-1-1"` + LIGHTHOUSE_PORT=`docker port $LIGHTHOUSE_CONTAINER 8001 | cut -d':' -f2` + LIGHTHOUSE_BASE_URL="http://localhost:${LIGHTHOUSE_PORT}" +fi + +if [ -z "$GETH_BASE_URL" ]; then + GETH_CONTAINER=`docker ps -q -f "name=fixturenet-eth-geth-1-1"` + GETH_PORT=`docker port $GETH_CONTAINER 8545 | cut -d':' -f2` + GETH_BASE_URL="http://localhost:${GETH_PORT}" +fi + +function inc_status() { + echo " done" + STATUS=$((STATUS + 1)) + if [ $STATUS -lt ${#STATUSES[@]} ]; then + echo -n "Waiting for ${STATUSES[$STATUS]}..." + fi +} + +echo -n "Waiting for ${STATUSES[$STATUS]}..." +while [ $STATUS -lt ${#STATUSES[@]} ]; do + sleep 1 + echo -n "." + case $STATUS in + 0) + result=`wget --no-check-certificate --quiet -O - --method POST --header 'Content-Type: application/json' \ + --body-data '{ "jsonrpc": "2.0", "id": 1, "method": "eth_getBlockByNumber", "params": ["0x3", false] }' $GETH_BASE_URL | jq -r '.result'` + if [ ! -z "$result" ] && [ "null" != "$result" ]; then + inc_status + fi + ;; + 1) + result=`wget --no-check-certificate --quiet -O - "$LIGHTHOUSE_BASE_URL/eth/v2/beacon/blocks/head" | jq -r '.data.message.slot'` + if [ ! -z "$result" ] && [ $result -gt 0 ]; then + inc_status + fi + ;; + 2) + result=`wget --no-check-certificate --quiet -O - "$LIGHTHOUSE_BASE_URL/eth/v2/beacon/blocks/head" | jq -r '.version'` + if [ ! -z "$result" ] && ([ "$result" == "altair" ] || [ "$result" == "bellatrix" ]); then + inc_status + fi + ;; + 3) + result=`wget --no-check-certificate --quiet -O - "$LIGHTHOUSE_BASE_URL/eth/v2/beacon/blocks/head" | jq -r '.version'` + if [ ! -z "$result" ] && [ "$result" == "bellatrix" ]; then + inc_status + fi + ;; + 4) + result=`wget --no-check-certificate --quiet -O - "$LIGHTHOUSE_BASE_URL/eth/v2/beacon/blocks/head" | jq -r '.data.message.body.execution_payload.block_number'` + if [ ! 
-z "$result" ] && [ $result -gt 0 ]; then + inc_status + fi + ;; + esac +done diff --git a/build/lib/app/data/container-build/cerc-foundry/build.sh b/build/lib/app/data/container-build/cerc-foundry/build.sh new file mode 100755 index 00000000..f80276eb --- /dev/null +++ b/build/lib/app/data/container-build/cerc-foundry/build.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +# Build a local version of the foundry-rs/foundry image +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh +docker build -t cerc/foundry:local -f ${CERC_REPO_BASE_DIR}/foundry/Dockerfile-debian ${build_command_args} ${CERC_REPO_BASE_DIR}/foundry diff --git a/build/lib/app/data/container-build/cerc-go-ethereum-foundry/Dockerfile b/build/lib/app/data/container-build/cerc-go-ethereum-foundry/Dockerfile new file mode 100644 index 00000000..3f73abc7 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-go-ethereum-foundry/Dockerfile @@ -0,0 +1,21 @@ +# Note: cerc/foundry is Debian based +FROM cerc/foundry:local + +RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ + && apt-get -y install --no-install-recommends jq curl netcat + +WORKDIR /root + +ARG GENESIS_FILE_PATH=genesis.json + +COPY stateful ./stateful +COPY start-private-network.sh . +COPY deploy-local-network.sh . 
+COPY $GENESIS_FILE_PATH ./genesis.json +# TODO: figure out if this works for aarm64 +COPY --from=cerc/go-ethereum:local /usr/local/bin/geth /bin/geth +RUN chmod +x /bin/geth + +EXPOSE 8545 +EXPOSE 8546 +ENTRYPOINT ["./start-private-network.sh"] diff --git a/build/lib/app/data/container-build/cerc-go-ethereum-foundry/build.sh b/build/lib/app/data/container-build/cerc-go-ethereum-foundry/build.sh new file mode 100755 index 00000000..41781112 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-go-ethereum-foundry/build.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# Build cerc/go-ethereum-foundry + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +# See: https://stackoverflow.com/a/246128/1701505 +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +docker build -t cerc/go-ethereum-foundry:local --build-arg GENESIS_FILE_PATH=genesis-automine.json ${build_command_args} ${SCRIPT_DIR} diff --git a/build/lib/app/data/container-build/cerc-go-ethereum-foundry/deploy-local-network.sh b/build/lib/app/data/container-build/cerc-go-ethereum-foundry/deploy-local-network.sh new file mode 100755 index 00000000..e286bf30 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-go-ethereum-foundry/deploy-local-network.sh @@ -0,0 +1,220 @@ +#!/bin/bash +set -e + +OPTS="./deploy-local-network.sh [] ... 
+./deploy-local-network.sh --help +-- +db-user=name database user +db-password=password database password +db-name=name database name +db-host=address database host +db-port=port database port +db-write=bool turn on database write mode +db-type=name the type of database +db-driver=name the driver used for the database +db-waitforsync=bool Should the statediff service start once geth has synced to head (default: false) +rpc-port=port change RPC port (default: 8545) +rpc-addr=address change RPC address (default: 127.0.0.1) +chain-id=number change chain ID (default: 99) +extra-args=name extra args to pass to geth on startup +period=seconds use a block time instead of instamine +accounts=number create multiple accounts (default: 1) +address=address eth address to add to genesis +save=name after finishing, save snapshot +load=name start from a previously saved snapshot +dir=directory testnet directory +" + +eval "$( + git rev-parse --parseopt -- "$@" <<<"$OPTS" || echo exit $? +)" + +DB_USER=vdbm +DB_PASSWORD=password +DB_NAME=cerc_public +DB_HOST=127.0.0.1 +DB_PORT=5432 +DB_TYPE=postgres +DB_DRIVER=sqlx +DB_WAIT_FOR_SYNC=false +RPC_PORT=8545 +RPC_ADDRESS=127.0.0.1 +PERIOD=0 +CHAINID=99 +ACCOUNTS=0 +ADDRESS= +EXTRA_START_ARGS= +gethdir=$HOME/testnet + +while [[ $1 ]]; do + case $1 in + --) shift; break;; + --db-user) shift; DB_USER=$1;; + --db-password) shift; DB_PASSWORD=$1;; + --db-name) shift; DB_NAME=$1;; + --db-host) shift; DB_HOST=$1;; + --db-port) shift; DB_PORT=$1;; + --db-write) shift; DB_WRITE=$1;; + --db-type) shift; DB_TYPE=$1;; + --db-driver) shift; DB_DRIVER=$1;; + --db-waitforsync) shift; DB_WAIT_FOR_SYNC=$1;; + --rpc-port) shift; RPC_PORT=$1;; + --rpc-addr) shift; RPC_ADDRESS=$1;; + --chain-id) shift; CHAINID=$1;; + --extra-args) shift; EXTRA_START_ARGS=$1;; + --period) shift; PERIOD=$1;; + --accounts) shift; ACCOUNTS=$1;; + --save) shift; SAVE=$1;; + --address) shift; ADDRESS=$1;; + --load) shift; LOAD=$1;; + --dir) shift; gethdir=$1;; + *) printf 
"${0##*/}: internal error: %q\\n" "$1"; exit 1 + esac; shift +done + +mkdir -p "$gethdir/config/" + +# Set a password +if [[ ! -f "$gethdir/config/password" ]] +then + echo "password" > "$gethdir/config/password" +fi + +# Create a genesis file if there is no existing chain. +if [[ ! -f "$gethdir/config/genesis.json" ]] +then +for i in $(seq 0 "$ACCOUNTS"); do + address+=( "$( + geth 2>/dev/null account new --datadir "$gethdir" --password=$gethdir/config/password \ + | grep -o -E "0x[A-Fa-f0-9]*" )" ) + balance+=(' "'"${address[i]}"'": { "balance": "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"}') + EXTRA_DATA="0x3132333400000000000000000000000000000000000000000000000000000000${address[0]#0x}0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +done +if [[ "$USE_GENESIS" != "true" ]] + then + echo "NOT USING GENESIS FILE!!" + echo "USE_GENESIS = $USE_GENESIS" + JSON_VAL='{ + "config": { + "chainId": '"$CHAINID"', + "homesteadBlock": 0, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "clique": { + "period": '"$PERIOD"', + "epoch": 3000 + } + }, + "difficulty": "0x1", + "gaslimit": "0xffffffffffff", + "extraData": "'"$EXTRA_DATA"'", + "alloc": {'"$balance"'} + }' + echo $JSON_VAL | jq . > $gethdir/config/genesis.json + + geth 2>/dev/null --datadir "$gethdir" init "$gethdir/config/genesis.json" + printf "%s\n" "${address[@]}" > "$gethdir/config/account" + else + echo "Using local genesis file" + jq '. 
+ {"extraData": "'"$EXTRA_DATA"'"} + {"alloc": {'"$balance"'}}' ./genesis.json> "$gethdir/config/genesis.json" + geth 2>/dev/null --datadir "$gethdir" init "$gethdir/config/genesis.json" + printf "%s\n" "${address[@]}" > "$gethdir/config/account" + fi +else + address=( $(cat $gethdir/config/account) ) +fi + +export ETH_RPC_URL=http://$RPC_ADDRESS:$RPC_PORT + +port=$((RPC_PORT + 30000)) + +geth version +echo >&2 "testnet: RPC URL: $ETH_RPC_URL" +echo >&2 "testnet: DB ADDRESS: $DB_HOST" +echo >&2 "testnet: TCP port: $port" +echo >&2 "testnet: Chain ID: $CHAINID" +echo >&2 "testnet: Database: $gethdir" +echo >&2 "testnet: Geth log: $gethdir/geth.log" + +echo "$ETH_RPC_URL" > "$gethdir/config/rpc-url" +echo "$port" > "$gethdir/config/node-port" + +set +m +# Uncomment below once waitforsync has been merged +# geth \ +# 2> >(tee "$gethdir/geth.log" | grep --line-buffered Success | sed 's/^/geth: /' >&2) \ +# --datadir "$gethdir" --networkid "$CHAINID" --port="$port" \ +# --mine --miner.threads=1 --allow-insecure-unlock \ +# --http --http.api "web3,eth,net,debug,personal,statediff" --http.corsdomain '*' --http.vhosts '*' --nodiscover \ +# --http.addr="$RPC_ADDRESS" --http.port="$RPC_PORT" --syncmode=full --gcmode=archive \ +# --statediff --statediff.db.host="$DB_HOST" --statediff.db.port="$DB_PORT" --statediff.db.user="$DB_USER" \ +# --statediff.db.password="$DB_PASSWORD" --statediff.db.name="$DB_NAME" \ +# --statediff.db.nodeid 1 --statediff.db.clientname test1 --statediff.writing="$DB_WRITE" \ +# --statediff.db.type="$DB_TYPE" --statediff.db.driver="$DB_DRIVER" --statediff.waitforsync="$DB_WAIT_FOR_SYNC" \ +# --ws --ws.addr="0.0.0.0" --unlock="$(IFS=,; echo "${address[*]}")" --password=<(exit) & + +echo "Starting Geth with following flags" +echo \ + 2> >(tee "$gethdir/geth.log" | grep --line-buffered Success | sed 's/^/geth: /' >&2) \ + --datadir "$gethdir" --networkid "$CHAINID" --port="$port" \ + --mine --miner.threads=1 --allow-insecure-unlock \ + --http --http.api 
"admin,debug,eth,miner,net,personal,txpool,web3,statediff" --http.corsdomain '*' --http.vhosts '*' --nodiscover \ + --http.addr="$RPC_ADDRESS" --http.port="$RPC_PORT" --syncmode=full --gcmode=archive \ + --statediff --statediff.db.host="$DB_HOST" --statediff.db.port="$DB_PORT" --statediff.db.user="$DB_USER" \ + --statediff.db.password="$DB_PASSWORD" --statediff.db.name="$DB_NAME" \ + --statediff.db.nodeid 1 --statediff.db.clientname test1 --statediff.writing="$DB_WRITE" \ + --statediff.db.type="$DB_TYPE" --statediff.db.driver="$DB_DRIVER" \ + --ws --ws.addr="0.0.0.0" --ws.origins '*' --ws.api=admin,debug,eth,miner,net,personal,txpool,web3 \ + --nat=none --miner.gasprice 16000000000 --nat=none \ + --unlock="$(IFS=,; echo "${address[*]}")" --password="$gethdir/config/password" \ + $EXTRA_START_ARGS & +geth \ + 2> >(tee "$gethdir/geth.log" | grep --line-buffered Success | sed 's/^/geth: /' >&2) \ + --datadir "$gethdir" --networkid "$CHAINID" --port="$port" \ + --mine --miner.threads=1 --allow-insecure-unlock \ + --http --http.api "admin,debug,eth,miner,net,personal,txpool,web3,statediff" --http.corsdomain '*' --http.vhosts '*' --nodiscover \ + --http.addr="$RPC_ADDRESS" --http.port="$RPC_PORT" --syncmode=full --gcmode=archive \ + --statediff --statediff.db.host="$DB_HOST" --statediff.db.port="$DB_PORT" --statediff.db.user="$DB_USER" \ + --statediff.db.password="$DB_PASSWORD" --statediff.db.name="$DB_NAME" \ + --statediff.db.nodeid 1 --statediff.db.clientname test1 --statediff.writing="$DB_WRITE" \ + --statediff.db.type="$DB_TYPE" --statediff.db.driver="$DB_DRIVER" \ + --ws --ws.addr="0.0.0.0" --ws.origins '*' --ws.api=admin,debug,eth,miner,net,personal,txpool,web3 \ + --nat=none --miner.gasprice 16000000000 --nat=none \ + --unlock="$(IFS=,; echo "${address[*]}")" --password="$gethdir/config/password" \ + $EXTRA_START_ARGS & + +gethpid=$! 
+echo "Geth started" +echo "Geth PID: $gethpid" + +clean() { + ( set -x; kill -INT $gethpid; wait ) + if [[ $SAVE ]]; then + echo >&2 "testnet: saving $gethdir/snapshots/$SAVE" + mkdir -p "$gethdir/snapshots/$SAVE" + cp -r "$gethdir/keystore" "$gethdir/snapshots/$SAVE" + cp -r "$gethdir/config" "$gethdir/snapshots/$SAVE" + geth >/dev/null 2>&1 --datadir "$gethdir" \ + export "$gethdir/snapshots/$SAVE/backup" + fi +} +trap clean EXIT + +echo "Curling: $ETH_RPC_URL" +until curl -s "$ETH_RPC_URL"; do sleep 1; done + +echo "Curling: $ETH_RPC_URL complete" +export ETH_KEYSTORE=$gethdir/keystore +export ETH_PASSWORD=$gethdir/config/password + +printf 'testnet: Account: %s (default)\n' "${address[0]}" >&2 + +[[ "${#address[@]}" -gt 1 ]] && printf 'testnet: Account: %s\n' "${address[@]:1}" >&2 + +echo "Geth Start up completed!" +while true; do sleep 3600; done diff --git a/build/lib/app/data/container-build/cerc-go-ethereum-foundry/genesis-automine.json b/build/lib/app/data/container-build/cerc-go-ethereum-foundry/genesis-automine.json new file mode 100644 index 00000000..6cbf7f26 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-go-ethereum-foundry/genesis-automine.json @@ -0,0 +1,19 @@ +{ + "config": { + "chainId": 99, + "homesteadBlock": 0, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "clique": { + "period": 5, + "epoch": 3000 + } + }, + "difficulty": "0x1", + "gaslimit": "0xffffffffffff" +} diff --git a/build/lib/app/data/container-build/cerc-go-ethereum-foundry/genesis.json b/build/lib/app/data/container-build/cerc-go-ethereum-foundry/genesis.json new file mode 100644 index 00000000..9319b56d --- /dev/null +++ b/build/lib/app/data/container-build/cerc-go-ethereum-foundry/genesis.json @@ -0,0 +1,19 @@ +{ + "config": { + "chainId": 99, + "homesteadBlock": 0, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + 
"constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "clique": { + "period": 0, + "epoch": 3000 + } + }, + "difficulty": "0x1", + "gaslimit": "0xffffffffffff" +} diff --git a/build/lib/app/data/container-build/cerc-go-ethereum-foundry/start-private-network.sh b/build/lib/app/data/container-build/cerc-go-ethereum-foundry/start-private-network.sh new file mode 100755 index 00000000..58bc090d --- /dev/null +++ b/build/lib/app/data/container-build/cerc-go-ethereum-foundry/start-private-network.sh @@ -0,0 +1,64 @@ +#!/bin/bash + +set -ex + +# clean up +trap 'killall geth' EXIT +trap "exit 1" SIGINT SIGTERM + +ETHDIR="/root/ethereum" +mkdir -p $ETHDIR +/bin/bash deploy-local-network.sh --rpc-addr 0.0.0.0 --db-user $DB_USER --db-password $DB_PASSWORD --db-name $DB_NAME \ + --db-host $DB_HOST --db-port $DB_PORT --db-write $DB_WRITE --dir "$ETHDIR" --address $ADDRESS \ + --db-type $DB_TYPE --db-driver $DB_DRIVER --db-waitforsync $DB_WAIT_FOR_SYNC --chain-id $CHAIN_ID --extra-args "$EXTRA_START_ARGS" & + +# give it a few secs to start up +COUNT=0 +ATTEMPTS=15 +until $(nc -v localhost 8545) || [[ $COUNT -eq $ATTEMPTS ]]; do echo -e "$(( COUNT++ ))... 
\c"; sleep 10; done +[[ $COUNT -eq $ATTEMPTS ]] && echo "Could not connect to localhost 8545" && (exit 1) + +# Run tests +cd stateful +forge build +forge test --fork-url http://localhost:8545 + +# Deploy contracts + +ETH_KEYSTORE_FILES=() +echo "ETH KEYSTORE: $ETHDIR/keystore" +for entry in `ls $ETHDIR/keystore`; do + ETH_KEYSTORE_FILES+=("${ETHDIR}/keystore/${entry}") +done + +echo "ETH_KEYSTORE_FILES: $ETH_KEYSTORE_FILES" +ETH_KEYSTORE_FILE=${ETH_KEYSTORE_FILES[0]} + +mkdir -p ~/transaction_info +echo $ETH_KEYSTORE_FILE > ~/transaction_info/CURRENT_ETH_KEYSTORE_FILE +echo $ETHDIR > ~/transaction_info/ETHDIR + +if [ "${#ETH_KEYSTORE_FILES[@]}" -eq 1 ]; then + echo "Only one KEYSTORE" +else + echo "WARNING: More than one file in keystore: ${ETH_KEYSTORE_FILES}" +fi + +DEPLOYED_ADDRESS=$(forge create --keystore $(cat ~/transaction_info/CURRENT_ETH_KEYSTORE_FILE) --rpc-url http://127.0.0.1:8545 --constructor-args 1 --password $(cat ${ETHDIR}/config/password) --legacy /root/stateful/src/Stateful.sol:Stateful | grep "Deployed to:" | cut -d " " -f 3) +echo "Contract has been deployed to: $DEPLOYED_ADDRESS" + +echo $DEPLOYED_ADDRESS > ~/transaction_info/STATEFUL_TEST_DEPLOYED_ADDRESS +# Call a transaction + +#TX_OUT=$(cast send --keystore $ETH_KEYSTORE_FILE --rpc-url http://127.0.0.1:8545 --password "" --legacy $DEPLOYED_ADDRESS "off()") +TX_OUT=$(cast send --keystore $(cat ~/transaction_info/CURRENT_ETH_KEYSTORE_FILE) --rpc-url http://127.0.0.1:8545 --password $(cat $(cat ~/transaction_info/ETHDIR)/config/password) --legacy $(cat ~/transaction_info/STATEFUL_TEST_DEPLOYED_ADDRESS) "inc()") +echo 'cast send --keystore $(cat ~/transaction_info/CURRENT_ETH_KEYSTORE_FILE) --rpc-url http://127.0.0.1:8545 --password $(cat $(cat ~/transaction_info/ETHDIR)/config/password) --legacy $(cat ~/transaction_info/STATEFUL_TEST_DEPLOYED_ADDRESS) "inc()" ' > ~/transaction_info/NEW_TRANSACTION +# Simply run the command below whenever you want to call the smart contract and create a new 
block +chmod +x ~/transaction_info/NEW_TRANSACTION + + +echo "TX OUTPUT: $TX_OUT" + + +# Run forever +tail -f /dev/null diff --git a/build/lib/app/data/container-build/cerc-go-ethereum-foundry/stateful/foundry.toml b/build/lib/app/data/container-build/cerc-go-ethereum-foundry/stateful/foundry.toml new file mode 100644 index 00000000..19903e0b --- /dev/null +++ b/build/lib/app/data/container-build/cerc-go-ethereum-foundry/stateful/foundry.toml @@ -0,0 +1,7 @@ +[default] +src = 'src' +out = 'out' +libs = ['lib'] +remappings = ['ds-test/=lib/ds-test/src/'] + +# See more config options https://github.com/gakonst/foundry/tree/master/config \ No newline at end of file diff --git a/build/lib/app/data/container-build/cerc-go-ethereum-foundry/stateful/src/Stateful.sol b/build/lib/app/data/container-build/cerc-go-ethereum-foundry/stateful/src/Stateful.sol new file mode 100644 index 00000000..137f9a5a --- /dev/null +++ b/build/lib/app/data/container-build/cerc-go-ethereum-foundry/stateful/src/Stateful.sol @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity 0.8.10; + +contract Stateful { + uint x; + + constructor(uint y) public { + x = y; + } + + function off() public { + require(x == 1); + x = 0; + } + + function on() public { + require(x == 0); + x = 1; + } + function inc() public { + x = x + 1; + } +} \ No newline at end of file diff --git a/build/lib/app/data/container-build/cerc-go-ethereum/build.sh b/build/lib/app/data/container-build/cerc-go-ethereum/build.sh new file mode 100755 index 00000000..171d0079 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-go-ethereum/build.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +# Build cerc/go-ethereum +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh +docker build -t cerc/go-ethereum:local ${build_command_args} ${CERC_REPO_BASE_DIR}/go-ethereum diff --git a/build/lib/app/data/container-build/cerc-ipld-eth-beacon-db/build.sh b/build/lib/app/data/container-build/cerc-ipld-eth-beacon-db/build.sh new file 
mode 100755 index 00000000..47875ec3 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-ipld-eth-beacon-db/build.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +# Build cerc/ipld-eth-beacon-db +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh +docker build -t cerc/ipld-eth-beacon-db:local ${build_command_args} ${CERC_REPO_BASE_DIR}/ipld-eth-beacon-db diff --git a/build/lib/app/data/container-build/cerc-ipld-eth-beacon-indexer/build.sh b/build/lib/app/data/container-build/cerc-ipld-eth-beacon-indexer/build.sh new file mode 100755 index 00000000..d304bf3f --- /dev/null +++ b/build/lib/app/data/container-build/cerc-ipld-eth-beacon-indexer/build.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +# Build cerc/ipld-eth-beacon-indexer +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh +docker build -t cerc/ipld-eth-beacon-indexer:local ${build_command_args} ${CERC_REPO_BASE_DIR}/ipld-eth-beacon-indexer diff --git a/build/lib/app/data/container-build/cerc-ipld-eth-db/build.sh b/build/lib/app/data/container-build/cerc-ipld-eth-db/build.sh new file mode 100755 index 00000000..d454f3f8 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-ipld-eth-db/build.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +# Build cerc/ipld-eth-db +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh +docker build -t cerc/ipld-eth-db:local ${build_command_args} ${CERC_REPO_BASE_DIR}/ipld-eth-db diff --git a/build/lib/app/data/container-build/cerc-ipld-eth-server/build.sh b/build/lib/app/data/container-build/cerc-ipld-eth-server/build.sh new file mode 100755 index 00000000..04c3907e --- /dev/null +++ b/build/lib/app/data/container-build/cerc-ipld-eth-server/build.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +# Build cerc/ipld-eth-server +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh +docker build -t cerc/ipld-eth-server:local ${build_command_args} ${CERC_REPO_BASE_DIR}/ipld-eth-server diff --git a/build/lib/app/data/container-build/cerc-keycloak/Dockerfile 
b/build/lib/app/data/container-build/cerc-keycloak/Dockerfile new file mode 100644 index 00000000..a366b4eb --- /dev/null +++ b/build/lib/app/data/container-build/cerc-keycloak/Dockerfile @@ -0,0 +1,4 @@ +FROM quay.io/keycloak/keycloak:20.0 +WORKDIR /opt/keycloak/providers +RUN curl -L https://github.com/aerogear/keycloak-metrics-spi/releases/download/2.5.3/keycloak-metrics-spi-2.5.3.jar --output keycloak-metrics-spi.jar +RUN curl -L https://github.com/cerc-io/keycloak-api-key-demo/releases/download/v0.3/api-key-module-0.3.jar --output api-key-module.jar diff --git a/build/lib/app/data/container-build/cerc-keycloak/build.sh b/build/lib/app/data/container-build/cerc-keycloak/build.sh new file mode 100755 index 00000000..be8a7d11 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-keycloak/build.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +# +#Build cerc/keycloack + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +# See: https://stackoverflow.com/a/246128/1701505 +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +docker build -t cerc/keycloak:local ${build_command_args} ${SCRIPT_DIR} diff --git a/build/lib/app/data/container-build/cerc-laconic-console-host/Dockerfile b/build/lib/app/data/container-build/cerc-laconic-console-host/Dockerfile new file mode 100644 index 00000000..7e3fc46b --- /dev/null +++ b/build/lib/app/data/container-build/cerc-laconic-console-host/Dockerfile @@ -0,0 +1,69 @@ +# Originally from: https://github.com/devcontainers/images/blob/main/src/javascript-node/.devcontainer/Dockerfile +# [Choice] Node.js version (use -bullseye variants on local arm64/Apple Silicon): 18, 16, 14, 18-bullseye, 16-bullseye, 14-bullseye, 18-buster, 16-buster, 14-buster +ARG VARIANT=18-bullseye +FROM node:${VARIANT} + +ARG USERNAME=node +ARG NPM_GLOBAL=/usr/local/share/npm-global + +# This container pulls npm packages from a local registry configured via these env vars +ARG CERC_NPM_REGISTRY_URL +ARG CERC_NPM_AUTH_TOKEN + +# Add 
NPM global to PATH. +ENV PATH=${NPM_GLOBAL}/bin:${PATH} +# Prevents npm from printing version warnings +ENV NPM_CONFIG_UPDATE_NOTIFIER=false + +RUN \ + # Configure global npm install location, use group to adapt to UID/GID changes + if ! cat /etc/group | grep -e "^npm:" > /dev/null 2>&1; then groupadd -r npm; fi \ + && usermod -a -G npm ${USERNAME} \ + && umask 0002 \ + && mkdir -p ${NPM_GLOBAL} \ + && touch /usr/local/etc/npmrc \ + && chown ${USERNAME}:npm ${NPM_GLOBAL} /usr/local/etc/npmrc \ + && chmod g+s ${NPM_GLOBAL} \ + && npm config -g set prefix ${NPM_GLOBAL} \ + && su ${USERNAME} -c "npm config -g set prefix ${NPM_GLOBAL}" \ + # Install eslint + && su ${USERNAME} -c "umask 0002 && npm install -g eslint" \ + && npm cache clean --force > /dev/null 2>&1 + +# [Optional] Uncomment this section to install additional OS packages. +RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ + && apt-get -y install --no-install-recommends jq + +# [Optional] Uncomment if you want to install an additional version of node using nvm +# ARG EXTRA_NODE_VERSION=10 +# RUN su node -c "source /usr/local/share/nvm/nvm.sh && nvm install ${EXTRA_NODE_VERSION}" + +# We do this to get a yq binary from the published container, for the correct architecture we're building here +COPY --from=docker.io/mikefarah/yq:latest /usr/bin/yq /usr/local/bin/yq + +RUN mkdir -p /scripts +COPY ./apply-webapp-config.sh /scripts +COPY ./start-serving-app.sh /scripts + +# [Optional] Uncomment if you want to install more global node modules +# RUN su node -c "npm install -g " + +# Configure the local npm registry +RUN npm config set @cerc-io:registry ${CERC_NPM_REGISTRY_URL} \ + && npm config set @lirewine:registry ${CERC_NPM_REGISTRY_URL} \ + && npm config set -- ${CERC_NPM_REGISTRY_URL}:_authToken ${CERC_NPM_AUTH_TOKEN} + +RUN mkdir -p /config +COPY ./config.yml /config + +# Install simple web server for now (use nginx perhaps later) +RUN yarn global add http-server + +# Globally install the 
payload web app package +RUN yarn global add @cerc-io/console-app + +# Expose port for http +EXPOSE 80 + +# Default command sleeps forever so docker doesn't kill it +CMD ["/scripts/start-serving-app.sh"] diff --git a/build/lib/app/data/container-build/cerc-laconic-console-host/apply-webapp-config.sh b/build/lib/app/data/container-build/cerc-laconic-console-host/apply-webapp-config.sh new file mode 100755 index 00000000..bf041708 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-laconic-console-host/apply-webapp-config.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi +if [[ $# -ne 2 ]]; then + echo "Illegal number of parameters" >&2 + exit 1 +fi +config_file_name=$1 +webapp_files_dir=$2 +if ! [[ -f ${config_file_name} ]]; then + echo "Config file ${config_file_name} does not exist" >&2 + exit 1 +fi +if ! [[ -d ${webapp_files_dir} ]]; then + echo "Webapp directory ${webapp_files_dir} does not exist" >&2 + exit 1 +fi +# First some magic using yq to translate our yaml config file into an array of key value pairs like: +# LACONIC_HOSTED_CONFIG_= +readarray -t config_kv_pair_array < <( yq '.. | select(length > 2) | ([path | join("_"), .] 
| join("=") )' ${config_file_name} | sed 's/^/LACONIC_HOSTED_CONFIG_/' ) +declare -p config_kv_pair_array +# Then iterate over that kv array making the template substitution in our web app files +for kv_pair_string in "${config_kv_pair_array[@]}" +do + kv_pair=(${kv_pair_string//=/ }) + template_string_to_replace=${kv_pair[0]} + template_value_to_substitute=${kv_pair[1]} + template_value_to_substitute_expanded=${template_value_to_substitute//LACONIC_HOSTED_ENDPOINT/${LACONIC_HOSTED_ENDPOINT}} + # Run find and sed to do the substitution of one variable over all files + # See: https://stackoverflow.com/a/21479607/1701505 + echo "Substituting: ${template_string_to_replace} = ${template_value_to_substitute_expanded}" + # Note: we do not escape our strings, on the expectation they do not container the '#' char. + find ${webapp_files_dir} -type f -exec sed -i 's#'${template_string_to_replace}'#'${template_value_to_substitute_expanded}'#g' {} + +done diff --git a/build/lib/app/data/container-build/cerc-laconic-console-host/build.sh b/build/lib/app/data/container-build/cerc-laconic-console-host/build.sh new file mode 100755 index 00000000..77a38917 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-laconic-console-host/build.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash +# Build cerc/laconic-registry-cli + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +# See: https://stackoverflow.com/a/246128/1701505 +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +docker build -t cerc/laconic-console-host:local ${build_command_args} -f ${SCRIPT_DIR}/Dockerfile \ + --add-host gitea.local:host-gateway \ + --build-arg CERC_NPM_AUTH_TOKEN --build-arg CERC_NPM_REGISTRY_URL ${SCRIPT_DIR} diff --git a/build/lib/app/data/container-build/cerc-laconic-console-host/config.yml b/build/lib/app/data/container-build/cerc-laconic-console-host/config.yml new file mode 100644 index 00000000..d557ace5 --- /dev/null +++ 
b/build/lib/app/data/container-build/cerc-laconic-console-host/config.yml @@ -0,0 +1,6 @@ +# Config for laconic-console running in a fixturenet with laconicd + +services: + wns: + server: 'LACONIC_HOSTED_ENDPOINT:9473/api' + webui: 'LACONIC_HOSTED_ENDPOINT:9473/console' diff --git a/build/lib/app/data/container-build/cerc-laconic-console-host/start-serving-app.sh b/build/lib/app/data/container-build/cerc-laconic-console-host/start-serving-app.sh new file mode 100755 index 00000000..a322e5fb --- /dev/null +++ b/build/lib/app/data/container-build/cerc-laconic-console-host/start-serving-app.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi +# TODO: Don't hard wire this: +webapp_files_dir=/usr/local/share/.config/yarn/global/node_modules/@cerc-io/console-app/dist/production +/scripts/apply-webapp-config.sh /config/config.yml ${webapp_files_dir} +http-server -p 80 ${webapp_files_dir} diff --git a/build/lib/app/data/container-build/cerc-laconic-registry-cli/Dockerfile b/build/lib/app/data/container-build/cerc-laconic-registry-cli/Dockerfile new file mode 100644 index 00000000..7859adb4 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-laconic-registry-cli/Dockerfile @@ -0,0 +1,63 @@ +# Originally from: https://github.com/devcontainers/images/blob/main/src/javascript-node/.devcontainer/Dockerfile +# [Choice] Node.js version (use -bullseye variants on local arm64/Apple Silicon): 18, 16, 14, 18-bullseye, 16-bullseye, 14-bullseye, 18-buster, 16-buster, 14-buster +ARG VARIANT=18-bullseye +FROM node:${VARIANT} + +ARG USERNAME=node +ARG NPM_GLOBAL=/usr/local/share/npm-global + +# This container pulls npm packages from a local registry configured via these env vars +ARG CERC_NPM_REGISTRY_URL +ARG CERC_NPM_AUTH_TOKEN + +# Add NPM global to PATH. 
+ENV PATH=${NPM_GLOBAL}/bin:${PATH} +# Prevents npm from printing version warnings +ENV NPM_CONFIG_UPDATE_NOTIFIER=false + +RUN \ + # Configure global npm install location, use group to adapt to UID/GID changes + if ! cat /etc/group | grep -e "^npm:" > /dev/null 2>&1; then groupadd -r npm; fi \ + && usermod -a -G npm ${USERNAME} \ + && umask 0002 \ + && mkdir -p ${NPM_GLOBAL} \ + && touch /usr/local/etc/npmrc \ + && chown ${USERNAME}:npm ${NPM_GLOBAL} /usr/local/etc/npmrc \ + && chmod g+s ${NPM_GLOBAL} \ + && npm config -g set prefix ${NPM_GLOBAL} \ + && su ${USERNAME} -c "npm config -g set prefix ${NPM_GLOBAL}" \ + # Install eslint + && su ${USERNAME} -c "umask 0002 && npm install -g eslint" \ + && npm cache clean --force > /dev/null 2>&1 + +# [Optional] Uncomment this section to install additional OS packages. +RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ + && apt-get -y install --no-install-recommends jq + +# [Optional] Uncomment if you want to install an additional version of node using nvm +# ARG EXTRA_NODE_VERSION=10 +# RUN su node -c "source /usr/local/share/nvm/nvm.sh && nvm install ${EXTRA_NODE_VERSION}" + +# [Optional] Uncomment if you want to install more global node modules +# RUN su node -c "npm install -g " + +# Configure the local npm registry +RUN npm config set @cerc-io:registry ${CERC_NPM_REGISTRY_URL} \ + && npm config set @lirewine:registry ${CERC_NPM_REGISTRY_URL} \ + && npm config set -- ${CERC_NPM_REGISTRY_URL}:_authToken ${CERC_NPM_AUTH_TOKEN} + +# TODO: the image at this point could be made a base image for several different CLI images +# that install different Node-based CLI commands + +# Globally install the cli package +RUN yarn global add @cerc-io/laconic-registry-cli + +# Add scripts +RUN mkdir /scripts +ENV PATH="${PATH}:/scripts" +COPY ./create-demo-records.sh /scripts +COPY ./import-key.sh /scripts +COPY ./import-address.sh /scripts + +# Default command sleeps forever so docker doesn't kill it +CMD ["sh", "-c", 
"while :; do sleep 600; done"] diff --git a/build/lib/app/data/container-build/cerc-laconic-registry-cli/build.sh b/build/lib/app/data/container-build/cerc-laconic-registry-cli/build.sh new file mode 100755 index 00000000..c9379856 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-laconic-registry-cli/build.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash +# Build cerc/laconic-registry-cli + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +# See: https://stackoverflow.com/a/246128/1701505 +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +docker build -t cerc/laconic-registry-cli:local ${build_command_args} -f ${SCRIPT_DIR}/Dockerfile \ + --add-host gitea.local:host-gateway \ + --build-arg CERC_NPM_AUTH_TOKEN --build-arg CERC_NPM_REGISTRY_URL ${SCRIPT_DIR} diff --git a/build/lib/app/data/container-build/cerc-laconic-registry-cli/create-demo-records.sh b/build/lib/app/data/container-build/cerc-laconic-registry-cli/create-demo-records.sh new file mode 100755 index 00000000..0d90da8d --- /dev/null +++ b/build/lib/app/data/container-build/cerc-laconic-registry-cli/create-demo-records.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash +# Create some demo/test records in the registry +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi +registry_command="laconic cns" +record_1_filename=demo-record-1.yml +cat < ${record_1_filename} +record: + type: WebsiteRegistrationRecord + url: 'https://cerc.io' + repo_registration_record_cid: QmSnuWmxptJZdLJpKRarxBMS2Ju2oANVrgbr2xWbie9b2D + build_artifact_cid: QmP8jTG1m9GSDJLCbeWhVSVgEzCPPwXRdCRuJtQ5Tz9Kc9 + tls_cert_cid: QmbWqxBEKC3P8tqsKc98xmWNzrzDtRLMiMPL8wBuTGsMnR + version: 1.0.23 +EOF +# Check we have funds +funds_response=$(${registry_command} account get --address $(cat my-address.txt)) +funds_balance=$(echo ${funds_response} | jq -r .[0].balance[0].quantity) +echo "Balance is: ${funds_balance}" +# Create a bond +bond_create_result=$(${registry_command} bond create --type aphoton --quantity 
1000000000) +bond_id=$(echo ${bond_create_result} | jq -r .bondId) +echo "Created bond with id: ${bond_id}" +# Publish a demo record +publish_response=$(${registry_command} record publish --filename ${record_1_filename} --bond-id ${bond_id}) +published_record_id=$(echo ${publish_response} | jq -r .id) +echo "Published ${record_1_filename} with id: ${published_record_id}" diff --git a/build/lib/app/data/container-build/cerc-laconic-registry-cli/import-address.sh b/build/lib/app/data/container-build/cerc-laconic-registry-cli/import-address.sh new file mode 100644 index 00000000..06b5d08d --- /dev/null +++ b/build/lib/app/data/container-build/cerc-laconic-registry-cli/import-address.sh @@ -0,0 +1,2 @@ +#!/bin/sh +echo ${1} > my-address.txt diff --git a/build/lib/app/data/container-build/cerc-laconic-registry-cli/import-key.sh b/build/lib/app/data/container-build/cerc-laconic-registry-cli/import-key.sh new file mode 100644 index 00000000..3bafec4b --- /dev/null +++ b/build/lib/app/data/container-build/cerc-laconic-registry-cli/import-key.sh @@ -0,0 +1,2 @@ +#!/bin/sh +sed 's/REPLACE_WITH_MYKEY/'${1}'/' registry-cli-config-template.yml > config.yml diff --git a/build/lib/app/data/container-build/cerc-laconicd/build.sh b/build/lib/app/data/container-build/cerc-laconicd/build.sh new file mode 100755 index 00000000..3d897446 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-laconicd/build.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +# Build cerc/laconicd +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh +docker build -t cerc/laconicd:local ${build_command_args} ${CERC_REPO_BASE_DIR}/laconicd \ No newline at end of file diff --git a/build/lib/app/data/container-build/cerc-lighthouse/Dockerfile b/build/lib/app/data/container-build/cerc-lighthouse/Dockerfile new file mode 100644 index 00000000..7d4fe5d8 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-lighthouse/Dockerfile @@ -0,0 +1,8 @@ +FROM sigp/lighthouse:v4.0.1-modern + +RUN apt-get update; apt-get 
install bash netcat curl less jq -y; + +WORKDIR /root/ +ADD start-lighthouse.sh . + +ENTRYPOINT [ "./start-lighthouse.sh" ] diff --git a/build/lib/app/data/container-build/cerc-lighthouse/build.sh b/build/lib/app/data/container-build/cerc-lighthouse/build.sh new file mode 100755 index 00000000..2e9cfe3c --- /dev/null +++ b/build/lib/app/data/container-build/cerc-lighthouse/build.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# Build cerc/lighthouse + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +# See: https://stackoverflow.com/a/246128/1701505 +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +docker build -t cerc/lighthouse:local ${build_command_args} ${SCRIPT_DIR} diff --git a/build/lib/app/data/container-build/cerc-lighthouse/start-lighthouse.sh b/build/lib/app/data/container-build/cerc-lighthouse/start-lighthouse.sh new file mode 100755 index 00000000..f254ada9 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-lighthouse/start-lighthouse.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +# This bash script will be used to start the lighthouse client +# The 0.0.0.0 is not safe. 
+ +lighthouse bn \ + --http --http-address 0.0.0.0 --metrics --private --network $NETWORK & + +tail -f /dev/null diff --git a/build/lib/app/data/container-build/cerc-lotus/Dockerfile b/build/lib/app/data/container-build/cerc-lotus/Dockerfile new file mode 100644 index 00000000..963de704 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-lotus/Dockerfile @@ -0,0 +1,138 @@ +##################################### +FROM golang:1.19.7-buster AS lotus-builder +MAINTAINER Lotus Development Team + +RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev + +ENV XDG_CACHE_HOME="/tmp" + +### taken from https://github.com/rust-lang/docker-rust/blob/master/1.63.0/buster/Dockerfile +ENV RUSTUP_HOME=/usr/local/rustup \ + CARGO_HOME=/usr/local/cargo \ + PATH=/usr/local/cargo/bin:$PATH \ + RUST_VERSION=1.63.0 + +RUN set -eux; \ + dpkgArch="$(dpkg --print-architecture)"; \ + case "${dpkgArch##*-}" in \ + amd64) rustArch='x86_64-unknown-linux-gnu'; rustupSha256='5cc9ffd1026e82e7fb2eec2121ad71f4b0f044e88bca39207b3f6b769aaa799c' ;; \ + arm64) rustArch='aarch64-unknown-linux-gnu'; rustupSha256='e189948e396d47254103a49c987e7fb0e5dd8e34b200aa4481ecc4b8e41fb929' ;; \ + *) echo >&2 "unsupported architecture: ${dpkgArch}"; exit 1 ;; \ + esac; \ + url="https://static.rust-lang.org/rustup/archive/1.25.1/${rustArch}/rustup-init"; \ + wget "$url"; \ + echo "${rustupSha256} *rustup-init" | sha256sum -c -; \ + chmod +x rustup-init; \ + ./rustup-init -y --no-modify-path --profile minimal --default-toolchain $RUST_VERSION --default-host ${rustArch}; \ + rm rustup-init; \ + chmod -R a+w $RUSTUP_HOME $CARGO_HOME; \ + rustup --version; \ + cargo --version; \ + rustc --version; + +COPY ./ /opt/filecoin +WORKDIR /opt/filecoin + +#RUN scripts/docker-git-state-check.sh + +### make configurable filecoin-ffi build +ARG FFI_BUILD_FROM_SOURCE=0 +ENV FFI_BUILD_FROM_SOURCE=${FFI_BUILD_FROM_SOURCE} + +RUN make clean deps + +ARG 
RUSTFLAGS="" +ARG GOFLAGS="" + +#RUN make buildall +RUN make 2k + +##################################### +FROM ubuntu:20.04 AS lotus-base +MAINTAINER Lotus Development Team + +# Base resources +COPY --from=lotus-builder /etc/ssl/certs /etc/ssl/certs +COPY --from=lotus-builder /lib/*/libdl.so.2 /lib/ +COPY --from=lotus-builder /lib/*/librt.so.1 /lib/ +COPY --from=lotus-builder /lib/*/libgcc_s.so.1 /lib/ +COPY --from=lotus-builder /lib/*/libutil.so.1 /lib/ +COPY --from=lotus-builder /usr/lib/*/libltdl.so.7 /lib/ +COPY --from=lotus-builder /usr/lib/*/libnuma.so.1 /lib/ +COPY --from=lotus-builder /usr/lib/*/libhwloc.so.5 /lib/ +COPY --from=lotus-builder /usr/lib/*/libOpenCL.so.1 /lib/ + +RUN useradd -r -u 532 -U fc \ + && mkdir -p /etc/OpenCL/vendors \ + && echo "libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd + +##################################### +FROM lotus-base AS lotus +MAINTAINER Lotus Development Team + +COPY --from=lotus-builder /opt/filecoin/lotus /usr/local/bin/ +COPY --from=lotus-builder /opt/filecoin/lotus-shed /usr/local/bin/ +#COPY scripts/docker-lotus-entrypoint.sh / +#COPY myscripts/setup-node.sh /docker-entrypoint-scripts.d/setup-node.sh + +ARG DOCKER_LOTUS_IMPORT_SNAPSHOT https://snapshots.mainnet.filops.net/minimal/latest +ENV DOCKER_LOTUS_IMPORT_SNAPSHOT ${DOCKER_LOTUS_IMPORT_SNAPSHOT} +ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters +ENV LOTUS_PATH /var/lib/lotus +ENV DOCKER_LOTUS_IMPORT_WALLET "" + +RUN mkdir /var/lib/lotus /var/tmp/filecoin-proof-parameters +RUN chown fc: /var/lib/lotus /var/tmp/filecoin-proof-parameters + +VOLUME /var/lib/lotus +VOLUME /var/tmp/filecoin-proof-parameters + +USER fc + +EXPOSE 1234 + +ENTRYPOINT ["/docker-lotus-entrypoint.sh"] + +CMD ["-help"] + +##################################### +FROM lotus-base AS lotus-all-in-one + +ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters +ENV LOTUS_MINER_PATH /var/lib/lotus-miner +ENV LOTUS_PATH /var/lib/lotus +ENV LOTUS_WORKER_PATH 
/var/lib/lotus-worker +ENV WALLET_PATH /var/lib/lotus-wallet + +COPY --from=lotus-builder /opt/filecoin/lotus /usr/local/bin/ +COPY --from=lotus-builder /opt/filecoin/lotus-seed /usr/local/bin/ +COPY --from=lotus-builder /opt/filecoin/lotus-shed /usr/local/bin/ +#COPY --from=lotus-builder /opt/filecoin/lotus-wallet /usr/local/bin/ +#COPY --from=lotus-builder /opt/filecoin/lotus-gateway /usr/local/bin/ +COPY --from=lotus-builder /opt/filecoin/lotus-miner /usr/local/bin/ +COPY --from=lotus-builder /opt/filecoin/lotus-worker /usr/local/bin/ +#COPY --from=lotus-builder /opt/filecoin/lotus-stats /usr/local/bin/ +#COPY --from=lotus-builder /opt/filecoin/lotus-fountain /usr/local/bin/ + +RUN mkdir /var/tmp/filecoin-proof-parameters +RUN mkdir /var/lib/lotus +RUN mkdir /var/lib/lotus-miner +RUN mkdir /var/lib/lotus-worker +RUN mkdir /var/lib/lotus-wallet +RUN chown fc: /var/tmp/filecoin-proof-parameters +RUN chown fc: /var/lib/lotus +RUN chown fc: /var/lib/lotus-miner +RUN chown fc: /var/lib/lotus-worker +RUN chown fc: /var/lib/lotus-wallet + + +#VOLUME /var/tmp/filecoin-proof-parameters +#VOLUME /var/lib/lotus +#VOLUME /var/lib/lotus-miner +#VOLUME /var/lib/lotus-worker +#VOLUME /var/lib/lotus-wallet + +EXPOSE 1234 +EXPOSE 2345 +EXPOSE 3456 +EXPOSE 1777 diff --git a/build/lib/app/data/container-build/cerc-lotus/build.sh b/build/lib/app/data/container-build/cerc-lotus/build.sh new file mode 100755 index 00000000..10bbf42c --- /dev/null +++ b/build/lib/app/data/container-build/cerc-lotus/build.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash +# Build cerc/lotus +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +# Per lotus docs, 'releases' branch always contains latest stable release +git -C ${CERC_REPO_BASE_DIR}/lotus checkout releases + +# Replace repo's Dockerfile with modified one +cp ${SCRIPT_DIR}/Dockerfile ${CERC_REPO_BASE_DIR}/lotus/Dockerfile + +docker build -t cerc/lotus:local 
${build_command_args} ${CERC_REPO_BASE_DIR}/lotus diff --git a/build/lib/app/data/container-build/cerc-mobymask-ui/Dockerfile b/build/lib/app/data/container-build/cerc-mobymask-ui/Dockerfile new file mode 100644 index 00000000..651d718f --- /dev/null +++ b/build/lib/app/data/container-build/cerc-mobymask-ui/Dockerfile @@ -0,0 +1,60 @@ +# Originally from: https://github.com/devcontainers/images/blob/main/src/javascript-node/.devcontainer/Dockerfile +# [Choice] Node.js version (use -bullseye variants on local arm64/Apple Silicon): 18, 16, 14, 18-bullseye, 16-bullseye, 14-bullseye, 18-buster, 16-buster, 14-buster +ARG VARIANT=16-bullseye +FROM node:${VARIANT} + +ARG USERNAME=node +ARG NPM_GLOBAL=/usr/local/share/npm-global + +# This container pulls npm package from a registry configured via env var +ARG CERC_NPM_REGISTRY_URL + +# Add NPM global to PATH. +ENV PATH=${NPM_GLOBAL}/bin:${PATH} +# Prevents npm from printing version warnings +ENV NPM_CONFIG_UPDATE_NOTIFIER=false + +RUN \ + # Configure global npm install location, use group to adapt to UID/GID changes + if ! cat /etc/group | grep -e "^npm:" > /dev/null 2>&1; then groupadd -r npm; fi \ + && usermod -a -G npm ${USERNAME} \ + && umask 0002 \ + && mkdir -p ${NPM_GLOBAL} \ + && touch /usr/local/etc/npmrc \ + && chown ${USERNAME}:npm ${NPM_GLOBAL} /usr/local/etc/npmrc \ + && chmod g+s ${NPM_GLOBAL} \ + && npm config -g set prefix ${NPM_GLOBAL} \ + && su ${USERNAME} -c "npm config -g set prefix ${NPM_GLOBAL}" \ + # Install eslint + && su ${USERNAME} -c "umask 0002 && npm install -g eslint" \ + && npm cache clean --force > /dev/null 2>&1 + +# Install additional OS packages. 
+RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ + && apt-get -y install --no-install-recommends jq bash netcat + +# We do this to get a yq binary from the published container, for the correct architecture we're building here +COPY --from=docker.io/mikefarah/yq:latest /usr/bin/yq /usr/local/bin/yq + +RUN mkdir -p /scripts +COPY ./apply-webapp-config.sh /scripts +COPY ./start-serving-app.sh /scripts + +# Configure the local npm registry +RUN npm config set @cerc-io:registry ${CERC_NPM_REGISTRY_URL} + +RUN mkdir -p /config + +# Install simple web server for now (use nginx perhaps later) +RUN yarn global add http-server + +# Globally install both versions of the payload web app package +# Install old version of MobyMask web app +RUN yarn global add @cerc-io/mobymask-ui@0.1.3 +# Install the LXDAO version of MobyMask web app +RUN yarn global add @cerc-io/mobymask-ui-lxdao@npm:@cerc-io/mobymask-ui@0.1.3-lxdao-0.1.1 + +# Expose port for http +EXPOSE 80 + +CMD ["/scripts/start-serving-app.sh"] diff --git a/build/lib/app/data/container-build/cerc-mobymask-ui/apply-webapp-config.sh b/build/lib/app/data/container-build/cerc-mobymask-ui/apply-webapp-config.sh new file mode 100755 index 00000000..9f32cd23 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-mobymask-ui/apply-webapp-config.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi +if [[ $# -ne 3 ]]; then + echo "Illegal number of parameters" >&2 + exit 1 +fi +config_file_name=$1 +webapp_files_dir=$2 +config_prefix=$3 +if ! [[ -f ${config_file_name} ]]; then + echo "Config file ${config_file_name} does not exist" >&2 + exit 1 +fi +if ! 
[[ -d ${webapp_files_dir} ]]; then + echo "Webapp directory ${webapp_files_dir} does not exist" >&2 + exit 1 +fi +# First some magic using sed to translate our yaml config file into an array of key value pairs like: +# ${config_prefix}= +# sed "s/'//g" is used to remove single quote for relayNodes value +readarray -t config_kv_pair_array < <( sed -E 's/([^:]+):\s*(.*)/\1=\2/g' ${config_file_name} | sed "s/'//g" | sed "s/^/${config_prefix}_/" ) +declare -p config_kv_pair_array +# Then iterate over that kv array making the template substitution in our web app files +for kv_pair_string in "${config_kv_pair_array[@]}" +do + kv_pair=(${kv_pair_string//=/ }) + template_string_to_replace=${kv_pair[0]} + template_value_to_substitute=${kv_pair[1]} + # Run find and sed to do the substitution of one variable over all files + # See: https://stackoverflow.com/a/21479607/1701505 + echo "Substituting: ${template_string_to_replace} = ${template_value_to_substitute}" + + # TODO: Pass keys to be replaced without double quotes + if [[ "$template_string_to_replace" =~ ^${config_prefix}_(relayNodes|chainId)$ ]]; then + find ${webapp_files_dir} -type f -exec sed -i 's#"'"${template_string_to_replace}"'"#'"${template_value_to_substitute}"'#g' {} + + else + # Note: we do not escape our strings, on the expectation they do not container the '#' char. 
+ find ${webapp_files_dir} -type f -exec sed -i 's#'${template_string_to_replace}'#'${template_value_to_substitute}'#g' {} + + fi +done diff --git a/build/lib/app/data/container-build/cerc-mobymask-ui/build.sh b/build/lib/app/data/container-build/cerc-mobymask-ui/build.sh new file mode 100755 index 00000000..7628ff5c --- /dev/null +++ b/build/lib/app/data/container-build/cerc-mobymask-ui/build.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash +# Build cerc/mobymask-ui + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +# See: https://stackoverflow.com/a/246128/1701505 +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +CERC_NPM_REGISTRY_URL="https://git.vdb.to/api/packages/cerc-io/npm/" + +docker build -t cerc/mobymask-ui:local ${build_command_args} -f ${SCRIPT_DIR}/Dockerfile \ + --build-arg CERC_NPM_REGISTRY_URL ${SCRIPT_DIR} diff --git a/build/lib/app/data/container-build/cerc-mobymask-ui/start-serving-app.sh b/build/lib/app/data/container-build/cerc-mobymask-ui/start-serving-app.sh new file mode 100755 index 00000000..0e11b447 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-mobymask-ui/start-serving-app.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +# TODO: Don't hard wire this: +webapp_files_dir="/usr/local/share/.config/yarn/global/node_modules/${CERC_BUILD_DIR}" +/scripts/apply-webapp-config.sh /config/config.yml ${webapp_files_dir} MOBYMASK_HOSTED_CONFIG +http-server -p 80 ${webapp_files_dir} diff --git a/build/lib/app/data/container-build/cerc-mobymask/Dockerfile b/build/lib/app/data/container-build/cerc-mobymask/Dockerfile new file mode 100644 index 00000000..3bcebced --- /dev/null +++ b/build/lib/app/data/container-build/cerc-mobymask/Dockerfile @@ -0,0 +1,13 @@ +FROM node:16.17.1-alpine3.16 + +RUN apk --update --no-cache add python3 alpine-sdk jq bash curl wget + +WORKDIR /app + +COPY . . 
+ +RUN yarn + +# Add scripts +RUN mkdir /scripts +ENV PATH="${PATH}:/scripts" diff --git a/build/lib/app/data/container-build/cerc-mobymask/build.sh b/build/lib/app/data/container-build/cerc-mobymask/build.sh new file mode 100755 index 00000000..f156846a --- /dev/null +++ b/build/lib/app/data/container-build/cerc-mobymask/build.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# Build cerc/mobymask + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +# See: https://stackoverflow.com/a/246128/1701505 +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +docker build -t cerc/mobymask:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/MobyMask diff --git a/build/lib/app/data/container-build/cerc-optimism-contracts/Dockerfile b/build/lib/app/data/container-build/cerc-optimism-contracts/Dockerfile new file mode 100644 index 00000000..ed9c4b22 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-optimism-contracts/Dockerfile @@ -0,0 +1,22 @@ +FROM cerc/foundry:local + +# Install node (local foundry is a debian based image) +RUN apt-get update \ + && apt-get install -y curl wget \ + && curl --silent --location https://deb.nodesource.com/setup_16.x | bash - \ + && apt-get update \ + && apt-get install -y nodejs git busybox jq \ + && node -v + +RUN corepack enable \ + && yarn --version + +WORKDIR /app + +# Copy optimism repo contents +COPY . . 
+ +RUN echo "Building optimism" && \ + yarn && yarn build + +WORKDIR /app/packages/contracts-bedrock diff --git a/build/lib/app/data/container-build/cerc-optimism-contracts/build.sh b/build/lib/app/data/container-build/cerc-optimism-contracts/build.sh new file mode 100755 index 00000000..b1ddd819 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-optimism-contracts/build.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# Build cerc/optimism-contracts + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +# See: https://stackoverflow.com/a/246128/1701505 +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +docker build -t cerc/optimism-contracts:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/optimism diff --git a/build/lib/app/data/container-build/cerc-optimism-contracts/hardhat-tasks/rekey-json.ts b/build/lib/app/data/container-build/cerc-optimism-contracts/hardhat-tasks/rekey-json.ts new file mode 100644 index 00000000..78312499 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-optimism-contracts/hardhat-tasks/rekey-json.ts @@ -0,0 +1,28 @@ +import fs from 'fs' + +import { task } from 'hardhat/config' +import { hdkey } from 'ethereumjs-wallet' +import * as bip39 from 'bip39' + +task('rekey-json', 'Generates a new set of keys for a test network') + .addParam('output', 'JSON file to output accounts to') + .setAction(async ({ output: outputFile }) => { + const mnemonic = bip39.generateMnemonic() + const pathPrefix = "m/44'/60'/0'/0" + const labels = ['Admin', 'Proposer', 'Batcher', 'Sequencer'] + const hdwallet = hdkey.fromMasterSeed(await bip39.mnemonicToSeed(mnemonic)) + + const output = {} + + for (let i = 0; i < labels.length; i++) { + const label = labels[i] + const wallet = hdwallet.derivePath(`${pathPrefix}/${i}`).getWallet() + const addr = '0x' + wallet.getAddress().toString('hex') + const pk = wallet.getPrivateKey().toString('hex') + + output[label] = { address: addr, privateKey: pk } + 
} + + fs.writeFileSync(outputFile, JSON.stringify(output, null, 2)) + console.log(`L2 account keys written to ${outputFile}`) + }) diff --git a/build/lib/app/data/container-build/cerc-optimism-contracts/hardhat-tasks/send-balance.ts b/build/lib/app/data/container-build/cerc-optimism-contracts/hardhat-tasks/send-balance.ts new file mode 100644 index 00000000..99bf4f74 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-optimism-contracts/hardhat-tasks/send-balance.ts @@ -0,0 +1,26 @@ +import { task } from 'hardhat/config' +import '@nomiclabs/hardhat-ethers' +import { ethers } from 'ethers' + +task('send-balance', 'Sends Ether to a specified Ethereum account') + .addParam('to', 'The Ethereum address to send Ether to') + .addParam('amount', 'The amount of Ether to send, in Ether') + .addParam('privateKey', 'The private key of the sender') + .setAction(async ({ to, amount, privateKey }, {}) => { + // Open the wallet using sender's private key + const provider = new ethers.providers.JsonRpcProvider(`${process.env.CERC_L1_RPC}`) + const wallet = new ethers.Wallet(privateKey, provider) + + // Send amount to the specified address + const tx = await wallet.sendTransaction({ + to, + value: ethers.utils.parseEther(amount), + }) + const txReceipt = await tx.wait() + + console.log(`Balance sent to: ${to}, from: ${wallet.address}`) + console.log( + `Block: { number: ${txReceipt.blockNumber}, hash: ${txReceipt.blockHash} }` + ) + console.log(`Transaction hash: ${tx.hash}`) + }) diff --git a/build/lib/app/data/container-build/cerc-optimism-contracts/hardhat-tasks/verify-contract-deployment.ts b/build/lib/app/data/container-build/cerc-optimism-contracts/hardhat-tasks/verify-contract-deployment.ts new file mode 100644 index 00000000..340bc4f3 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-optimism-contracts/hardhat-tasks/verify-contract-deployment.ts @@ -0,0 +1,30 @@ +import { task } from 'hardhat/config' +import '@nomiclabs/hardhat-ethers' + +task( + 
'verify-contract-deployment', + 'Verifies the given contract deployment transaction' +) + .addParam('contract', 'Address of the contract deployed') + .addParam('transactionHash', 'Hash of the deployment transaction') + .setAction(async ({ contract, transactionHash }, { ethers }) => { + const provider = new ethers.providers.JsonRpcProvider( + `${process.env.CERC_L1_RPC}` + ) + + // Get the deployment tx receipt + const receipt = await provider.getTransactionReceipt(transactionHash) + if ( + receipt && + receipt.contractAddress && + receipt.contractAddress === contract + ) { + console.log( + `Deployment for contract ${contract} in transaction ${transactionHash} verified` + ) + process.exit(0) + } else { + console.log(`Contract ${contract} deployment verification failed`) + process.exit(1) + } + }) diff --git a/build/lib/app/data/container-build/cerc-optimism-l2geth/build.sh b/build/lib/app/data/container-build/cerc-optimism-l2geth/build.sh new file mode 100755 index 00000000..86c84a74 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-optimism-l2geth/build.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +# Build cerc/optimism-l2geth + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +docker build -t cerc/optimism-l2geth:local ${build_command_args} ${CERC_REPO_BASE_DIR}/op-geth diff --git a/build/lib/app/data/container-build/cerc-optimism-op-batcher/Dockerfile b/build/lib/app/data/container-build/cerc-optimism-op-batcher/Dockerfile new file mode 100644 index 00000000..23d6b629 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-optimism-op-batcher/Dockerfile @@ -0,0 +1,32 @@ +FROM golang:1.19.0-alpine3.15 as builder + +ARG VERSION=v0.0.0 + +RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash + +# build op-batcher with the shared go.mod & go.sum files +COPY ./op-batcher /app/op-batcher +COPY ./op-bindings /app/op-bindings +COPY ./op-node /app/op-node +COPY ./op-service /app/op-service +COPY ./op-signer /app/op-signer +COPY ./go.mod 
/app/go.mod +COPY ./go.sum /app/go.sum + +COPY ./.git /app/.git + +WORKDIR /app/op-batcher + +RUN go mod download + +ARG TARGETOS TARGETARCH + +RUN make op-batcher VERSION="$VERSION" GOOS=$TARGETOS GOARCH=$TARGETARCH + +FROM alpine:3.15 + +RUN apk add --no-cache jq bash + +COPY --from=builder /app/op-batcher/bin/op-batcher /usr/local/bin + +ENTRYPOINT ["op-batcher"] diff --git a/build/lib/app/data/container-build/cerc-optimism-op-batcher/build.sh b/build/lib/app/data/container-build/cerc-optimism-op-batcher/build.sh new file mode 100755 index 00000000..a91c0063 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-optimism-op-batcher/build.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +# Build cerc/optimism-op-batcher +# TODO: use upstream Dockerfile once its buildx-specific content has been removed + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +docker build -t cerc/optimism-op-batcher:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/optimism diff --git a/build/lib/app/data/container-build/cerc-optimism-op-node/Dockerfile b/build/lib/app/data/container-build/cerc-optimism-op-node/Dockerfile new file mode 100644 index 00000000..17d273b6 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-optimism-op-node/Dockerfile @@ -0,0 +1,30 @@ +FROM golang:1.19.0-alpine3.15 as builder + +ARG VERSION=v0.0.0 + +RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash + +# build op-node with the shared go.mod & go.sum files +COPY ./op-node /app/op-node +COPY ./op-chain-ops /app/op-chain-ops +COPY ./op-service /app/op-service +COPY ./op-bindings /app/op-bindings +COPY ./go.mod /app/go.mod +COPY ./go.sum /app/go.sum +COPY ./.git /app/.git + +WORKDIR /app/op-node + +RUN go mod download + +ARG TARGETOS TARGETARCH + +RUN make op-node VERSION="$VERSION" GOOS=$TARGETOS GOARCH=$TARGETARCH + +FROM alpine:3.15 + +RUN apk add --no-cache openssl jq + +COPY 
--from=builder /app/op-node/bin/op-node /usr/local/bin + +CMD ["op-node"] diff --git a/build/lib/app/data/container-build/cerc-optimism-op-node/build.sh b/build/lib/app/data/container-build/cerc-optimism-op-node/build.sh new file mode 100755 index 00000000..be68cbbd --- /dev/null +++ b/build/lib/app/data/container-build/cerc-optimism-op-node/build.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +# Build cerc/optimism-op-node +# TODO: use upstream Dockerfile once its buildx-specific content has been removed + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +docker build -t cerc/optimism-op-node:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/optimism diff --git a/build/lib/app/data/container-build/cerc-optimism-op-proposer/Dockerfile b/build/lib/app/data/container-build/cerc-optimism-op-proposer/Dockerfile new file mode 100644 index 00000000..e91aa4bb --- /dev/null +++ b/build/lib/app/data/container-build/cerc-optimism-op-proposer/Dockerfile @@ -0,0 +1,31 @@ +FROM golang:1.19.0-alpine3.15 as builder + +ARG VERSION=v0.0.0 + +RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash + +# build op-proposer with the shared go.mod & go.sum files +COPY ./op-proposer /app/op-proposer +COPY ./op-bindings /app/op-bindings +COPY ./op-node /app/op-node +COPY ./op-service /app/op-service +COPY ./op-signer /app/op-signer +COPY ./go.mod /app/go.mod +COPY ./go.sum /app/go.sum +COPY ./.git /app/.git + +WORKDIR /app/op-proposer + +RUN go mod download + +ARG TARGETOS TARGETARCH + +RUN make op-proposer VERSION="$VERSION" GOOS=$TARGETOS GOARCH=$TARGETARCH + +FROM alpine:3.15 + +RUN apk add --no-cache jq bash + +COPY --from=builder /app/op-proposer/bin/op-proposer /usr/local/bin + +CMD ["op-proposer"] diff --git a/build/lib/app/data/container-build/cerc-optimism-op-proposer/build.sh b/build/lib/app/data/container-build/cerc-optimism-op-proposer/build.sh new file mode 
100755 index 00000000..f3c975dc --- /dev/null +++ b/build/lib/app/data/container-build/cerc-optimism-op-proposer/build.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +# Build cerc/optimism-op-proposer + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +docker build -t cerc/optimism-op-proposer:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/optimism diff --git a/build/lib/app/data/container-build/cerc-react-peer/Dockerfile b/build/lib/app/data/container-build/cerc-react-peer/Dockerfile new file mode 100644 index 00000000..191f986b --- /dev/null +++ b/build/lib/app/data/container-build/cerc-react-peer/Dockerfile @@ -0,0 +1,29 @@ +FROM node:18.15.0-alpine3.16 + +# This container pulls npm package from a registry configured via env var +ARG CERC_NPM_REGISTRY_URL + +RUN apk --update --no-cache add make git python3 jq bash + +# We do this to get a yq binary from the published container, for the correct architecture we're building here +COPY --from=docker.io/mikefarah/yq:latest /usr/bin/yq /usr/local/bin/yq + +RUN mkdir -p /scripts +COPY ./apply-webapp-config.sh /scripts +COPY ./start-serving-app.sh /scripts + +# Configure the local npm registry +RUN npm config set @cerc-io:registry ${CERC_NPM_REGISTRY_URL} + +RUN mkdir -p /config + +# Install simple web server for now (use nginx perhaps later) +RUN yarn global add http-server + +# Globally install the payload web app package +RUN yarn global add @cerc-io/test-app@0.2.33 + +# Expose port for http +EXPOSE 80 + +CMD ["/scripts/start-serving-app.sh"] diff --git a/build/lib/app/data/container-build/cerc-react-peer/apply-webapp-config.sh b/build/lib/app/data/container-build/cerc-react-peer/apply-webapp-config.sh new file mode 100755 index 00000000..a7f0a28e --- /dev/null +++ b/build/lib/app/data/container-build/cerc-react-peer/apply-webapp-config.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash +if [ -n "$CERC_SCRIPT_DEBUG" ]; 
then + set -x +fi +if [[ $# -ne 3 ]]; then + echo "Illegal number of parameters" >&2 + exit 1 +fi +config_file_name=$1 +webapp_files_dir=$2 +config_prefix=$3 +if ! [[ -f ${config_file_name} ]]; then + echo "Config file ${config_file_name} does not exist" >&2 + exit 1 +fi +if ! [[ -d ${webapp_files_dir} ]]; then + echo "Webapp directory ${webapp_files_dir} does not exist" >&2 + exit 1 +fi +# First some magic using sed to translate our yaml config file into an array of key value pairs like: +# ${config_prefix}= +# sed "s/'//g" is used to remove single quote for relayNodes value +readarray -t config_kv_pair_array < <( sed -E 's/([^:]+):\s*(.*)/\1=\2/g' ${config_file_name} | sed "s/'//g" | sed "s/^/${config_prefix}_/" ) +declare -p config_kv_pair_array +# Then iterate over that kv array making the template substitution in our web app files +for kv_pair_string in "${config_kv_pair_array[@]}" +do + kv_pair=(${kv_pair_string//=/ }) + template_string_to_replace=${kv_pair[0]} + template_value_to_substitute=${kv_pair[1]} + # Run find and sed to do the substitution of one variable over all files + # See: https://stackoverflow.com/a/21479607/1701505 + echo "Substituting: ${template_string_to_replace} = ${template_value_to_substitute}" + + # TODO: Pass keys to be replaced without double quotes + if [[ "$template_string_to_replace" == "${config_prefix}_relayNodes" ]]; then + find ${webapp_files_dir} -type f -exec sed -i 's#"'"${template_string_to_replace}"'"#'"${template_value_to_substitute}"'#g' {} + + else + # Note: we do not escape our strings, on the expectation they do not container the '#' char. 
+ find ${webapp_files_dir} -type f -exec sed -i 's#'${template_string_to_replace}'#'${template_value_to_substitute}'#g' {} + + fi +done diff --git a/build/lib/app/data/container-build/cerc-react-peer/build.sh b/build/lib/app/data/container-build/cerc-react-peer/build.sh new file mode 100755 index 00000000..7078b754 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-react-peer/build.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash +# Build cerc/react-peer + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +# See: https://stackoverflow.com/a/246128/1701505 +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +CERC_NPM_REGISTRY_URL="https://git.vdb.to/api/packages/cerc-io/npm/" + +docker build -t cerc/react-peer:local ${build_command_args} -f ${SCRIPT_DIR}/Dockerfile \ + --build-arg CERC_NPM_REGISTRY_URL ${SCRIPT_DIR} diff --git a/build/lib/app/data/container-build/cerc-react-peer/start-serving-app.sh b/build/lib/app/data/container-build/cerc-react-peer/start-serving-app.sh new file mode 100755 index 00000000..e01b91c3 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-react-peer/start-serving-app.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +# TODO: Don't hard wire this: +webapp_files_dir=/usr/local/share/.config/yarn/global/node_modules/@cerc-io/test-app/build +/scripts/apply-webapp-config.sh /config/config.yml ${webapp_files_dir} MOBYMASK_HOSTED_CONFIG +http-server -p 80 ${webapp_files_dir} diff --git a/build/lib/app/data/container-build/cerc-test-container/Dockerfile b/build/lib/app/data/container-build/cerc-test-container/Dockerfile new file mode 100644 index 00000000..f4ef5506 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-test-container/Dockerfile @@ -0,0 +1,12 @@ +FROM ubuntu:latest + +RUN apt-get update && export DEBIAN_FRONTEND=noninteractive && export DEBCONF_NOWARNINGS="yes" && \ + apt-get install -y software-properties-common && \ + apt-get install -y 
nginx && \ + apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + +EXPOSE 80 + +COPY run.sh /app/run.sh + +ENTRYPOINT ["/app/run.sh"] diff --git a/build/lib/app/data/container-build/cerc-test-container/build.sh b/build/lib/app/data/container-build/cerc-test-container/build.sh new file mode 100755 index 00000000..ee56576a --- /dev/null +++ b/build/lib/app/data/container-build/cerc-test-container/build.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash +# Build cerc/test-container +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +docker build -t cerc/test-container:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} $SCRIPT_DIR \ No newline at end of file diff --git a/build/lib/app/data/container-build/cerc-test-container/run.sh b/build/lib/app/data/container-build/cerc-test-container/run.sh new file mode 100755 index 00000000..b4757d9a --- /dev/null +++ b/build/lib/app/data/container-build/cerc-test-container/run.sh @@ -0,0 +1,16 @@ +#!/bin/sh + +# Test if the container's filesystem is old (run previously) or new +EXISTSFILENAME=/var/exists +echo "Test container starting" +if [[ -f "$EXISTSFILENAME" ]]; +then + TIMESTAMP = `cat $EXISTSFILENAME` + echo "Filesystem is old, created: $TIMESTAMP" +else + echo "Filesystem is fresh" + echo `date` > $EXISTSFILENAME +fi + +# Run nginx which will block here forever +/usr/sbin/nginx -g "daemon off;" diff --git a/build/lib/app/data/container-build/cerc-test-contract/build.sh b/build/lib/app/data/container-build/cerc-test-contract/build.sh new file mode 100755 index 00000000..9df768fb --- /dev/null +++ b/build/lib/app/data/container-build/cerc-test-contract/build.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +# Build cerc/test-contract +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh +docker build -t cerc/test-contract:local --build-arg ETH_ADDR=http://go-ethereum:8545 ${build_command_args} ${CERC_REPO_BASE_DIR}/ipld-eth-db-validator/test/contract 
diff --git a/build/lib/app/data/container-build/cerc-tx-spammer/build.sh b/build/lib/app/data/container-build/cerc-tx-spammer/build.sh new file mode 100755 index 00000000..ee4494e0 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-tx-spammer/build.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +# Build cerc/tx-spammer +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh +docker build -t cerc/tx-spammer:local ${build_command_args} ${CERC_REPO_BASE_DIR}/tx-spammer diff --git a/build/lib/app/data/container-build/cerc-uniswap-v3-info/Dockerfile b/build/lib/app/data/container-build/cerc-uniswap-v3-info/Dockerfile new file mode 100644 index 00000000..a01e2ed2 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-uniswap-v3-info/Dockerfile @@ -0,0 +1,13 @@ +FROM node:15.3.0-alpine3.10 + +RUN apk --update --no-cache add make git + +WORKDIR /app + +COPY . . + +RUN echo "Building uniswap-v3-info" && \ + git checkout v0.1.1 && \ + yarn + +CMD ["sh", "-c", "yarn start"] diff --git a/build/lib/app/data/container-build/cerc-uniswap-v3-info/build.sh b/build/lib/app/data/container-build/cerc-uniswap-v3-info/build.sh new file mode 100755 index 00000000..efbc1c08 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-uniswap-v3-info/build.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# Build cerc/uniswap-v3-info + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +# See: https://stackoverflow.com/a/246128/1701505 +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +docker build -t cerc/uniswap-v3-info:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/uniswap-v3-info diff --git a/build/lib/app/data/container-build/cerc-watcher-erc20/Dockerfile b/build/lib/app/data/container-build/cerc-watcher-erc20/Dockerfile new file mode 100644 index 00000000..5ec94987 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-watcher-erc20/Dockerfile @@ -0,0 +1,13 @@ +FROM node:16.17.1-alpine3.16 + +RUN apk --update --no-cache add 
git python3 alpine-sdk + +WORKDIR /app + +COPY . . + +RUN echo "Building watcher-ts" && \ + git checkout v0.2.19 && \ + yarn && yarn build + +WORKDIR /app/packages/erc20-watcher diff --git a/build/lib/app/data/container-build/cerc-watcher-erc20/build.sh b/build/lib/app/data/container-build/cerc-watcher-erc20/build.sh new file mode 100755 index 00000000..67ac0d24 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-watcher-erc20/build.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# Build cerc/watcher-erc20 + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +# See: https://stackoverflow.com/a/246128/1701505 +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +docker build -t cerc/watcher-erc20:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/watcher-ts diff --git a/build/lib/app/data/container-build/cerc-watcher-erc721/Dockerfile b/build/lib/app/data/container-build/cerc-watcher-erc721/Dockerfile new file mode 100644 index 00000000..6358df9a --- /dev/null +++ b/build/lib/app/data/container-build/cerc-watcher-erc721/Dockerfile @@ -0,0 +1,13 @@ +FROM node:16.17.1-alpine3.16 + +RUN apk --update --no-cache add git python3 alpine-sdk + +WORKDIR /app + +COPY . . 
+ +RUN echo "Building watcher-ts" && \ + git checkout v0.2.19 && \ + yarn && yarn build + +WORKDIR /app/packages/erc721-watcher diff --git a/build/lib/app/data/container-build/cerc-watcher-erc721/build.sh b/build/lib/app/data/container-build/cerc-watcher-erc721/build.sh new file mode 100755 index 00000000..f9c630ac --- /dev/null +++ b/build/lib/app/data/container-build/cerc-watcher-erc721/build.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# Build cerc/watcher-erc721 + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +# See: https://stackoverflow.com/a/246128/1701505 +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +docker build -t cerc/watcher-erc721:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/watcher-ts diff --git a/build/lib/app/data/container-build/cerc-watcher-mobymask-v2/Dockerfile b/build/lib/app/data/container-build/cerc-watcher-mobymask-v2/Dockerfile new file mode 100644 index 00000000..6c100091 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-watcher-mobymask-v2/Dockerfile @@ -0,0 +1,20 @@ +FROM ubuntu:22.04 + +RUN apt-get update \ + && apt-get install -y curl wget gnupg build-essential \ + && curl --silent --location https://deb.nodesource.com/setup_18.x | bash - \ + && apt-get update \ + && apt-get install -y nodejs git busybox jq \ + && node -v + +RUN corepack enable \ + && yarn --version + +WORKDIR /app + +COPY . . 
+ +RUN echo "Building mobymask-v2-watcher-ts" && \ + yarn && yarn build + +WORKDIR /app diff --git a/build/lib/app/data/container-build/cerc-watcher-mobymask-v2/build.sh b/build/lib/app/data/container-build/cerc-watcher-mobymask-v2/build.sh new file mode 100755 index 00000000..b26ad999 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-watcher-mobymask-v2/build.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# Build cerc/watcher-mobymask-v2 + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +# See: https://stackoverflow.com/a/246128/1701505 +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +docker build -t cerc/watcher-mobymask-v2:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/mobymask-v2-watcher-ts diff --git a/build/lib/app/data/container-build/cerc-watcher-mobymask/Dockerfile b/build/lib/app/data/container-build/cerc-watcher-mobymask/Dockerfile new file mode 100644 index 00000000..7994237a --- /dev/null +++ b/build/lib/app/data/container-build/cerc-watcher-mobymask/Dockerfile @@ -0,0 +1,14 @@ +# TODO: move this into the cerc-io/mobymask-watcher repo +FROM node:16.17.1-alpine3.16 + +RUN apk --update --no-cache add git python3 alpine-sdk + +WORKDIR /app + +COPY . . 
+ +RUN echo "Building watcher-ts" && \ + git checkout v0.2.19 && \ + yarn && yarn build + +WORKDIR /app/packages/mobymask-watcher diff --git a/build/lib/app/data/container-build/cerc-watcher-mobymask/build.sh b/build/lib/app/data/container-build/cerc-watcher-mobymask/build.sh new file mode 100755 index 00000000..219b864f --- /dev/null +++ b/build/lib/app/data/container-build/cerc-watcher-mobymask/build.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash +# Build cerc/watcher-mobymask + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +# See: https://stackoverflow.com/a/246128/1701505 +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +docker build -t cerc/watcher-mobymask:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/watcher-ts + +# TODO: add a mechanism to pass two repos into a container rather than the parent directory diff --git a/build/lib/app/data/container-build/cerc-watcher-ts/Dockerfile b/build/lib/app/data/container-build/cerc-watcher-ts/Dockerfile new file mode 100644 index 00000000..75ba2efa --- /dev/null +++ b/build/lib/app/data/container-build/cerc-watcher-ts/Dockerfile @@ -0,0 +1,10 @@ +FROM node:16.17.1-alpine3.16 + +RUN apk --update --no-cache add git python3 alpine-sdk jq + +WORKDIR /app + +COPY . . 
+ +RUN echo "Building watcher-ts" && \ + yarn && yarn build diff --git a/build/lib/app/data/container-build/cerc-watcher-ts/build.sh b/build/lib/app/data/container-build/cerc-watcher-ts/build.sh new file mode 100755 index 00000000..e180ec0e --- /dev/null +++ b/build/lib/app/data/container-build/cerc-watcher-ts/build.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash +# Build cerc/watcher-erc20 + +# See: https://stackoverflow.com/a/246128/1701505 +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +docker build -t cerc/watcher-ts:local -f ${SCRIPT_DIR}/Dockerfile ${CERC_REPO_BASE_DIR}/watcher-ts diff --git a/build/lib/app/data/container-build/cerc-watcher-uniswap-v3/Dockerfile b/build/lib/app/data/container-build/cerc-watcher-uniswap-v3/Dockerfile new file mode 100644 index 00000000..f400755b --- /dev/null +++ b/build/lib/app/data/container-build/cerc-watcher-uniswap-v3/Dockerfile @@ -0,0 +1,11 @@ +FROM node:16.17.1-alpine3.16 + +RUN apk --update --no-cache add git python3 alpine-sdk + +WORKDIR /app + +COPY . . 
+ +RUN echo "Building uniswap-watcher-ts" && \ + git checkout v0.3.4 && \ + yarn && yarn build && yarn build:contracts diff --git a/build/lib/app/data/container-build/cerc-watcher-uniswap-v3/build.sh b/build/lib/app/data/container-build/cerc-watcher-uniswap-v3/build.sh new file mode 100755 index 00000000..5dc63d50 --- /dev/null +++ b/build/lib/app/data/container-build/cerc-watcher-uniswap-v3/build.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# Build cerc/watcher-uniswap-v3 + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +# See: https://stackoverflow.com/a/246128/1701505 +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +docker build -t cerc/watcher-uniswap-v3:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/uniswap-watcher-ts diff --git a/build/lib/app/data/container-build/default-build.sh b/build/lib/app/data/container-build/default-build.sh new file mode 100755 index 00000000..6757812b --- /dev/null +++ b/build/lib/app/data/container-build/default-build.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +# Usage: default-build.sh [] +# if is not supplied, the context is the directory where the Dockerfile lives + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +if [[ $# -ne 2 ]]; then + echo "Illegal number of parameters" >&2 + exit 1 +fi +image_tag=$1 +build_dir=$2 +docker build -t ${image_tag} ${build_command_args} --build-arg CERC_HOST_UID=${CERC_HOST_UID} --build-arg CERC_HOST_GID=${CERC_HOST_GID} ${build_dir} diff --git a/build/lib/app/data/container-image-list.txt b/build/lib/app/data/container-image-list.txt new file mode 100644 index 00000000..8a3266dd --- /dev/null +++ b/build/lib/app/data/container-image-list.txt @@ -0,0 +1,39 @@ +cerc/foundry +cerc/test-contract +cerc/eth-statediff-fill-service +cerc/eth-statediff-service +cerc/go-ethereum +cerc/go-ethereum-foundry +cerc/lighthouse +cerc/ipld-eth-db +cerc/ipld-eth-beacon-db +cerc/ipld-eth-beacon-indexer +cerc/ipld-eth-server +cerc/laconicd 
+cerc/laconic-registry-cli +cerc/laconic-console-host +cerc/fixturenet-eth-geth +cerc/fixturenet-eth-lighthouse +cerc/watcher-ts +cerc/watcher-mobymask +cerc/watcher-erc20 +cerc/watcher-erc721 +cerc/watcher-uniswap-v3 +cerc/uniswap-v3-info +cerc/watcher-mobymask-v2 +cerc/react-peer +cerc/mobymask-ui +cerc/mobymask +cerc/test-container +cerc/eth-probe +cerc/builder-js +cerc/keycloak +cerc/tx-spammer +cerc/builder-gerbil +cerc/act-runner +cerc/act-runner-task-executor +cerc/optimism-l2geth +cerc/optimism-op-batcher +cerc/optimism-op-node +cerc/optimism-op-proposer +cerc/lotus diff --git a/build/lib/app/data/npm-package-list.txt b/build/lib/app/data/npm-package-list.txt new file mode 100644 index 00000000..f194e627 --- /dev/null +++ b/build/lib/app/data/npm-package-list.txt @@ -0,0 +1,7 @@ +laconic-sdk +laconic-registry-cli +laconic-console +debug +crypto +sdk +gem diff --git a/build/lib/app/data/pod-list.txt b/build/lib/app/data/pod-list.txt new file mode 100644 index 00000000..a598d467 --- /dev/null +++ b/build/lib/app/data/pod-list.txt @@ -0,0 +1,27 @@ +contract +ipld-eth-db +eth-statediff-fill-service +go-ethereum-foundry +ipld-eth-beacon-db +ipld-eth-beacon-indexer +ipld-eth-server +lighthouse +laconicd +fixturenet-laconicd +fixturenet-eth +fixturenet-eth-metrics +watcher-mobymask +watcher-erc20 +watcher-erc721 +watcher-uniswap-v3 +watcher-mobymask-v2 +mobymask-app +peer-test-app +test +eth-probe +keycloak +tx-spammer +kubo +foundry +fixturenet-optimism +fixturenet-lotus diff --git a/build/lib/app/data/repository-list.txt b/build/lib/app/data/repository-list.txt new file mode 100644 index 00000000..8740b123 --- /dev/null +++ b/build/lib/app/data/repository-list.txt @@ -0,0 +1,30 @@ +cerc-io/ipld-eth-db +cerc-io/go-ethereum +cerc-io/ipld-eth-server +cerc-io/eth-statediff-service +cerc-io/eth-statediff-fill-service +cerc-io/ipld-eth-db-validator +cerc-io/ipld-eth-beacon-indexer +cerc-io/ipld-eth-beacon-db +cerc-io/laconicd +cerc-io/laconic-sdk 
+cerc-io/laconic-registry-cli +cerc-io/laconic-console +cerc-io/mobymask-watcher +cerc-io/watcher-ts +cerc-io/mobymask-v2-watcher-ts +cerc-io/MobyMask +vulcanize/uniswap-watcher-ts +vulcanize/uniswap-v3-info +vulcanize/assemblyscript +cerc-io/eth-probe +cerc-io/tx-spammer +dboreham/foundry +lirewine/gem +lirewine/debug +lirewine/crypto +lirewine/sdk +telackey/act_runner +ethereum-optimism/op-geth +ethereum-optimism/optimism +filecoin-project/lotus diff --git a/build/lib/app/data/stacks/build-support/README.md b/build/lib/app/data/stacks/build-support/README.md new file mode 100644 index 00000000..b4090d76 --- /dev/null +++ b/build/lib/app/data/stacks/build-support/README.md @@ -0,0 +1,62 @@ +# Build Support Stack + +## Instructions + +JS/TS/NPM builds need an npm registry to store intermediate package artifacts. +This can be supplied by the user (e.g. using a hosted registry or even npmjs.com), or a local registry using gitea can be deployed by stack orchestrator. +To use a user-supplied registry set these environment variables: + +`CERC_NPM_REGISTRY_URL` and +`CERC_NPM_AUTH_TOKEN` + +Leave `CERC_NPM_REGISTRY_URL` un-set to use the local gitea registry. + +### 1. Build support containers + +Note: the scheme/gerbil container is excluded as it isn't currently required for the package registry. + +``` +$ laconic-so --stack build-support build-containers --exclude cerc/builder-gerbil +``` +### 2. Deploy Gitea Package Registry + +``` +$ laconic-so --stack package-registry setup-repositories +$ laconic-so --stack package-registry build-containers +$ laconic-so --stack package-registry deploy up +[+] Running 3/3 + ⠿ Network laconic-aecc4a21d3a502b14522db97d427e850_gitea Created 0.0s + ⠿ Container laconic-aecc4a21d3a502b14522db97d427e850-db-1 Started 1.2s + ⠿ Container laconic-aecc4a21d3a502b14522db97d427e850-server-1 Started 1.9s +New user 'gitea_admin' has been successfully created! +This is your gitea access token: 84fe66a73698bf11edbdccd0a338236b7d1d5c45. 
Keep it safe and secure, it can not be fetched again from gitea. +To use with laconic-so set this environment variable: export CERC_NPM_AUTH_TOKEN=3e493e77b3e83fe9e882f7e3a79dd4d5441c308b +Created the organization cerc-io +Gitea was configured to use host name: gitea.local, ensure that this resolves to localhost, e.g. with sudo vi /etc/hosts +Success, gitea is properly initialized +$ +``` + +Note: the above commands can take several minutes depending on the specs of your machine. + +### 3. Configure the hostname gitea.local +How to do this is OS-dependent but usually involves editing a `hosts` file. For example on Linux add this line to the file `/etc/hosts` (needs sudo): +``` +127.0.0.1 gitea.local +``` +Test with: +``` +$ ping gitea.local +PING gitea.local (127.0.0.1) 56(84) bytes of data. +64 bytes from localhost (127.0.0.1): icmp_seq=1 ttl=64 time=0.147 ms +64 bytes from localhost (127.0.0.1): icmp_seq=2 ttl=64 time=0.033 ms +``` +Although not necessary in order to build and publish packages, you can now access the Gitea web interface at: [http://gitea.local:3000](http://gitea.local:3000) using these credentials: gitea_admin/admin1234 (Note: please properly secure Gitea if public internet access is allowed). 
+ +Now npm packages can be built: +### Build npm Packages +Ensure that `CERC_NPM_AUTH_TOKEN` is set with the token printed above when the package-registry stack was deployed (the actual token value will be different than shown in this example): +``` +$ export CERC_NPM_AUTH_TOKEN=84fe66a73698bf11edbdccd0a338236b7d1d5c45 +$ laconic-so build-npms --include laconic-sdk,laconic-registry-cli +``` diff --git a/build/lib/app/data/stacks/build-support/stack.yml b/build/lib/app/data/stacks/build-support/stack.yml new file mode 100644 index 00000000..e4efb457 --- /dev/null +++ b/build/lib/app/data/stacks/build-support/stack.yml @@ -0,0 +1,6 @@ +version: "1.1" +name: build-support +decription: "Build Support Components" +containers: + - cerc/builder-js + - cerc/builder-gerbil diff --git a/build/lib/app/data/stacks/erc20/README.md b/build/lib/app/data/stacks/erc20/README.md new file mode 100644 index 00000000..43687e3f --- /dev/null +++ b/build/lib/app/data/stacks/erc20/README.md @@ -0,0 +1,157 @@ +# ERC20 Watcher + +Instructions to deploy a local ERC20 watcher stack (core + watcher) for demonstration and testing purposes using [stack orchestrator](/README.md#install) + +## Setup + +Clone required repositories: + +```bash +laconic-so --stack erc20 setup-repositories +``` + +Build the core and watcher container images: + +```bash +laconic-so --stack erc20 build-containers +``` + +This should create the required docker images in the local image registry. 
+ +Deploy the stack: + +```bash +laconic-so --stack erc20 deploy-system up +``` + +## Demo + +Find the watcher container's id using `docker ps` and export it for later use: + +```bash +export CONTAINER_ID= +``` + +Deploy an ERC20 token: + +```bash +docker exec $CONTAINER_ID yarn token:deploy:docker +``` + +Export the address of the deployed token to a shell variable for later use: + +```bash +export TOKEN_ADDRESS= +``` + +Open `http://localhost:3002/graphql` (GraphQL Playground) in a browser window + +Connect MetaMask to `http://localhost:8545` (with chain ID `99`) + +Add the deployed token as an asset in MetaMask and check that the initial balance is zero + +Export your MetaMask account (second account) address to a shell variable for later use: + +```bash +export RECIPIENT_ADDRESS= +``` + +To get the primary account's address, run: + +```bash +docker exec $CONTAINER_ID yarn account:docker +``` + +To get the current block hash at any time, run: + +```bash +docker exec $CONTAINER_ID yarn block:latest:docker +``` + +Fire a GQL query in the playground to get the name, symbol and total supply of the deployed token: + +```graphql +query { + name( + blockHash: "LATEST_BLOCK_HASH" + token: "TOKEN_ADDRESS" + ) { + value + proof { + data + } + } + + symbol( + blockHash: "LATEST_BLOCK_HASH" + token: "TOKEN_ADDRESS" + ) { + value + proof { + data + } + } + + totalSupply( + blockHash: "LATEST_BLOCK_HASH" + token: "TOKEN_ADDRESS" + ) { + value + proof { + data + } + } +} +``` + +Fire the following query to get balances for the primary and the recipient account at the latest block hash: + +```graphql +query { + fromBalanceOf: balanceOf( + blockHash: "LATEST_BLOCK_HASH" + token: "TOKEN_ADDRESS", + # primary account having all the balance initially + owner: "PRIMARY_ADDRESS" + ) { + value + proof { + data + } + } + toBalanceOf: balanceOf( + blockHash: "LATEST_BLOCK_HASH" + token: "TOKEN_ADDRESS", + owner: "RECIPIENT_ADDRESS" + ) { + value + proof { + data + } + } +} +``` + +- The 
initial balance for the primary account should be `1000000000000000000000` +- The initial balance for the recipient should be `0` + +Transfer tokens to the recipient account: + +```bash +docker exec $CONTAINER_ID yarn token:transfer:docker --token $TOKEN_ADDRESS --to $RECIPIENT_ADDRESS --amount 100 +``` + +Fire the above GQL query again with the latest block hash to get updated balances for the primary (`from`) and the recipient (`to`) account: + +- The balance for the primary account should be reduced by the transfer amount (`100`) +- The balance for the recipient account should be equal to the transfer amount (`100`) + +Transfer funds between different accounts using MetaMask and use the playground to query the balance before and after the transfer. + +## Clean up + +To stop all the services running in background run: + +```bash +laconic-so --stack erc20 deploy-system down +``` diff --git a/build/lib/app/data/stacks/erc20/stack.yml b/build/lib/app/data/stacks/erc20/stack.yml new file mode 100644 index 00000000..57baa9eb --- /dev/null +++ b/build/lib/app/data/stacks/erc20/stack.yml @@ -0,0 +1,20 @@ +version: "1.0" +name: erc20-watcher +repos: + - cerc-io/go-ethereum + - cerc-io/ipld-eth-db + - cerc-io/ipld-eth-server + - cerc-io/watcher-ts + - dboreham/foundry +containers: + - cerc/foundry + - cerc/go-ethereum + - cerc/go-ethereum-foundry + - cerc/ipld-eth-db + - cerc/ipld-eth-server + - cerc/watcher-erc20 +pods: + - go-ethereum-foundry + - ipld-eth-db + - ipld-eth-server + - watcher-erc20 diff --git a/build/lib/app/data/stacks/erc721/README.md b/build/lib/app/data/stacks/erc721/README.md new file mode 100644 index 00000000..7ace03c3 --- /dev/null +++ b/build/lib/app/data/stacks/erc721/README.md @@ -0,0 +1,214 @@ +# ERC721 Watcher + +Instructions to deploy a local ERC721 watcher stack (core + watcher) for demonstration and testing purposes using [laconic-stack-orchestrator](../../README.md#setup) + +## Setup + +* Clone / pull required repositories: + +```bash 
+laconic-so --stack erc721 setup-repositories +``` + +* Build the core and watcher container images: + +```bash +laconic-so --stack erc721 build-containers +``` + + This should create the required docker images in the local image registry. + +* Deploy the stack: + +```bash +laconic-so --stack erc721 deploy-system up +``` + +## Demo + +* Find the watcher container's id using `docker ps` and export it for later use: + + ```bash + $ export CONTAINER_ID= + ``` + +* Deploy an ERC721 token: + + ```bash + $ docker exec $CONTAINER_ID yarn nft:deploy:docker + ``` + + Export the address of the deployed token to a shell variable for later use: + + ```bash + $ export NFT_ADDRESS= + ``` + +* Open `http://localhost:3009/graphql` (GraphQL Playground) in a browser window + +* Connect MetaMask to `http://localhost:8545` (with chain ID `99`) + +* Export your MetaMask account (second account) address to a shell variable for later use: + + ```bash + $ export RECIPIENT_ADDRESS= + ``` + +* To get the primary account's address, run: + + ```bash + $ docker exec $CONTAINER_ID yarn account:docker + ``` + + Export it to shell variable for later use: + + ```bash + $ export PRIMARY_ADDRESS= + ``` + +* To get the current block hash at any time, run: + + ```bash + $ docker exec $CONTAINER_ID yarn block:latest:docker + ``` + +* Fire the following GQL query (uses `eth_call`) in the playground: + + ```graphql + query { + name( + blockHash: "LATEST_BLOCK_HASH" + contractAddress: "NFT_ADDRESS" + ) { + value + proof { + data + } + } + + symbol( + blockHash: "LATEST_BLOCK_HASH" + contractAddress: "NFT_ADDRESS" + ) { + value + proof { + data + } + } + + balanceOf( + blockHash: "LATEST_BLOCK_HASH" + contractAddress: "NFT_ADDRESS" + owner: "PRIMARY_ADDRESS" + ) { + value + proof { + data + } + } + } + ``` + + Balance for the `PRIMARY_ADDRESS` should be `0` as the token is yet to be minted. 
+ +* Fire the following GQL query (uses `storage` calls) in the playground: + + ```graphql + query { + _name( + blockHash: "LATEST_BLOCK_HASH" + contractAddress: "NFT_ADDRESS" + ) { + value + proof { + data + } + } + + _symbol( + blockHash: "LATEST_BLOCK_HASH" + contractAddress: "NFT_ADDRESS" + ) { + value + proof { + data + } + } + + _balances( + blockHash: "LATEST_BLOCK_HASH" + contractAddress: "NFT_ADDRESS" + key0: "PRIMARY_ADDRESS" + ) { + value + proof { + data + } + } + } + ``` + +* Mint the token: + + ```bash + $ docker exec $CONTAINER_ID yarn nft:mint:docker --nft $NFT_ADDRESS --to $PRIMARY_ADDRESS --token-id 1 + ``` + + Fire the GQL query above again with latest block hash. The balance should increase to `1`. + +* Get the latest block hash and run the following GQL query in the playground for `balanceOf` and `ownerOf` (`eth_call`): + + ```graphql + query { + fromBalanceOf: balanceOf( + blockHash: "LATEST_BLOCK_HASH" + contractAddress: "NFT_ADDRESS" + owner: "PRIMARY_ADDRESS" + ) { + value + proof { + data + } + } + + toBalanceOf: balanceOf( + blockHash: "LATEST_BLOCK_HASH" + contractAddress: "NFT_ADDRESS" + owner: "RECIPIENT_ADDRESS" + ) { + value + proof { + data + } + } + + ownerOf( + blockHash: "LATEST_BLOCK_HASH" + contractAddress: "NFT_ADDRESS" + tokenId: 1 + ) { + value + proof { + data + } + } + } + ``` + + Balance should be `1` for the `PRIMARY_ADDRESS`, `0` for the `RECIPIENT_ADDRESS` and owner value of the token should be equal to the `PRIMARY_ADDRESS`. + +* Transfer the token: + + ```bash + $ docker exec $CONTAINER_ID yarn nft:transfer:docker --nft $NFT_ADDRESS --from $PRIMARY_ADDRESS --to $RECIPIENT_ADDRESS --token-id 1 + ``` + + Fire the GQL query above again with the latest block hash. The token should be transferred to the recipient. 
+ +## Clean up + +* To stop all the services running in background: + +```bash +laconic-so --stack erc721 deploy-system down +``` diff --git a/build/lib/app/data/stacks/erc721/stack.yml b/build/lib/app/data/stacks/erc721/stack.yml new file mode 100644 index 00000000..28791473 --- /dev/null +++ b/build/lib/app/data/stacks/erc721/stack.yml @@ -0,0 +1,18 @@ +version: "1.0" +name: erc721-watcher +repos: + - cerc-io/go-ethereum + - cerc-io/ipld-eth-db + - cerc-io/ipld-eth-server + - cerc-io/watcher-ts +containers: + - cerc/go-ethereum + - cerc/go-ethereum-foundry + - cerc/ipld-eth-db + - cerc/ipld-eth-server + - cerc/watcher-erc721 +pods: + - go-ethereum-foundry + - ipld-eth-db + - ipld-eth-server + - watcher-erc721 diff --git a/build/lib/app/data/stacks/fixturenet-eth-loaded/README.md b/build/lib/app/data/stacks/fixturenet-eth-loaded/README.md new file mode 100644 index 00000000..09eab027 --- /dev/null +++ b/build/lib/app/data/stacks/fixturenet-eth-loaded/README.md @@ -0,0 +1,6 @@ +# fixturenet-eth + +A "loaded" version of fixturenet-eth, with all the bells and whistles enabled. 
+ +TODO: write me + diff --git a/build/lib/app/data/stacks/fixturenet-eth-loaded/stack.yml b/build/lib/app/data/stacks/fixturenet-eth-loaded/stack.yml new file mode 100644 index 00000000..ced862f9 --- /dev/null +++ b/build/lib/app/data/stacks/fixturenet-eth-loaded/stack.yml @@ -0,0 +1,24 @@ +version: "1.0" +name: fixturenet-eth-loaded +decription: "Loaded Ethereum Fixturenet" +repos: + - cerc-io/go-ethereum + - cerc-io/tx-spammer + - cerc-io/ipld-eth-server + - cerc-io/ipld-eth-db + - cerc/go-ethereum +containers: + - cerc/lighthouse + - cerc/fixturenet-eth-geth + - cerc/fixturenet-eth-lighthouse + - cerc/ipld-eth-server + - cerc/ipld-eth-db + - cerc/keycloak + - cerc/tx-spammer +pods: + - fixturenet-eth + - tx-spammer + - fixturenet-eth-metrics + - keycloak + - ipld-eth-server + - ipld-eth-db diff --git a/build/lib/app/data/stacks/fixturenet-eth/README.md b/build/lib/app/data/stacks/fixturenet-eth/README.md new file mode 100644 index 00000000..c54237aa --- /dev/null +++ b/build/lib/app/data/stacks/fixturenet-eth/README.md @@ -0,0 +1,124 @@ +# fixturenet-eth + +Instructions for deploying a local a geth + lighthouse blockchain "fixturenet" for development and testing purposes using laconic-stack-orchestrator (the installation of which is covered [here](https://github.com/cerc-io/stack-orchestrator#user-mode)): + +## Clone required repositories + +``` +$ laconic-so --stack fixturenet-eth setup-repositories +``` + +## Build the fixturenet-eth containers + +``` +$ laconic-so --stack fixturenet-eth build-containers +``` + +This should create several container images in the local image registry: + +* cerc/go-ethereum +* cerc/lighthouse +* cerc/fixturenet-eth-geth +* cerc/fixturenet-eth-lighthouse + +## Deploy the stack + +``` +$ laconic-so --stack fixturenet-eth deploy up +``` + +## Check status + +``` +$ laconic-so --stack fixturenet-eth deploy exec fixturenet-eth-bootnode-lighthouse /scripts/status-internal.sh +Waiting for geth to generate DAG.... 
done +Waiting for beacon phase0.... done +Waiting for beacon altair.... done +Waiting for beacon bellatrix pre-merge.... done +Waiting for beacon bellatrix merge.... done + +$ laconic-so --stack fixturenet-eth deploy ps +Running containers: +id: c6538b60c0328dadfa2c5585c4d09674a6a13e6d712ff1cd82a26849e4e5679b, name: laconic-b12fa16e999821562937781f8ab0b1e8-fixturenet-eth-bootnode-geth-1, ports: 0.0.0.0:58909->30303/tcp, 0.0.0.0:58910->9898/tcp +id: 5b70597a8211bc7e78d33e50486cb565a7f4a9ce581ce150b3bb450e342bdeda, name: laconic-b12fa16e999821562937781f8ab0b1e8-fixturenet-eth-bootnode-lighthouse-1, ports: +id: 19ed78867b6c534d893835cdeb1e89a9ea553b8e8c02ab02468e4bd1563a340f, name: laconic-b12fa16e999821562937781f8ab0b1e8-fixturenet-eth-geth-1-1, ports: 0.0.0.0:58911->40000/tcp, 0.0.0.0:58912->6060/tcp, 0.0.0.0:58913->8545/tcp +id: 8da0e30a1ce33122d8fd2225e4d26c7f30eb4bfbfa743f2af04d9db5d0bf7fa6, name: laconic-b12fa16e999821562937781f8ab0b1e8-fixturenet-eth-geth-2-1, ports: +id: 387a42a14971034588ba9aeb9b9e2ca7fc0cc61b96f8fe8c2ab770c9d6fb1e0f, name: laconic-b12fa16e999821562937781f8ab0b1e8-fixturenet-eth-lighthouse-1-1, ports: 0.0.0.0:58917->8001/tcp +id: de5115bf89087bae03b291664a73ffe3554fe23e79e4b8345e088b040d5580ac, name: laconic-b12fa16e999821562937781f8ab0b1e8-fixturenet-eth-lighthouse-2-1, ports: +id: 2a7e5a0fb2be7fc9261a7b725a40818facbbe6d0cb2497d82c0e02de0a8e959b, name: laconic-b12fa16e999821562937781f8ab0b1e8-foundry-1, ports: + +$ laconic-so --stack fixturenet-eth deploy exec foundry "cast block-number" +3 +``` + +## Additional pieces + +Several other containers can used with the basic `fixturenet-eth`: + +* `ipld-eth-db` (enables statediffing) +* `ipld-eth-server` (GQL and Ethereum API server, requires `ipld-eth-db`) +* `ipld-eth-beacon-db` and `ipld-eth-beacon-indexer` (for indexing Beacon chain blocks) +* `eth-probe` (captures eth1 tx gossip) +* `keycloak` (nginx proxy with keycloak auth for API authentication) +* `tx-spammer` (generates and sends 
automated transactions to the fixturenet) + +It is not necessary to use them all at once, but a complete example follows: + +``` +# Setup +$ laconic-so setup-repositories --include cerc-io/go-ethereum,cerc-io/ipld-eth-db,cerc-io/ipld-eth-server,cerc-io/ipld-eth-beacon-db,cerc-io/ipld-eth-beacon-indexer,cerc-io/eth-probe,cerc-io/tx-spammer + +# Build +$ laconic-so build-containers --include cerc/go-ethereum,cerc/lighthouse,cerc/fixturenet-eth-geth,cerc/fixturenet-eth-lighthouse,cerc/ipld-eth-db,cerc/ipld-eth-server,cerc/ipld-eth-beacon-db,cerc/ipld-eth-beacon-indexer,cerc/eth-probe,cerc/keycloak,cerc/tx-spammer + +# Deploy +$ laconic-so deploy-system --include db,fixturenet-eth,ipld-eth-server,ipld-eth-beacon-db,ipld-eth-beacon-indexer,eth-probe,keycloak,tx-spammer up + +# Status + +$ container-build/cerc-fixturenet-eth-lighthouse/scripts/status.sh +Waiting for geth to generate DAG.... done +Waiting for beacon phase0.... done +Waiting for beacon altair.... done +Waiting for beacon bellatrix pre-merge.... done +Waiting for beacon bellatrix merge.... 
done + +$ docker ps -f 'name=laconic' --format 'table {{.Names}}\t{{.Ports}}' | cut -d'-' -f3- | sort +NAMES PORTS +eth-probe-db-1 0.0.0.0:55849->5432/tcp +eth-probe-mq-1 +eth-probe-probe-1 +fixturenet-eth-bootnode-geth-1 8545-8546/tcp, 30303/udp, 0.0.0.0:55847->9898/tcp, 0.0.0.0:55848->30303/tcp +fixturenet-eth-bootnode-lighthouse-1 +fixturenet-eth-geth-1-1 8546/tcp, 30303/tcp, 30303/udp, 0.0.0.0:55851->8545/tcp +fixturenet-eth-geth-2-1 8545-8546/tcp, 30303/tcp, 30303/udp +fixturenet-eth-lighthouse-1-1 0.0.0.0:55858->8001/tcp +fixturenet-eth-lighthouse-2-1 +ipld-eth-beacon-db-1 127.0.0.1:8076->5432/tcp +ipld-eth-beacon-indexer-1 +ipld-eth-db-1 127.0.0.1:8077->5432/tcp +ipld-eth-server-1 127.0.0.1:8081-8082->8081-8082/tcp +keycloak-1 8443/tcp, 0.0.0.0:55857->8080/tcp +keycloak-db-1 0.0.0.0:55850->5432/tcp +keycloak-nginx-1 0.0.0.0:55859->80/tcp +migrations-1 +tx-spammer-1 +``` + +## Clean up + +Stop all services running in the background: + +```bash +$ laconic-so --stack fixturenet-eth deploy down +``` + +Clear volumes created by this stack: + +```bash +# List all relevant volumes +$ docker volume ls -q --filter "name=.*fixturenet_eth_bootnode_geth_data|.*fixturenet_eth_bootnode_lighthouse_data|.*fixturenet_eth_geth_1_data|.*fixturenet_eth_geth_2_data|.*fixturenet_eth_lighthouse_1_data|.*fixturenet_eth_lighthouse_2_data" + +# Remove all the listed volumes +$ docker volume rm $(docker volume ls -q --filter "name=.*fixturenet_eth_bootnode_geth_data|.*fixturenet_eth_bootnode_lighthouse_data|.*fixturenet_eth_geth_1_data|.*fixturenet_eth_geth_2_data|.*fixturenet_eth_lighthouse_1_data|.*fixturenet_eth_lighthouse_2_data") +``` diff --git a/build/lib/app/data/stacks/fixturenet-eth/stack.yml b/build/lib/app/data/stacks/fixturenet-eth/stack.yml new file mode 100644 index 00000000..a44f086e --- /dev/null +++ b/build/lib/app/data/stacks/fixturenet-eth/stack.yml @@ -0,0 +1,15 @@ +version: "1.1" +name: fixturenet-eth +decription: "Ethereum Fixturenet" +repos: + - 
cerc-io/go-ethereum + - dboreham/foundry +containers: + - cerc/go-ethereum + - cerc/lighthouse + - cerc/fixturenet-eth-geth + - cerc/fixturenet-eth-lighthouse + - cerc/foundry +pods: + - fixturenet-eth + - foundry diff --git a/build/lib/app/data/stacks/fixturenet-laconic-loaded/README.md b/build/lib/app/data/stacks/fixturenet-laconic-loaded/README.md new file mode 100644 index 00000000..b94189c2 --- /dev/null +++ b/build/lib/app/data/stacks/fixturenet-laconic-loaded/README.md @@ -0,0 +1,66 @@ +# Laconic Fixturenet (experimental) + +Testing a "Loaded" fixturenet with console. + +Instructions for deploying a local Laconic blockchain "fixturenet" for development and testing purposes using laconic-stack-orchestrator. + +## 1. Install Laconic Stack Orchestrator +Installation is covered in detail [here](https://github.com/cerc-io/stack-orchestrator#user-mode) but if you're on Linux and already have docker installed it should be as simple as: +``` +$ mkdir my-working-dir +$ cd my-working-dir +$ curl -L -o ./laconic-so https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so +$ chmod +x ./laconic-so +$ export PATH=$PATH:$(pwd) # Or move laconic-so to ~/bin or your favorite on-path directory +``` +## 2. Prepare the local build environment +Note that this step needs only to be done once on a new machine. +Detailed instructions can be found [here](../build-support/README.md). For the impatient run these commands: +``` +$ laconic-so --stack build-support build-containers --exclude cerc/builder-gerbil +$ laconic-so --stack package-registry setup-repositories +$ laconic-so --stack package-registry build-containers +$ laconic-so --stack package-registry deploy-system up +``` +Then add the localhost alias `gitea.local` and set `CERC_NPM_AUTH_TOKEN` to the token printed when the package-registry stack was deployed above: +``` +$ sudo vi /etc/hosts +$ export CERC_NPM_AUTH_TOKEN= +``` + +## 3. 
Clone required repositories +``` +$ laconic-so --stack fixturenet-laconicd setup-repositories +``` +## 4. Build the stack's packages and containers +``` +$ laconic-so --stack fixturenet-laconicd build-npms +$ laconic-so --stack fixturenet-laconicd build-containers +``` +## 5. Deploy the stack +``` +$ laconic-so --stack fixturenet-laconicd deploy up +``` +Correct operation should be verified by checking the laconicd container's logs with: +``` +$ laconic-so --stack fixturenet-laconicd deploy logs +``` +## 6. Test with the Registry CLI +``` +$ laconic-so --stack fixturenet-laconicd deploy exec cli "laconic cns status" +``` +## 7. View the laconic console +Get the URL for the console web app with this command (the port number will be different for each deployment): +``` +$ echo http://localhost:$(laconic-so --stack fixturenet-laconic-loaded deploy port laconic-console 80 | cut -d ':' -f 2) +http://localhost:58364 +``` +Open that address with a browser. The console should display +## 8. Load demo data into the registry +``` +$ laconic-so --stack fixturenet-laconic-loaded deploy exec cli ./scripts/create-demo-records.sh +Balance is: 99998999999999998999600000 +Created bond with id: dd88e8d6f9567b32b28e70552aea4419c5dd3307ebae85a284d1fe38904e301a +Published demo-record-1.yml with id: bafyreierh3xnfivexlscdwubvczmddsnf46uytyfvrbdhkjzztvsz6ruly +``` +The published record should be visible in the console. 
diff --git a/build/lib/app/data/stacks/fixturenet-laconic-loaded/stack.yml b/build/lib/app/data/stacks/fixturenet-laconic-loaded/stack.yml new file mode 100644 index 00000000..009212f0 --- /dev/null +++ b/build/lib/app/data/stacks/fixturenet-laconic-loaded/stack.yml @@ -0,0 +1,32 @@ +version: "1.1" +name: fixturenet-laconic-loaded +description: "A full featured laconic fixturenet" +repos: + - cerc-io/laconicd + - lirewine/debug + - lirewine/crypto + - lirewine/gem + - lirewine/sdk + - cerc-io/laconic-sdk + - cerc-io/laconic-registry-cli + - cerc-io/laconic-console +npms: + - laconic-sdk + - laconic-registry-cli + - debug + - crypto + - sdk + - gem + - laconic-console +containers: + - cerc/laconicd + - cerc/laconic-registry-cli + - cerc/laconic-console-host +pods: + - fixturenet-laconicd + - fixturenet-laconic-console +config: + cli: + key: laconicd.mykey + address: laconicd.myaddress + diff --git a/build/lib/app/data/stacks/fixturenet-laconicd copy/README.md b/build/lib/app/data/stacks/fixturenet-laconicd copy/README.md new file mode 100644 index 00000000..4cfab1ba --- /dev/null +++ b/build/lib/app/data/stacks/fixturenet-laconicd copy/README.md @@ -0,0 +1,2 @@ +# Lotus Fixturenet + diff --git a/build/lib/app/data/stacks/fixturenet-laconicd copy/stack.yml b/build/lib/app/data/stacks/fixturenet-laconicd copy/stack.yml new file mode 100644 index 00000000..35617c4b --- /dev/null +++ b/build/lib/app/data/stacks/fixturenet-laconicd copy/stack.yml @@ -0,0 +1,9 @@ +version: "1.0" +name: fixturenet-lotus +description: "A lotus fixturenet" +repos: + - filecoin-project/lotus +containers: + - cerc/lotus +pods: + - fixturenet-lotus diff --git a/build/lib/app/data/stacks/fixturenet-laconicd/README.md b/build/lib/app/data/stacks/fixturenet-laconicd/README.md new file mode 100644 index 00000000..77a017a2 --- /dev/null +++ b/build/lib/app/data/stacks/fixturenet-laconicd/README.md @@ -0,0 +1,48 @@ +# Laconicd Fixturenet + +Instructions for deploying a local Laconic blockchain 
"fixturenet" for development and testing purposes using laconic-stack-orchestrator. + +## 1. Install Laconic Stack Orchestrator +Installation is covered in detail [here](https://github.com/cerc-io/stack-orchestrator#user-mode) but if you're on Linux and already have docker installed it should be as simple as: +``` +$ mkdir my-working-dir +$ cd my-working-dir +$ curl -L -o ./laconic-so https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so +$ chmod +x ./laconic-so +$ export PATH=$PATH:$(pwd) # Or move laconic-so to ~/bin or your favorite on-path directory +``` +## 2. Prepare the local build environment +Note that this step needs only to be done once on a new machine. +Detailed instructions can be found [here](../build-support/README.md). For the impatient run these commands: +``` +$ laconic-so --stack build-support build-containers --exclude cerc/builder-gerbil +$ laconic-so --stack package-registry setup-repositories +$ laconic-so --stack package-registry deploy-system up +``` +Then add the localhost alias `gitea.local` and set `CERC_NPM_AUTH_TOKEN` to the token printed when the package-registry stack was deployed above: +``` +$ sudo vi /etc/hosts +$ export CERC_NPM_AUTH_TOKEN= +``` + +## 3. Clone required repositories +``` +$ laconic-so --stack fixturenet-laconicd setup-repositories +``` +## 4. Build the stack's packages and containers +``` +$ laconic-so --stack fixturenet-laconicd build-npms +$ laconic-so --stack fixturenet-laconicd build-containers +``` +## 5. Deploy the stack +``` +$ laconic-so --stack fixturenet-laconicd deploy up +``` +Correct operation should be verified by checking the laconicd container's logs with: +``` +$ laconic-so --stack fixturenet-laconicd deploy logs +``` +## 6. 
Test with the Registry CLI +``` +$ laconic-so --stack fixturenet-laconicd deploy exec cli "laconic cns status" +``` diff --git a/build/lib/app/data/stacks/fixturenet-laconicd/stack.yml b/build/lib/app/data/stacks/fixturenet-laconicd/stack.yml new file mode 100644 index 00000000..54c5dba6 --- /dev/null +++ b/build/lib/app/data/stacks/fixturenet-laconicd/stack.yml @@ -0,0 +1,19 @@ +version: "1.0" +name: fixturenet-laconicd +description: "A laconicd fixturenet" +repos: + - cerc-io/laconicd + - cerc-io/laconic-sdk + - cerc-io/laconic-registry-cli +npms: + - laconic-sdk + - laconic-registry-cli +containers: + - cerc/laconicd + - cerc/laconic-registry-cli +pods: + - fixturenet-laconicd +config: + cli: + key: laconicd.mykey + address: laconicd.myaddress diff --git a/build/lib/app/data/stacks/fixturenet-lotus/README.md b/build/lib/app/data/stacks/fixturenet-lotus/README.md new file mode 100644 index 00000000..4cfab1ba --- /dev/null +++ b/build/lib/app/data/stacks/fixturenet-lotus/README.md @@ -0,0 +1,2 @@ +# Lotus Fixturenet + diff --git a/build/lib/app/data/stacks/fixturenet-lotus/stack.yml b/build/lib/app/data/stacks/fixturenet-lotus/stack.yml new file mode 100644 index 00000000..35617c4b --- /dev/null +++ b/build/lib/app/data/stacks/fixturenet-lotus/stack.yml @@ -0,0 +1,9 @@ +version: "1.0" +name: fixturenet-lotus +description: "A lotus fixturenet" +repos: + - filecoin-project/lotus +containers: + - cerc/lotus +pods: + - fixturenet-lotus diff --git a/build/lib/app/data/stacks/fixturenet-optimism/README.md b/build/lib/app/data/stacks/fixturenet-optimism/README.md new file mode 100644 index 00000000..7664a7c3 --- /dev/null +++ b/build/lib/app/data/stacks/fixturenet-optimism/README.md @@ -0,0 +1,124 @@ +# fixturenet-optimism + +Instructions to setup and deploy an end-to-end L1+L2 stack with [fixturenet-eth](../fixturenet-eth/) (L1) and [Optimism](https://stack.optimism.io) (L2) + +We support running just the L2 part of stack, given an external L1 endpoint. 
Follow [l2-only](./l2-only.md) for the same. + +## Setup + +Clone required repositories: + +```bash +laconic-so --stack fixturenet-optimism setup-repositories + +# If this throws an error as a result of being already checked out to a branch/tag in a repo, remove the repositories mentioned below and re-run the command +``` + +Checkout to the required versions and branches in repos: + +```bash +# Optimism +cd ~/cerc/optimism +git checkout v1.0.4 +``` + +Build the container images: + +```bash +laconic-so --stack fixturenet-optimism build-containers +``` + +This should create the required docker images in the local image registry: +* `cerc/go-ethereum` +* `cerc/lighthouse` +* `cerc/fixturenet-eth-geth` +* `cerc/fixturenet-eth-lighthouse` +* `cerc/foundry` +* `cerc/optimism-contracts` +* `cerc/optimism-l2geth` +* `cerc/optimism-op-node` +* `cerc/optimism-op-batcher` +* `cerc/optimism-op-proposer` + +## Deploy + +Deploy the stack: + +```bash +laconic-so --stack fixturenet-optimism deploy up +``` + +The `fixturenet-optimism-contracts` service may take a while (`~15 mins`) to complete running as it: +1. waits for the 'Merge' to happen on L1 +2. waits for a finalized block to exist on L1 (so that it can be taken as a starting block for roll ups) +3. 
deploys the L1 contracts + +To list down and monitor the running containers: + +```bash +laconic-so --stack fixturenet-optimism deploy ps + +# With status +docker ps + +# Check logs for a container +docker logs -f +``` + +## Clean up + +Stop all services running in the background: + +```bash +laconic-so --stack fixturenet-optimism deploy down 30 +``` + +Clear volumes created by this stack: + +```bash +# List all relevant volumes +docker volume ls -q --filter "name=.*l1_deployment|.*l2_accounts|.*l2_config|.*l2_geth_data" + +# Remove all the listed volumes +docker volume rm $(docker volume ls -q --filter "name=.*l1_deployment|.*l2_accounts|.*l2_config|.*l2_geth_data") +``` + +## Troubleshooting + +* If `op-geth` service aborts or is restarted, the following error might occur in the `op-node` service: + + ```bash + WARN [02-16|21:22:02.868] Derivation process temporary error attempts=14 err="stage 0 failed resetting: temp: failed to find the L2 Heads to start from: failed to fetch L2 block by hash 0x0000000000000000000000000000000000000000000000000000000000000000: failed to determine block-hash of hash 0x0000000000000000000000000000000000000000000000000000000000000000, could not get payload: not found" + ``` + +* This means that the data directory that `op-geth` is using is corrupted and needs to be reinitialized; the containers `op-geth`, `op-node` and `op-batcher` need to be started afresh: + + WARNING: This will reset the L2 chain; consequently, all the data on it will be lost + + * Stop and remove the concerned containers: + + ```bash + # List the containers + docker ps -f "name=op-geth|op-node|op-batcher" + + # Force stop and remove the listed containers + docker rm -f $(docker ps -qf "name=op-geth|op-node|op-batcher") + ``` + + * Remove the concerned volume: + + ```bash + # List the volume + docker volume ls -q --filter name=l2_geth_data + + # Remove the listed volume + docker volume rm $(docker volume ls -q --filter name=l2_geth_data) + ``` + + * Re-run the 
deployment command used in [Deploy](#deploy) to restart the stopped containers + +## Known Issues + +* `fixturenet-eth` currently starts fresh on a restart +* Resource requirements (memory + time) for building the `cerc/foundry` image are on the higher side + * `cerc/optimism-contracts` image is currently based on `cerc/foundry` (Optimism requires foundry installation) diff --git a/build/lib/app/data/stacks/fixturenet-optimism/l2-only.md b/build/lib/app/data/stacks/fixturenet-optimism/l2-only.md new file mode 100644 index 00000000..8cac9ce1 --- /dev/null +++ b/build/lib/app/data/stacks/fixturenet-optimism/l2-only.md @@ -0,0 +1,108 @@ +# fixturenet-optimism + +Instructions to setup and deploy L2 fixturenet using [Optimism](https://stack.optimism.io) + +## Setup + +Prerequisite: An L1 Ethereum RPC endpoint + +Clone required repositories: + +```bash +laconic-so --stack fixturenet-optimism setup-repositories --exclude cerc-io/go-ethereum + +# If this throws an error as a result of being already checked out to a branch/tag in a repo, remove the repositories mentioned below and re-run the command +``` + +Checkout to the required versions and branches in repos: + +```bash +# Optimism +cd ~/cerc/optimism +git checkout v1.0.4 +``` + +Build the container images: + +```bash +laconic-so --stack fixturenet-optimism build-containers --include cerc/foundry,cerc/optimism-contracts,cerc/optimism-op-node,cerc/optimism-l2geth,cerc/optimism-op-batcher,cerc/optimism-op-proposer +``` + +This should create the required docker images in the local image registry: +* `cerc/foundry` +* `cerc/optimism-contracts` +* `cerc/optimism-l2geth` +* `cerc/optimism-op-node` +* `cerc/optimism-op-batcher` +* `cerc/optimism-op-proposer` + +## Deploy + +Create and update an env file to be used in the next step ([defaults](../../config/fixturenet-optimism/l1-params.env)): + + ```bash + # External L1 endpoint + CERC_L1_CHAIN_ID= + CERC_L1_RPC= + CERC_L1_HOST= + CERC_L1_PORT= + + # URL to get CSV with 
credentials for accounts on L1 + # that are used to send balance to Optimism Proxy contract + # (enables them to do transactions on L2) + CERC_L1_ACCOUNTS_CSV_URL= + + # OR + # Specify the required account credentials + CERC_L1_ADDRESS= + CERC_L1_PRIV_KEY= + CERC_L1_ADDRESS_2= + CERC_L1_PRIV_KEY_2= + ``` + +* NOTE: If L1 is running on the host machine, use `host.docker.internal` as the hostname to access the host port + +Deploy the stack: + +```bash +laconic-so --stack fixturenet-optimism deploy --include fixturenet-optimism --env-file up +``` + +The `fixturenet-optimism-contracts` service may take a while (`~15 mins`) to complete running as it: +1. waits for the 'Merge' to happen on L1 +2. waits for a finalized block to exist on L1 (so that it can be taken as a starting block for roll ups) +3. deploys the L1 contracts + +To list down and monitor the running containers: + +```bash +laconic-so --stack fixturenet-optimism deploy --include fixturenet-optimism ps + +# With status +docker ps + +# Check logs for a container +docker logs -f +``` + +## Clean up + +Stop all services running in the background: + +```bash +laconic-so --stack fixturenet-optimism deploy --include fixturenet-optimism down 30 +``` + +Clear volumes created by this stack: + +```bash +# List all relevant volumes +docker volume ls -q --filter "name=.*l1_deployment|.*l2_accounts|.*l2_config|.*l2_geth_data" + +# Remove all the listed volumes +docker volume rm $(docker volume ls -q --filter "name=.*l1_deployment|.*l2_accounts|.*l2_config|.*l2_geth_data") +``` + +## Troubleshooting + +See [Troubleshooting](./README.md#troubleshooting) diff --git a/build/lib/app/data/stacks/fixturenet-optimism/stack.yml b/build/lib/app/data/stacks/fixturenet-optimism/stack.yml new file mode 100644 index 00000000..45bb1709 --- /dev/null +++ b/build/lib/app/data/stacks/fixturenet-optimism/stack.yml @@ -0,0 +1,22 @@ +version: "1.0" +name: fixturenet-optimism +description: "Optimism Fixturenet" +repos: + - cerc-io/go-ethereum 
+ - dboreham/foundry + - ethereum-optimism/optimism + - ethereum-optimism/op-geth +containers: + - cerc/go-ethereum + - cerc/lighthouse + - cerc/fixturenet-eth-geth + - cerc/fixturenet-eth-lighthouse + - cerc/foundry + - cerc/optimism-contracts + - cerc/optimism-op-node + - cerc/optimism-l2geth + - cerc/optimism-op-batcher + - cerc/optimism-op-proposer +pods: + - fixturenet-eth + - fixturenet-optimism diff --git a/build/lib/app/data/stacks/kubo/README.md b/build/lib/app/data/stacks/kubo/README.md new file mode 100644 index 00000000..e9b12321 --- /dev/null +++ b/build/lib/app/data/stacks/kubo/README.md @@ -0,0 +1,30 @@ +# Kubo (IPFS) + +The Kubo stack currently uses the native IPFS docker image, therefore a single command will do: + +``` +laconic-so --stack kubo deploy up +``` + +If running locally, visit: http://localhost:5001/webui and explore the functionality of the WebUI. + +If running in the cloud, visit `IP:5001/webui` and you'll likely see this error: "Could not connect to the IPFS API". To fix it: + +1. Get the container name with `docker ps`: + +2. Go into the container (replace with your container name): + +``` +docker exec -it laconic-dbbf5498fd7d322930b9484121a6a5f4-ipfs-1 sh +``` + +3. Enable CORS as described in point 2 of the error message. Copy/paste/run each line in sequence, then run `exit` to exit the container. + +4. Restart the container: + +``` +laconic-so --stack kubo deploy down +laconic-so --stack kubo deploy up +``` + +5. Refresh the `IP:5001/webui` URL in your browser, you should now be connected to IPFS. 
diff --git a/build/lib/app/data/stacks/kubo/stack.yml b/build/lib/app/data/stacks/kubo/stack.yml new file mode 100644 index 00000000..2552df38 --- /dev/null +++ b/build/lib/app/data/stacks/kubo/stack.yml @@ -0,0 +1,7 @@ +version: "1.0" +name: kubo +description: "Run kubo (IPFS)" +repos: +containers: +pods: + - kubo diff --git a/build/lib/app/data/stacks/mobymask-v2/README.md b/build/lib/app/data/stacks/mobymask-v2/README.md new file mode 100644 index 00000000..32fdbf60 --- /dev/null +++ b/build/lib/app/data/stacks/mobymask-v2/README.md @@ -0,0 +1,125 @@ +# MobyMask v2 watcher + +Instructions to setup and deploy an end-to-end MobyMask v2 stack ([L1](../fixturenet-eth/) + [L2](../fixturenet-optimism/) chains + watcher + web-app(s)) using [laconic-stack-orchestrator](/README.md#install) + +We support running just the watcher part of stack, given an external L2 Optimism endpoint. +Follow [mobymask-only](./mobymask-only.md) for the same. + +We also support running just the web-app(s), given external watcher GQL (for mobymask-app) and relay node endpoints. Follow [web-apps.md](./web-apps.md) for the same. + +## Setup + +Clone required repositories: + +```bash +laconic-so --stack mobymask-v2 setup-repositories +``` + +NOTE: If repositories already exist and are checked out to different versions, `setup-repositories` command will throw an error. +For getting around this, the repositories mentioned below can be removed and then run the command. + +Checkout to the required versions and branches in repos + +```bash +# watcher-ts +cd ~/cerc/watcher-ts +git checkout v0.2.39 + +# mobymask-v2-watcher-ts +cd ~/cerc/mobymask-v2-watcher-ts +git checkout v0.1.0 + +# MobyMask +cd ~/cerc/MobyMask +git checkout v0.1.2 + +# Optimism +cd ~/cerc/optimism +git checkout v1.0.4 +``` + +Build the container images: + +```bash +laconic-so --stack mobymask-v2 build-containers +``` + +This should create the required docker images in the local image registry. 
+ +Deploy the stack: + +* Deploy the containers: + + ```bash + laconic-so --stack mobymask-v2 deploy-system up + ``` + +* List and check the health status of all the containers using `docker ps` and wait for them to be `healthy` + + NOTE: The `mobymask-app` container might not start; if the app is not running at http://localhost:3002, restart the container using it's id: + + ```bash + docker ps -a | grep "mobymask-app" + + docker restart + ``` + +## Tests + +Find the watcher container's id and export it for later use: + +```bash +export CONTAINER_ID=$(docker ps -q --filter "name=peer-tests") +``` + +Run the peer tests: + +```bash +docker exec $CONTAINER_ID yarn test +``` + +## Web Apps + +Check that the web-app containers are healthy: + +```bash +docker ps | grep -E 'mobymask-app|peer-test-app' +``` + +### mobymask-app + +The mobymask-app should be running at http://localhost:3002 + +### peer-test-app + +The peer-test-app should be running at http://localhost:3003 + +## Details + +* The relay node for p2p network is running at http://localhost:9090 + +* The [peer package](https://github.com/cerc-io/watcher-ts/tree/main/packages/peer) (published in [gitea](https://git.vdb.to/cerc-io/-/packages/npm/@cerc-io%2Fpeer)) can be used in client code for connecting to the network + +* The [react-peer package](https://github.com/cerc-io/react-peer/tree/main/packages/react-peer) (published in [gitea](https://git.vdb.to/cerc-io/-/packages/npm/@cerc-io%2Freact-peer)) which uses the peer package can be used in react app for connecting to the network + +## Demo + +Follow the [demo](./demo.md) to try out the MobyMask app with L2 chain + +## Clean up + +Stop all the services running in background run: + +```bash +laconic-so --stack mobymask-v2 deploy-system down 30 +``` + +Clear volumes created by this stack: + +```bash +# List all relevant volumes +docker volume ls -q --filter 
"name=.*mobymask_watcher_db_data|.*peers_ids|.*mobymask_deployment|.*l1_deployment|.*l2_accounts|.*l2_config|.*l2_geth_data" + +# Remove all the listed volumes +docker volume rm $(docker volume ls -q --filter "name=.*mobymask_watcher_db_data|.*peers_ids|.*mobymask_deployment|.*l1_deployment|.*l2_accounts|.*l2_config|.*l2_geth_data") +``` diff --git a/build/lib/app/data/stacks/mobymask-v2/demo.md b/build/lib/app/data/stacks/mobymask-v2/demo.md new file mode 100644 index 00000000..1e1d6f01 --- /dev/null +++ b/build/lib/app/data/stacks/mobymask-v2/demo.md @@ -0,0 +1,124 @@ +# Demo + +* Get the root invite link URL for mobymask-app: + + ```bash + docker logs -f $(docker ps -aq --filter name="mobymask-1") + ``` + + The invite link is seen at the end of the logs. Example log: + + ```bash + laconic-bfb01caf98b1b8f7c8db4d33f11b905a-mobymask-1 | http://127.0.0.1:3002/#/members?invitation=%7B%22v%22%3A1%2C%22signedDelegations%22%3A%5B%7B%22signature%22%3A%220x7559bd412f02677d60820e38243acf61547f79339395a34f7d4e1630e645aeb30535fc219f79b6fbd3af0ce3bd05132ad46d2b274a9fbc4c36bc71edd09850891b%22%2C%22delegation%22%3A%7B%22delegate%22%3A%220xc0838c92B2b71756E0eAD5B3C1e1F186baeEAAac%22%2C%22authority%22%3A%220x0000000000000000000000000000000000000000000000000000000000000000%22%2C%22caveats%22%3A%5B%7B%22enforcer%22%3A%220x558024C7d593B840E1BfD83E9B287a5CDad4db15%22%2C%22terms%22%3A%220x0000000000000000000000000000000000000000000000000000000000000000%22%7D%5D%7D%7D%5D%2C%22key%22%3A%220x98da9805821f1802196443e578fd32af567bababa0a249c07c82df01ecaa7d8d%22%7D + ``` + +* Open the invite link in a browser to use the mobymask-app. + + NOTE: Before opening the invite link, clear the browser cache (local storage) for http://127.0.0.1:3002 to remove old invitations + +* In the debug panel, check if it is connected to the p2p network (it should be connected to at least one other peer for pubsub to work). + +* Create an invite link in the app by clicking on `Create new invite link` button. 
+ +* Switch to the `MESSAGES` tab in debug panel for viewing incoming messages later. + +* Open the invite link in a new browser with different profile (to simulate remote browser) + * Check that it is connected to any other peer in the network. + +* In `Report a phishing attempt` section, report multiple phishers using the `Submit` button. Click on the `Submit batch to p2p network` button. This broadcasts signed invocations to the connected peers. + +* In the `MESSAGES` tab of other browsers, a message can be seen with the signed invocations. + +* In a terminal, check logs from the watcher peer container: + + ```bash + docker logs -f $(docker ps -aq --filter name="mobymask-watcher-server") + ``` + +* It should have received the message, sent transaction to L2 chain and received a transaction receipt for an `invoke` message with block details. + + Example log: + + ```bash + 2023-03-23T10:25:19.771Z vulcanize:peer-listener [10:25:19] Received a message on mobymask P2P network from peer: 12D3KooWAVNswtcrX12iDYukEoxdQwD34kJyRWcQTfZ4unGg2xjd + 2023-03-23T10:25:24.143Z laconic:libp2p-utils Transaction receipt for invoke message { + to: '0x558024C7d593B840E1BfD83E9B287a5CDad4db15', + blockNumber: 1996, + blockHash: '0xebef19c21269654804b2ef2d4bb5cb6c88743b37ed77e82222dc5671debf3afb', + transactionHash: '0xf8c5a093a93f793012196073a7d0cb3ed6fbd2846126c066cb31c72100960cb1', + effectiveGasPrice: '1500000007', + gasUsed: '250000' + } + ``` + +* Check the phisher in watcher GQL: http://localhost:3001/graphql + * Use the blockHash from transaction receipt details or query for latest block: + + ```gql + query { + latestBlock { + hash + number + } + } + ``` + + * Get the deployed contract address: + + ```bash + docker exec -it $(docker ps -aq --filter name="mobymask-app") cat /config/config.yml + ``` + + The value of `address` field is the deployed contract address + + * Check for phisher value + + ```gql + query { + isPhisher( + blockHash: "TX_OR_LATEST_BLOCK_HASH", + 
contractAddress: "CONTRACT_ADDRESS", + # If reported phisher name was "test" then key0 value is "TWT:test" + key0: "TWT:PHISHER_NAME" + ) { + value + } + } + ``` + + It should return `true` for reported phisher names. + + * Watcher internally is using L2 chain `eth_getStorageAt` method. + +* Check the phisher name in mobymask app in `Check Phisher Status` section. + * Watcher GQL API is used for checking phisher. + +* Manage the invitations by clicking on the `Outstanding Invitations in p2p network`. + +* Revoke the created invitation by clicking on `Revoke (p2p network)` + +* Revocation messages can be seen in the debug panel `MESSAGES` tab of other browsers. + +* Check the watcher peer logs. It should receive a message and log the transaction receipt for a `revoke` message. + +* Try reporting a phisher from the revoked invitee's browser. + + * The invocation message for reporting phisher would be broadcasted to all peers. + + * Check the watcher peer logs. A transaction failed error should be logged. + + * Check the reported phisher in [watcher GQL](https://localhost:3001/graphql) + + ```gql + query { + isPhisher( + blockHash: "LATEST_BLOCK_HASH", + contractAddress: "CONTRACT_ADDRESS", + key0: "TWT:PHISHER_NAME" + ) { + value + } + } + ``` + + It should return `false` as the invitation/delegation used for reporting phishers has been revoked. 
diff --git a/build/lib/app/data/stacks/mobymask-v2/mobymask-only.md b/build/lib/app/data/stacks/mobymask-v2/mobymask-only.md new file mode 100644 index 00000000..4cfb4ecf --- /dev/null +++ b/build/lib/app/data/stacks/mobymask-v2/mobymask-only.md @@ -0,0 +1,135 @@ +# MobyMask v2 watcher + +Instructions to setup and deploy MobyMask v2 watcher independently + +## Setup + +Prerequisite: L2 Optimism Geth and Node RPC endpoints + +Clone required repositories: + +```bash +laconic-so --stack mobymask-v2 setup-repositories --include cerc-io/MobyMask,cerc-io/watcher-ts,cerc-io/mobymask-v2-watcher-ts + +# If this throws an error as a result of being already checked out to a branch/tag in a repo, remove the repositories mentioned below and re-run the command +``` + +Checkout to the required versions and branches in repos: + +```bash +# watcher-ts +cd ~/cerc/watcher-ts +git checkout v0.2.39 + +# mobymask-v2-watcher-ts +cd ~/cerc/mobymask-v2-watcher-ts +git checkout v0.1.0 + +# MobyMask +cd ~/cerc/MobyMask +git checkout v0.1.2 +``` + +Build the container images: + +```bash +laconic-so --stack mobymask-v2 build-containers --include cerc/watcher-ts,cerc/watcher-mobymask-v2,cerc/mobymask +``` + +This should create the required docker images in the local image registry + +## Deploy + +### Configuration + +Create and update an env file to be used in the next step ([defaults](../../config/watcher-mobymask-v2/)): + + ```bash + # External L2 endpoints + CERC_L2_GETH_RPC= + + # Endpoints waited on before contract deployment + CERC_L2_GETH_HOST= + CERC_L2_GETH_PORT= + + CERC_L2_NODE_HOST= + CERC_L2_NODE_PORT= + + # URL to get CSV with credentials for accounts on L1 to perform txs on L2 + CERC_L1_ACCOUNTS_CSV_URL= + + # OR + # Specify the required account credentials + CERC_PRIVATE_KEY_DEPLOYER= + CERC_PRIVATE_KEY_PEER= + + # Base URI for mobymask-app + # (used for generating a root invite link after deploying the contract) + CERC_MOBYMASK_APP_BASE_URI="http://127.0.0.1:3002/#" + + # 
(Optional) Set of relay peers to connect to from the relay node + CERC_RELAY_PEERS=[] + + # (Optional) Domain to be used in the relay node's announce address + CERC_RELAY_ANNOUNCE_DOMAIN= + + # Set to false for disabling watcher peer to send txs to L2 + CERC_ENABLE_PEER_L2_TXS=true + + # (Optional) Set already deployed MobyMask contract address to avoid deploying contract in the stack + CERC_DEPLOYED_CONTRACT= + ``` + +* NOTE: If Optimism is running on the host machine, use `host.docker.internal` as the hostname to access the host port + +### Deploy the stack + +```bash +laconic-so --stack mobymask-v2 deploy --include watcher-mobymask-v2 --env-file up +``` + +To list down and monitor the running containers: + +```bash +laconic-so --stack mobymask-v2 deploy --include watcher-mobymask-v2 ps + +# With status +docker ps + +# Check logs for a container +docker logs -f +``` + +The watcher endpoint is exposed on host port `3001` and the relay node endpoint is exposed on host port `9090` + +Check the logs of the deployment container to get the deployed contract's address and generated root invite link: + +```bash +docker logs -f $(docker ps -aq --filter name="mobymask-1") +``` + +## Tests + +See [Tests](./README.md#tests) + +## Web Apps + +For deploying the web-app(s) separately after deploying the watcher, follow [web-apps.md](./web-apps.md) + +## Clean up + +Stop all services running in the background: + +```bash +laconic-so --stack mobymask-v2 deploy --include watcher-mobymask-v2 down +``` + +Clear volumes created by this stack: + +```bash +# List all relevant volumes +docker volume ls -q --filter "name=.*mobymask_watcher_db_data|.*peers_ids|.*mobymask_deployment" + +# Remove all the listed volumes +docker volume rm $(docker volume ls -q --filter "name=.*mobymask_watcher_db_data|.*peers_ids|.*mobymask_deployment") +``` diff --git a/build/lib/app/data/stacks/mobymask-v2/stack.yml b/build/lib/app/data/stacks/mobymask-v2/stack.yml new file mode 100644 index 
00000000..b91e4461 --- /dev/null +++ b/build/lib/app/data/stacks/mobymask-v2/stack.yml @@ -0,0 +1,31 @@ +version: "1.0" +name: mobymask-v2 +repos: + - cerc-io/go-ethereum + - dboreham/foundry + - ethereum-optimism/optimism + - ethereum-optimism/op-geth + - cerc-io/watcher-ts + - cerc-io/mobymask-v2-watcher-ts + - cerc-io/MobyMask +containers: + - cerc/go-ethereum + - cerc/lighthouse + - cerc/fixturenet-eth-geth + - cerc/fixturenet-eth-lighthouse + - cerc/foundry + - cerc/optimism-contracts + - cerc/optimism-l2geth + - cerc/optimism-op-batcher + - cerc/optimism-op-node + - cerc/watcher-ts + - cerc/watcher-mobymask-v2 + - cerc/react-peer + - cerc/mobymask-ui + - cerc/mobymask +pods: + - fixturenet-eth + - fixturenet-optimism + - watcher-mobymask-v2 + - mobymask-app + - peer-test-app diff --git a/build/lib/app/data/stacks/mobymask-v2/watcher-p2p-network/watcher.md b/build/lib/app/data/stacks/mobymask-v2/watcher-p2p-network/watcher.md new file mode 100644 index 00000000..74d95b92 --- /dev/null +++ b/build/lib/app/data/stacks/mobymask-v2/watcher-p2p-network/watcher.md @@ -0,0 +1,319 @@ +# MobyMask Watcher P2P Network + +Instructions to setup and deploy a watcher that connects to the existing watcher p2p network + +## Prerequisites + +* Laconic Stack Orchestrator ([installation](/README.md#install)) +* A publicly reachable domain name with SSL setup + +This demo has been tested on a `Ubuntu 22.04 LTS` machine with `8GB` of RAM + +## Setup + +Clone required repositories: + + ```bash + laconic-so --stack mobymask-v2 setup-repositories --include cerc-io/MobyMask,cerc-io/watcher-ts,cerc-io/mobymask-v2-watcher-ts + + # This will clone the required repositories at ~/cerc + # If this throws an error as a result of being already checked out to a branch/tag in a repo, remove the repositories mentioned in the next step and re-run the command + + # Expected output: + + # Dev Root is: /home/xyz/cerc + # Checking: /home/xyz/cerc/watcher-ts: Needs to be fetched + # 
100%|#############################################################################################################################################| 9.96k/9.96k [00:05<00:00, 1.70kB/s] + # Checking: /home/xyz/cerc/mobymask-v2-watcher-ts: Needs to be fetched + # 100%|################################################################################################################################################| 19.0/19.0 [00:01<00:00, 13.6B/s] + # Checking: /home/xyz/cerc/MobyMask: Needs to be fetched + # 100%|##############################################################################################################################################| 1.41k/1.41k [00:18<00:00, 76.4B/s] + ``` + +Checkout to the required versions and branches in repos: + + ```bash + # watcher-ts + cd ~/cerc/watcher-ts + git checkout v0.2.39 + + # mobymask-v2-watcher-ts + cd ~/cerc/mobymask-v2-watcher-ts + git checkout v0.1.0 + + # MobyMask + cd ~/cerc/MobyMask + git checkout v0.1.2 + ``` + +Build the container images: + + ```bash + laconic-so --stack mobymask-v2 build-containers --include cerc/watcher-ts,cerc/watcher-mobymask-v2,cerc/mobymask + ``` + +Check that the required images are created in the local image registry: + + ```bash + docker image ls + + # Expected output: + + # REPOSITORY TAG IMAGE ID CREATED SIZE + # cerc/watcher-mobymask-v2 local c4dba5dc8d48 24 seconds ago 1.02GB + # cerc/watcher-ts local 9ef61478c243 9 minutes ago 1.84GB + # cerc/mobymask local 9db3f1a69966 2 weeks ago 3.82GB + # . + # . 
+ ``` + +## Deploy + +### Configuration + +Create an env file `mobymask-watcher.env`: + + ```bash + touch mobymask-watcher.env + ``` + +Add the following contents to `mobymask-watcher.env`: + + ```bash + # Domain to be used in the relay node's announce address + CERC_RELAY_ANNOUNCE_DOMAIN="mobymask.example.com" + + + # DO NOT CHANGE THESE VALUES + CERC_L2_GETH_RPC="https://mobymask-l2.dev.vdb.to" + CERC_DEPLOYED_CONTRACT="0x2B6AFbd4F479cE4101Df722cF4E05F941523EaD9" + CERC_ENABLE_PEER_L2_TXS=false + CERC_RELAY_PEERS=["/dns4/relay1.dev.vdb.to/tcp/443/wss/p2p/12D3KooWAx83SM9GWVPc9v9fNzLzftRX6EaAFMjhYiFxRYqctcW1", "/dns4/relay2.dev.vdb.to/tcp/443/wss/p2p/12D3KooWBycy6vHVEfUwwYRbPLBdb5gx9gtFSEMpErYPUjUkDNkm", "/dns4/relay3.dev.vdb.to/tcp/443/wss/p2p/12D3KooWARcUJsiGCgiygiRVVK94U8BNSy8DFBbzAF3B6orrabwn"] + ``` + +Replace `CERC_RELAY_ANNOUNCE_DOMAIN` with your public domain name + +### Deploy the stack + +```bash +laconic-so --stack mobymask-v2 deploy --cluster mobymask_v2 --include watcher-mobymask-v2 --env-file mobymask-watcher.env up + +# Expected output (ignore the "The X variable is not set. Defaulting to a blank string." warnings): + +# [+] Running 9/9 +# ✔ Network mobymask_v2_default Created 0.1s +# ✔ Volume "mobymask_v2_peers_ids" Created 0.0s +# ✔ Volume "mobymask_v2_mobymask_watcher_db_data" Created 0.0s +# ✔ Volume "mobymask_v2_mobymask_deployment" Created 0.0s +# ✔ Container mobymask_v2-mobymask-watcher-db-1 Healthy 22.2s +# ✔ Container mobymask_v2-mobymask-1 Exited 2.2s +# ✔ Container mobymask_v2-peer-ids-gen-1 Exited 23.9s +# ✔ Container mobymask_v2-mobymask-watcher-server-1 Healthy 43.6s +# ✔ Container mobymask_v2-peer-tests-1 Started 44.5s +``` + +This will run the `mobymask-v2-watcher` including: +* A relay node which is in a federated setup with relay nodes set in the env file +* A peer node which connects to the watcher relay node as an entrypoint to the MobyMask watcher p2p network. 
This peer listens for messages from other peers on the network and logs them out to the console + +The watcher GraphQL endpoint is exposed on host port `3001` and the relay node endpoint is exposed on host port `9090` + +To list down and monitor the running containers: + + ```bash + laconic-so --stack mobymask-v2 deploy --cluster mobymask_v2 --include watcher-mobymask-v2 ps + + # Expected output: + + # Running containers: + # id: 25cc3a1cbda27fcd9c2ad4c772bd753ccef1e178f901a70e6ff4191d4a8684e9, name: mobymask_v2-mobymask-watcher-db-1, ports: 0.0.0.0:15432->5432/tcp + # id: c9806f78680d68292ffe942222af2003aa3ed5d5c69d7121b573f5028444391d, name: mobymask_v2-mobymask-watcher-server-1, ports: 0.0.0.0:3001->3001/tcp, 0.0.0.0:9001->9001/tcp, 0.0.0.0:9090->9090/tcp + # id: 6b30a1d313a88fb86f8a3b37a1b1a3bc053f238664e4b2d196c3ec74e04faf13, name: mobymask_v2-peer-tests-1, ports: + + + # With status + docker ps + + # Expected output: + + # CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + # 6b30a1d313a8 cerc/watcher-ts:local "docker-entrypoint.s…" 5 minutes ago Up 4 minutes mobymask_v2-peer-tests-1 + # c9806f78680d cerc/watcher-mobymask-v2:local "sh start-server.sh" 5 minutes ago Up 5 minutes (healthy) 0.0.0.0:3001->3001/tcp, 0.0.0.0:9001->9001/tcp, 0.0.0.0:9090->9090/tcp mobymask_v2-mobymask-watcher-server-1 + # 25cc3a1cbda2 postgres:14-alpine "docker-entrypoint.s…" 5 minutes ago Up 5 minutes (healthy) 0.0.0.0:15432->5432/tcp mobymask_v2-mobymask-watcher-db-1 + + + # Check logs for a container + docker logs -f + ``` + +Check watcher container logs to get multiaddr advertised by the watcher's relay node and note it down for further usage: + + ```bash + laconic-so --stack mobymask-v2 deploy --cluster mobymask_v2 --include watcher-mobymask-v2 logs mobymask-watcher-server | grep -A 2 "Relay node started" + + # The multiaddr will be of form /dns4//tcp/443/wss/p2p/ + # Expected output: + + # mobymask_v2-mobymask-watcher-server-1 | 2023-04-20T04:22:57.069Z laconic:relay Relay 
node started with id 12D3KooWKef84LAcBNb9wZNs6jC5kQFXjddo47hK6AGHD2dSvGai (characteristic-black-pamella) + # mobymask_v2-mobymask-watcher-server-1 | 2023-04-20T04:22:57.069Z laconic:relay Listening on: + # mobymask_v2-mobymask-watcher-server-1 | 2023-04-20T04:22:57.070Z laconic:relay /dns4/mobymask.example.com/tcp/443/wss/p2p/12D3KooWKef84LAcBNb9wZNs6jC5kQFXjddo47hK6AGHD2dSvGai + ``` + +## Web App + +To be able to connect to the relay node from remote peers, it needs to be publicly reachable. +Configure your website with SSL and the `https` traffic reverse proxied as: +* `/graphql` to port `3001` (watcher GQL endpoint) +* `/` to port `9090` (relay node) + +For example, a Nginx configuration for domain `mobymask.example.com` would look something like: + + ```bash + server { + server_name mobymask.example.com; + + location /graphql { + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_pass http://127.0.0.1:3001; + proxy_read_timeout 90; + } + + # https://nginx.org/en/docs/http/websocket.html + location / { + proxy_pass http://127.0.0.1:9090; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + + # set a large timeout to avoid websocket disconnects + proxy_read_timeout 86400; + } + + listen [::]:443 ssl ipv6only=on; # managed by Certbot + listen 443 ssl; # managed by Certbot + ssl_certificate /etc/letsencrypt/live/mobymask.example.com/fullchain.pem; # managed by Certbot + ssl_certificate_key /etc/letsencrypt/live/mobymask.example.com/privkey.pem; # managed by Certbot + include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot + ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot + } + + server { + if ($host = mobymask.example.com) { + return 301 https://$host$request_uri; + } # managed by Certbot + + listen 80; + listen [::]:80; + + server_name 
mobymask.example.com; + return 404; # managed by Certbot + } + ``` + +To test the web-app, either visit https://mobymask-lxdao-app.dev.vdb.to/ or follow [web-app.md](./web-app.md) to deploy the app locally that hits your watcher's GQL endpoint + +Connect a browser peer to the watcher's relay node: +* Click on debug panel on bottom right of the homepage +* Select `` in `Primary Relay` dropdown on the right and enter the watcher relay node's multiaddr +* Click on `UPDATE` to refresh the page and connect to the watcher's relay node; you should see the relay node's multiaddr in `Self Node Info` on the debug panel +* Switch to the `GRAPH (PEERS)` tab to see peers connected to this browser node and the `GRAPH (NETWORK)` tab to see the whole MobyMask p2p network + +Perform transactions: +* An invitation is required to be able to perform transactions; ask an existing user of the app for an invite +* In a browser, close the app if it's already open and then open the invite link +* From the debug panel, confirm that the browser peer is connected to at least one other peer +* Check the status for a phisher to be reported in the `Check Phisher Status` section on homepage +* Select `Report Phisher` option in the `Pending reports` section, enter multiple phisher records and click on the `Submit batch to p2p network` button; this broadcasts signed invocations to peers on the network, including the watcher peer +* Check the watcher container logs to see the message received: + ```bash + docker logs $(docker ps -aq --filter name="mobymask-watcher-server") + + # Expected output: + + # . + # . 
+ # 2023-04-20T04:42:01.072Z vulcanize:libp2p-utils [4:42:1] Received a message on mobymask P2P network from peer: 12D3KooWDKCke8hrjm4evwc9HzUzPZXeVTEQqmfLCkdNaXQ7efAZ + # 2023-04-20T04:42:01.072Z vulcanize:libp2p-utils Signed invocations: + # 2023-04-20T04:42:01.073Z vulcanize:libp2p-utils [ + # { + # "signature": "0x18dc2f4092473cbcc4636eb922f6abf17675368363675779e67d2c14bb0a135f6029da12671a3367463d41720938c84bb3ceed727721c3bbc50d8739859412801c", + # "invocations": { + # "batch": [ + # { + # "transaction": { + # "to": "0x2B6AFbd4F479cE4101Df722cF4E05F941523EaD9", + # "data": "0x6b6dc9de00000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000c5457543a70686973686572310000000000000000000000000000000000000000", + # "gasLimit": 500000 + # }, + # "authority": [ + # { + # "signature": "0x0f91c765faaf851550ddd4345d1bc11eebbf29fde0306a8051f9d3c679c6d6856f66753cad8fcff25203a3e0528b3d7673371343f66a39424f6281c474eada431c", + # "delegation": { + # "delegate": "0x1B85a1485582C3389F62EB9F2C88f0C89bb1C1F4", + # "authority": "0x0000000000000000000000000000000000000000000000000000000000000000", + # "caveats": [ + # { + # "enforcer": "0x2B6AFbd4F479cE4101Df722cF4E05F941523EaD9", + # "terms": "0x0000000000000000000000000000000000000000000000000000000000000000" + # } + # ] + # } + # } + # ] + # } + # ], + # "replayProtection": { + # "nonce": 1, + # "queue": 64298938 + # } + # } + # } + # ] + # 2023-04-20T04:42:01.087Z vulcanize:libp2p-utils method: claimIfPhisher, value: TWT:phisher1 + # 2023-04-20T04:42:01.087Z vulcanize:libp2p-utils ------------------------------------------ + # . + # . 
+ ``` +* Now, check the status for reported phishers again and confirm that they have been registered + +## Clean up + +Stop all services running in the background: + + ```bash + laconic-so --stack mobymask-v2 deploy --cluster mobymask_v2 --include watcher-mobymask-v2 down + + # Expected output: + + # [+] Running 6/6 + # ✔ Container mobymask_v2-peer-tests-1 Removed 10.5s + # ✔ Container mobymask_v2-mobymask-watcher-server-1 Removed 10.8s + # ✔ Container mobymask_v2-peer-ids-gen-1 Removed 0.0s + # ✔ Container mobymask_v2-mobymask-1 Removed 0.0s + # ✔ Container mobymask_v2-mobymask-watcher-db-1 Removed 0.6s + # ✔ Network mobymask_v2_default Removed 0.5s + ``` + +Clear volumes created by this stack: + + ```bash + # List all relevant volumes + docker volume ls -q --filter "name=mobymask_v2" + + # Expected output: + + # mobymask_v2_mobymask_deployment + # mobymask_v2_mobymask_watcher_db_data + # mobymask_v2_peers_ids + + + # Remove all the listed volumes + docker volume rm $(docker volume ls -q --filter "name=mobymask_v2") + ``` + +## Troubleshooting + +* If you don't see any peer connections being formed in the debug panel on https://mobymask-lxdao-app.dev.vdb.to/, try clearing out the website's local storage and refreshing the page diff --git a/build/lib/app/data/stacks/mobymask-v2/watcher-p2p-network/web-app.md b/build/lib/app/data/stacks/mobymask-v2/watcher-p2p-network/web-app.md new file mode 100644 index 00000000..cf1821b5 --- /dev/null +++ b/build/lib/app/data/stacks/mobymask-v2/watcher-p2p-network/web-app.md @@ -0,0 +1,162 @@ +# MobyMask Watcher P2P Network - Web App + +Instructions to setup and deploy the MobyMask app locally, pointed to a watcher on the p2p network + +## Prerequisites + +* Laconic Stack Orchestrator ([installation](/README.md#install)) +* Watcher GQL endpoint + +## Setup + +Build the container images: + + ```bash + laconic-so --stack mobymask-v2 build-containers --include cerc/react-peer,cerc/mobymask-ui + ``` + +Check that the required images 
are created in the local image registry: + + ```bash + docker image ls + + # Expected output: + + # REPOSITORY TAG IMAGE ID CREATED SIZE + # cerc/react-peer local d66b144dbb53 4 days ago 868MB + # cerc/mobymask-ui local e456bf9937ec 4 days ago 1.67GB + # . + # . + ``` + +## Deploy + +### Configuration + +Create an env file `mobymask-app.env`: + + ```bash + touch mobymask-app.env + ``` + +Add the following contents to `mobymask-app.env`: + + ```bash + # Watcher endpoint used by the app for GQL queries + CERC_APP_WATCHER_URL="http://127.0.0.1:3001" + + + # DO NOT CHANGE THESE VALUES + CERC_DEPLOYED_CONTRACT="0x2B6AFbd4F479cE4101Df722cF4E05F941523EaD9" + CERC_RELAY_NODES=["/dns4/relay1.dev.vdb.to/tcp/443/wss/p2p/12D3KooWAx83SM9GWVPc9v9fNzLzftRX6EaAFMjhYiFxRYqctcW1","/dns4/relay2.dev.vdb.to/tcp/443/wss/p2p/12D3KooWBycy6vHVEfUwwYRbPLBdb5gx9gtFSEMpErYPUjUkDNkm","/dns4/relay3.dev.vdb.to/tcp/443/wss/p2p/12D3KooWARcUJsiGCgiygiRVVK94U8BNSy8DFBbzAF3B6orrabwn"] + ``` + +Replace `CERC_APP_WATCHER_URL` with the watcher's endpoint (eg. `https://mobymask.example.com`) + +### Deploy the stack + +```bash +laconic-so --stack mobymask-v2 deploy --cluster mm_v2 --include mobymask-app --env-file mobymask-app.env up lxdao-mobymask-app + +# Expected output (ignore the "The X variable is not set. Defaulting to a blank string." 
warnings): + +# [+] Running 4/4 +# ✔ Network mm_v2_default Created 0.1s +# ✔ Volume "mm_v2_peers_ids" Created 0.0s +# ✔ Volume "mm_v2_mobymask_deployment" Created 0.0s +# ✔ Container mm_v2-lxdao-mobymask-app-1 Started 1.1s +``` + +This will run the `lxdao-mobymask-app` (at `http://localhost:3004`) pointed to `CERC_APP_WATCHER_URL` for GQL queries + +To monitor the running container: + + ```bash + # With status + docker ps + + # Expected output: + + # CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + # f1369dbae1c9 cerc/mobymask-ui:local "docker-entrypoint.s…" 2 minutes ago Up 2 minutes (healthy) 0.0.0.0:3004->80/tcp mm_v2-lxdao-mobymask-app-1 + + # Check logs for a container + docker logs -f mm_v2-lxdao-mobymask-app-1 + + # Expected output: + + # . + # . + # . + # Available on: + # http://127.0.0.1:80 + # http://192.168.0.2:80 + # Hit CTRL-C to stop the server + ``` + +Note: For opening an invite link on this deployed app, replace the URL part before `/#` with `http://localhost:3004` +For example: `http://localhost:3004/#/members?invitation=XYZ` + +In order to host the app using a public domain, configure your website with SSL and `https` traffic reverse proxied to port `3004`. 
+ +For example, a Nginx configuration for domain `my-mobymask-app.example.com` would look something like: + + ```bash + server { + server_name my-mobymask-app.example.com; + + location / { + proxy_pass http://localhost:3004; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + + listen [::]:443 ssl; + listen 443 ssl; + ssl_certificate /etc/letsencrypt/live/my-mobymask-app.example.com/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/my-mobymask-app.example.com/privkey.pem; + include /etc/letsencrypt/options-ssl-nginx.conf; + ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; + } + + server { + if ($host = my-mobymask-app.example.com) { + return 301 https://$host$request_uri; + } # managed by Certbot + + server_name my-mobymask-app.example.com; + listen 80; + return 404; # managed by Certbot + } + ``` + +## Clean up + +Stop all services running in the background: + + ```bash + laconic-so --stack mobymask-v2 deploy --cluster mm_v2 --include mobymask-app down + + # Expected output: + + # [+] Running 2/2 + # ✔ Container mm_v2-lxdao-mobymask-app-1 Removed 10.6s + # ✔ Network mm_v2_default Removed 0.5s + ``` + +Clear volumes created by this stack: + + ```bash + # List all relevant volumes + docker volume ls -q --filter "name=mm_v2" + + # Expected output: + + # mm_v2_mobymask_deployment + # mm_v2_peers_ids + + # Remove all the listed volumes + docker volume rm $(docker volume ls -q --filter "name=mm_v2") + ``` diff --git a/build/lib/app/data/stacks/mobymask-v2/web-apps.md b/build/lib/app/data/stacks/mobymask-v2/web-apps.md new file mode 100644 index 00000000..2eb037f6 --- /dev/null +++ b/build/lib/app/data/stacks/mobymask-v2/web-apps.md @@ -0,0 +1,96 @@ +# Web Apps + +Instructions to setup and deploy MobyMask and Peer Test web apps + +## Setup + +Prerequisite: Watcher with GQL and relay node endpoints + +Build the container images: + 
+```bash +laconic-so --stack mobymask-v2 build-containers --include cerc/react-peer,cerc/mobymask-ui +``` + +This should create the required docker images in the local image registry + +## Deploy + +### Configuration + +Create and update an env file to be used in the next step ([defaults](../../config/watcher-mobymask-v2/mobymask-params.env)): + + ```bash + # Set of relay nodes to be used by the web-app + # (use double quotes " for strings, avoid space after commas) + # Eg. CERC_RELAY_NODES=["/dns4/example.com/tcp/443/wss/p2p/12D3KooWGHmDDCc93XUWL16FMcTPCGu2zFaMkf67k8HZ4gdQbRDr"] + CERC_RELAY_NODES=[] + + # Also add if running MobyMask app: + + # Watcher endpoint used by the app for GQL queries + CERC_APP_WATCHER_URL="http://127.0.0.1:3001" + + # Set deployed MobyMask contract address to be used in MobyMask app's config + CERC_DEPLOYED_CONTRACT= + + # L2 Chain ID used by mobymask web-app for L2 txs + CERC_CHAIN_ID=42069 + ``` + +* NOTE: If watcher is running on the host machine, use `host.docker.internal` as the hostname to access the host port + +### Deploy the stack + +For running mobymask-app +```bash +laconic-so --stack mobymask-v2 deploy --include mobymask-app --env-file up + +# Runs mobymask-app on host port 3002 and lxdao-mobymask-app on host port 3004 +``` + +For running peer-test-app +```bash +laconic-so --stack mobymask-v2 deploy --include peer-test-app --env-file up + +# Runs on host port 3003 +``` + +To list down and monitor the running containers: + +```bash +laconic-so --stack mobymask-v2 deploy --include [mobymask-app | peer-test-app] ps + +docker ps + +# Check logs for a container +docker logs -f +``` + +## Demo + +Follow the [demo](./demo.md) to try out the MobyMask app with L2 chain + +## Clean up + +Stop all services running in the background: + +For mobymask-app +```bash +laconic-so --stack mobymask-v2 deploy --include mobymask-app down +``` + +For peer-test-app +```bash +laconic-so --stack mobymask-v2 deploy --include peer-test-app down +``` + 
+Clear volumes created by this stack: + +```bash +# List all relevant volumes +docker volume ls -q --filter "name=.*mobymask_deployment|.*peers_ids" + +# Remove all the listed volumes +docker volume rm $(docker volume ls -q --filter "name=.*mobymask_deployment|.*peers_ids") +``` diff --git a/build/lib/app/data/stacks/mobymask/README.md b/build/lib/app/data/stacks/mobymask/README.md new file mode 100644 index 00000000..d048ba38 --- /dev/null +++ b/build/lib/app/data/stacks/mobymask/README.md @@ -0,0 +1,53 @@ +# MobyMask + +The MobyMask watcher is a Laconic Network component that provides efficient access to MobyMask contract data from Ethereum, along with evidence allowing users to verify the correctness of that data. The watcher source code is available in [this repository](https://github.com/cerc-io/watcher-ts/tree/main/packages/mobymask-watcher) and a developer-oriented Docker Compose setup for the watcher can be found [here](https://github.com/cerc-io/mobymask-watcher). The watcher can be deployed automatically using the Laconic Stack Orchestrator tool as detailed below: + +## Deploy the MobyMask Watcher + +The instructions below show how to deploy a MobyMask watcher using laconic-stack-orchestrator (the installation of which is covered [here](https://github.com/cerc-io/stack-orchestrator#user-mode)). + +This deployment expects that ipld-eth-server's endpoints are available on the local machine at http://ipld-eth-server.example.com:8083/graphql and http://ipld-eth-server.example.com:8082. More advanced configurations are supported by modifying the watcher's [config file](../../config/watcher-mobymask/mobymask-watcher.toml). + +## Clone required repositories + +``` +$ laconic-so setup-repositories --include cerc-io/watcher-ts +``` + +## Build the watcher container + +``` +$ laconic-so build-containers --include cerc/watcher-mobymask +``` + +This should create a container with tag `cerc/watcher-mobymask` in the local image registry. 
 + +## Deploy the stack + +First the watcher database has to be initialized. Start only the mobymask-watcher-db service: + +``` +$ laconic-so deploy-system --include watcher-mobymask up mobymask-watcher-db +``` + +Next find the container's id using `docker ps` then run the following command to initialize the database: + +``` +$ docker exec -i <container-id> psql -U vdbm mobymask-watcher < config/watcher-mobymask/mobymask-watcher-db.sql +``` + +Finally start the remaining containers: + +``` +$ laconic-so deploy-system --include watcher-mobymask up +``` + +Correct operation should be verified by following the instructions [here](https://github.com/cerc-io/mobymask-watcher/tree/main/mainnet-watcher-only#run), checking GraphQL queries return valid results in the watcher's [playground](http://127.0.0.1:3001/graphql). + +## Clean up + +Stop all the services running in background: + +```bash +$ laconic-so deploy-system --include watcher-mobymask down +``` diff --git a/build/lib/app/data/stacks/mobymask/stack.yml b/build/lib/app/data/stacks/mobymask/stack.yml new file mode 100644 index 00000000..794ec4ab --- /dev/null +++ b/build/lib/app/data/stacks/mobymask/stack.yml @@ -0,0 +1,8 @@ +version: "1.0" +name: mobymask-watcher +repos: + - cerc-io/watcher-ts/v0.2.19 +containers: + - cerc/watcher-mobymask +pods: + - watcher-mobymask \ No newline at end of file diff --git a/build/lib/app/data/stacks/package-registry/README.md b/build/lib/app/data/stacks/package-registry/README.md new file mode 100644 index 00000000..ddb36a3f --- /dev/null +++ b/build/lib/app/data/stacks/package-registry/README.md @@ -0,0 +1,5 @@ +# Package Registry Stack + +The Package Registry Stack supports a build environment that requires a package registry (initially for NPM packages only). + +Setup instructions can be found [here](../build-support/README.md). 
diff --git a/build/lib/app/data/stacks/package-registry/stack.yml b/build/lib/app/data/stacks/package-registry/stack.yml new file mode 100644 index 00000000..596f6e72 --- /dev/null +++ b/build/lib/app/data/stacks/package-registry/stack.yml @@ -0,0 +1,15 @@ +version: "1.1" +name: package-registry +description: "Local Package Registry" +repos: + - cerc-io/hosting + - telackey/act_runner +containers: + - cerc/act-runner + - cerc/act-runner-task-executor +pods: + - name: gitea + repository: cerc-io/hosting + path: gitea + pre_start_command: "run-this-first.sh" + post_start_command: "initialize-gitea.sh" diff --git a/build/lib/app/data/stacks/test/README.md b/build/lib/app/data/stacks/test/README.md new file mode 100644 index 00000000..aef333fc --- /dev/null +++ b/build/lib/app/data/stacks/test/README.md @@ -0,0 +1,3 @@ +# Test Stack + +A stack for test/demo purposes. \ No newline at end of file diff --git a/build/lib/app/data/stacks/test/stack.yml b/build/lib/app/data/stacks/test/stack.yml new file mode 100644 index 00000000..44ac8604 --- /dev/null +++ b/build/lib/app/data/stacks/test/stack.yml @@ -0,0 +1,9 @@ +version: "1.0" +name: test +description: "A test stack" +repos: + - cerc-io/laconicd +containers: + - cerc/test-container +pods: + - test diff --git a/build/lib/app/data/stacks/uniswap-v3/README.md b/build/lib/app/data/stacks/uniswap-v3/README.md new file mode 100644 index 00000000..99f55050 --- /dev/null +++ b/build/lib/app/data/stacks/uniswap-v3/README.md @@ -0,0 +1,83 @@ +# Uniswap v3 + +Instructions to deploy Uniswap v3 watcher stack (watcher + uniswap-v3-info frontend app) using [laconic-stack-orchestrator](../../README.md#setup) + +## Prerequisites + +* Access to [uniswap-watcher-ts](https://github.com/vulcanize/uniswap-watcher-ts). + +* This deployment expects core services to be running; specifically, it requires `ipld-eth-server` RPC and GQL endpoints. 
Update the `upstream.ethServer` endpoints in the [watcher config files](../../config/watcher-uniswap-v3) accordingly: + + ```toml + [upstream] + [upstream.ethServer] + gqlApiEndpoint = "http://ipld-eth-server.example.com:8083/graphql" + rpcProviderEndpoint = "http://ipld-eth-server.example.com:8082" + ``` + +* `uni-watcher` and `uni-info-watcher` database dumps (optional). + +## Setup + +* Clone / pull required repositories: + + ```bash + $ laconic-so setup-repositories --include vulcanize/uniswap-watcher-ts,vulcanize/uniswap-v3-info --git-ssh --pull + ``` + +* Build watcher and info app container images: + + ```bash + $ laconic-so build-containers --include cerc/watcher-uniswap-v3,cerc/uniswap-v3-info + ``` + + This should create the required docker images in the local image registry. + +## Deploy + +* (Optional) Initialize the watcher database with existing database dumps if available: + + * Start the watcher database to be initialized: + + ```bash + $ laconic-so deploy-system --include watcher-uniswap-v3 up uniswap-watcher-db + ``` + + * Find the watcher database container's id using `docker ps` and export it for further usage: + + ```bash + $ export CONTAINER_ID= + ``` + + * Load watcher database dumps: + + ```bash + # uni-watcher database + $ docker exec -i $CONTAINER_ID psql -U vdbm uni-watcher < UNI_WATCHER_DB_DUMP_FILE_PATH.sql + + # uni-info-watcher database + $ docker exec -i $CONTAINER_ID psql -U vdbm uni-info-watcher < UNI_INFO_WATCHER_DB_DUMP_FILE_PATH.sql + ``` + +* Start all the watcher and info app services: + + ```bash + $ laconic-so deploy-system --include watcher-uniswap-v3 up + ``` + +* Check that all the services are up and healthy: + + ```bash + $ docker ps + ``` + + * The `uni-info-watcher` GraphQL Playground can be accessed at `http://localhost:3004/graphql` + * The frontend app can be accessed at `http://localhost:3006` + +## Clean up + +* To stop all the services running in background: + + ```bash + $ laconic-so deploy-system --include 
watcher-uniswap-v3 down + ``` diff --git a/build/lib/app/data/stacks/uniswap-v3/stack.yml b/build/lib/app/data/stacks/uniswap-v3/stack.yml new file mode 100644 index 00000000..8eec8f87 --- /dev/null +++ b/build/lib/app/data/stacks/uniswap-v3/stack.yml @@ -0,0 +1,10 @@ +version: "1.0" +name: uniswap-v3 +repos: + - vulcanize/uniswap-watcher-ts + - vulcanize/uniswap-v3-info +containers: + - cerc/watcher-uniswap-v3 + - cerc/uniswap-v3-info +pods: + - watcher-uniswap-v3 diff --git a/build/lib/app/data/version.txt b/build/lib/app/data/version.txt new file mode 100644 index 00000000..60248e30 --- /dev/null +++ b/build/lib/app/data/version.txt @@ -0,0 +1,2 @@ +# Manually updated product release version +1.1.0 diff --git a/build/lib/app/deploy_system.py b/build/lib/app/deploy_system.py new file mode 100644 index 00000000..35912518 --- /dev/null +++ b/build/lib/app/deploy_system.py @@ -0,0 +1,339 @@ +# Copyright © 2022, 2023 Cerc + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +# Deploys the system components using docker-compose + +import hashlib +import copy +import os +import sys +from dataclasses import dataclass +from decouple import config +import subprocess +from python_on_whales import DockerClient, DockerException +import click +import importlib.resources +from pathlib import Path +from .util import include_exclude_check, get_parsed_stack_config + + +@click.command() +@click.option("--include", help="only start these components") +@click.option("--exclude", help="don\'t start these components") +@click.option("--env-file", help="env file to be used") +@click.option("--cluster", help="specify a non-default cluster name") +@click.argument('command', required=True) # help: command: up|down|ps +@click.argument('extra_args', nargs=-1) # help: command: up|down|ps +@click.pass_context +def command(ctx, include, exclude, env_file, cluster, command, extra_args): + '''deploy a stack''' + + # TODO: implement option exclusion and command value constraint lost with the move from argparse to click + + debug = ctx.obj.debug + quiet = ctx.obj.quiet + verbose = ctx.obj.verbose + local_stack = ctx.obj.local_stack + dry_run = ctx.obj.dry_run + stack = ctx.obj.stack + + cluster_context = _make_cluster_context(ctx.obj, include, exclude, cluster) + + # See: https://gabrieldemarmiesse.github.io/python-on-whales/sub-commands/compose/ + docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster, compose_env_file=env_file) + + extra_args_list = list(extra_args) or None + + if not dry_run: + if command == "up": + container_exec_env = _make_runtime_env(ctx.obj) + for attr, value in container_exec_env.items(): + os.environ[attr] = value + if verbose: + print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {extra_args_list}") + for pre_start_command in cluster_context.pre_start_commands: + _run_command(ctx.obj, cluster_context.cluster, pre_start_command) + 
docker.compose.up(detach=True, services=extra_args_list) + for post_start_command in cluster_context.post_start_commands: + _run_command(ctx.obj, cluster_context.cluster, post_start_command) + + _orchestrate_cluster_config(ctx.obj, cluster_context.config, docker, container_exec_env) + + elif command == "down": + if verbose: + print("Running compose down") + + timeout_arg = None + if extra_args_list: + timeout_arg=extra_args_list[0] + + # Specify shutdown timeout (default 10s) to give services enough time to shutdown gracefully + docker.compose.down(timeout=timeout_arg) + elif command == "exec": + if extra_args_list is None or len(extra_args_list) < 2: + print("Usage: exec ") + sys.exit(1) + service_name = extra_args_list[0] + command_to_exec = ["sh", "-c"] + extra_args_list[1:] + container_exec_env = _make_runtime_env(ctx.obj) + if verbose: + print(f"Running compose exec {service_name} {command_to_exec}") + try: + docker.compose.execute(service_name, command_to_exec, envs=container_exec_env) + except DockerException as error: + print(f"container command returned error exit status") + elif command == "port": + if extra_args_list is None or len(extra_args_list) < 2: + print("Usage: port ") + sys.exit(1) + service_name = extra_args_list[0] + exposed_port = extra_args_list[1] + if verbose: + print(f"Running compose port {service_name} {exposed_port}") + mapped_port_data = docker.compose.port(service_name, exposed_port) + print(f"{mapped_port_data[0]}:{mapped_port_data[1]}") + elif command == "ps": + if verbose: + print("Running compose ps") + container_list = docker.compose.ps() + if len(container_list) > 0: + print("Running containers:") + for container in container_list: + print(f"id: {container.id}, name: {container.name}, ports: ", end="") + ports = container.network_settings.ports + comma = "" + for port_mapping in ports.keys(): + mapping = ports[port_mapping] + print(comma, end="") + if mapping is None: + print(f"{port_mapping}", end="") + else: + 
print(f"{mapping[0]['HostIp']}:{mapping[0]['HostPort']}->{port_mapping}", end="") + comma = ", " + print() + else: + print("No containers running") + elif command == "logs": + if verbose: + print("Running compose logs") + logs_output = docker.compose.logs(services=extra_args_list if extra_args_list is not None else []) + print(logs_output) + + +def get_stack_status(ctx, stack): + + ctx_copy = copy.copy(ctx) + ctx_copy.stack = stack + + cluster_context = _make_cluster_context(ctx_copy, None, None, None) + docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster) + # TODO: refactor to avoid duplicating this code above + if ctx.verbose: + print("Running compose ps") + container_list = docker.compose.ps() + if len(container_list) > 0: + if ctx.debug: + print(f"Container list from compose ps: {container_list}") + return True + else: + if ctx.debug: + print("No containers found from compose ps") + False + + +def _make_runtime_env(ctx): + container_exec_env = { + "CERC_HOST_UID": f"{os.getuid()}", + "CERC_HOST_GID": f"{os.getgid()}" + } + container_exec_env.update({"CERC_SCRIPT_DEBUG": "true"} if ctx.debug else {}) + return container_exec_env + + +def _make_cluster_context(ctx, include, exclude, cluster): + + if ctx.local_stack: + dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")] + print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}') + else: + dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc")) + + # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure + compose_dir = Path(__file__).absolute().parent.joinpath("data", "compose") + + if cluster is None: + # Create default unique, stable cluster name from confile file path and stack name if provided + # TODO: change this to the config file path + path = os.path.realpath(sys.argv[0]) + unique_cluster_descriptor = 
f"{path},{ctx.stack},{include},{exclude}" + if ctx.debug: + print(f"pre-hash descriptor: {unique_cluster_descriptor}") + hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest() + cluster = f"laconic-{hash}" + if ctx.verbose: + print(f"Using cluster name: {cluster}") + + # See: https://stackoverflow.com/a/20885799/1701505 + from . import data + with importlib.resources.open_text(data, "pod-list.txt") as pod_list_file: + all_pods = pod_list_file.read().splitlines() + + pods_in_scope = [] + if ctx.stack: + stack_config = get_parsed_stack_config(ctx.stack) + # TODO: syntax check the input here + pods_in_scope = stack_config['pods'] + cluster_config = stack_config['config'] if 'config' in stack_config else None + else: + pods_in_scope = all_pods + cluster_config = None + + # Convert all pod definitions to v1.1 format + pods_in_scope = _convert_to_new_format(pods_in_scope) + + if ctx.verbose: + print(f"Pods: {pods_in_scope}") + + # Construct a docker compose command suitable for our purpose + + compose_files = [] + pre_start_commands = [] + post_start_commands = [] + for pod in pods_in_scope: + pod_name = pod["name"] + pod_repository = pod["repository"] + pod_path = pod["path"] + if include_exclude_check(pod_name, include, exclude): + if pod_repository is None or pod_repository == "internal": + compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_path}.yml") + else: + pod_root_dir = os.path.join(dev_root_path, pod_repository.split("/")[-1], pod["path"]) + compose_file_name = os.path.join(pod_root_dir, "docker-compose.yml") + pod_pre_start_command = pod["pre_start_command"] + pod_post_start_command = pod["post_start_command"] + if pod_pre_start_command is not None: + pre_start_commands.append(os.path.join(pod_root_dir, pod_pre_start_command)) + if pod_post_start_command is not None: + post_start_commands.append(os.path.join(pod_root_dir, pod_post_start_command)) + compose_files.append(compose_file_name) + else: + if ctx.verbose: + 
print(f"Excluding: {pod_name}") + + if ctx.verbose: + print(f"files: {compose_files}") + + return cluster_context(cluster, compose_files, pre_start_commands, post_start_commands, cluster_config) + + +class cluster_context: + def __init__(self, cluster, compose_files, pre_start_commands, post_start_commands, config) -> None: + self.cluster = cluster + self.compose_files = compose_files + self.pre_start_commands = pre_start_commands + self.post_start_commands = post_start_commands + self.config = config + + +def _convert_to_new_format(old_pod_array): + new_pod_array = [] + for old_pod in old_pod_array: + if isinstance(old_pod, dict): + new_pod_array.append(old_pod) + else: + new_pod = { + "name": old_pod, + "repository": "internal", + "path": old_pod + } + new_pod_array.append(new_pod) + return new_pod_array + + +def _run_command(ctx, cluster_name, command): + if ctx.verbose: + print(f"Running command: {command}") + command_dir = os.path.dirname(command) + command_file = os.path.join(".", os.path.basename(command)) + command_env = os.environ.copy() + command_env["CERC_SO_COMPOSE_PROJECT"] = cluster_name + if ctx.debug: + command_env["CERC_SCRIPT_DEBUG"] = "true" + command_result = subprocess.run(command_file, shell=True, env=command_env, cwd=command_dir) + if command_result.returncode != 0: + print(f"FATAL Error running command: {command}") + sys.exit(1) + + +def _orchestrate_cluster_config(ctx, cluster_config, docker, container_exec_env): + + @dataclass + class ConfigDirective: + source_container: str + source_variable: str + destination_container: str + destination_variable: str + + if cluster_config is not None: + for container in cluster_config: + container_config = cluster_config[container] + if ctx.verbose: + print(f"{container} config: {container_config}") + for directive in container_config: + pd = ConfigDirective( + container_config[directive].split(".")[0], + container_config[directive].split(".")[1], + container, + directive + ) + if ctx.verbose: + 
print(f"Setting {pd.destination_container}.{pd.destination_variable}" + f" = {pd.source_container}.{pd.source_variable}") + # TODO: add a timeout + waiting_for_data = True + while waiting_for_data: + # TODO: fix the script paths so they're consistent between containers + source_value = None + try: + source_value = docker.compose.execute(pd.source_container, + ["sh", "-c", + "sh /docker-entrypoint-scripts.d/export-" + f"{pd.source_variable}.sh"], + tty=False, + envs=container_exec_env) + except DockerException as error: + if ctx.debug: + print(f"Docker exception reading config source: {error}") + # If the script executed failed for some reason, we get: + # "It returned with code 1" + if "It returned with code 1" in str(error): + if ctx.verbose: + print("Config export script returned an error, re-trying") + # If the script failed to execute (e.g. the file is not there) then we get: + # "It returned with code 2" + if "It returned with code 2" in str(error): + print(f"Fatal error reading config source: {error}") + if source_value: + if ctx.debug: + print(f"fetched source value: {source_value}") + destination_output = docker.compose.execute(pd.destination_container, + ["sh", "-c", + f"sh /scripts/import-{pd.destination_variable}.sh" + f" {source_value}"], + tty=False, + envs=container_exec_env) + waiting_for_data = False + if ctx.debug: + print(f"destination output: {destination_output}") diff --git a/build/lib/app/setup_repositories.py b/build/lib/app/setup_repositories.py new file mode 100644 index 00000000..e8c08916 --- /dev/null +++ b/build/lib/app/setup_repositories.py @@ -0,0 +1,182 @@ +# Copyright © 2022 Cerc + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

# env vars:
# CERC_REPO_BASE_DIR defaults to ~/cerc

import os
import sys
from decouple import config
import git
from tqdm import tqdm
import click
import importlib.resources
from pathlib import Path
import yaml
from .util import include_exclude_check


class GitProgress(git.RemoteProgress):
    """Feeds GitPython remote-operation callbacks into a tqdm progress bar."""

    def __init__(self):
        super().__init__()
        self.pbar = tqdm(unit='B', ascii=True, unit_scale=True)

    def update(self, op_code, cur_count, max_count=None, message=''):
        # Called repeatedly by GitPython during clone/pull; mirror its
        # counters into the bar and redraw.
        self.pbar.total = max_count
        self.pbar.n = cur_count
        self.pbar.refresh()


def is_git_repo(path):
    """Return True if path is inside a valid git repository."""
    try:
        _ = git.Repo(path).git_dir
        return True
    except git.exc.InvalidGitRepositoryError:
        return False

# TODO: find a place for this in the context of click
# parser = argparse.ArgumentParser(
#     epilog="Config provided either in .env or settings.ini or env vars: CERC_REPO_BASE_DIR (defaults to ~/cerc)"
# )


@click.command()
@click.option("--include", help="only clone these repositories")
@click.option("--exclude", help="don\'t clone these repositories")
@click.option('--git-ssh', is_flag=True, default=False)
@click.option('--check-only', is_flag=True, default=False)
@click.option('--pull', is_flag=True, default=False)
@click.option('--branches-file', help="checkout branches specified in this file")
@click.pass_context
def command(ctx, include, exclude, git_ssh, check_only, pull, branches_file):
    '''git clone the set of repositories required to build the complete system from source'''

    quiet = ctx.obj.quiet
    verbose = ctx.obj.verbose
    dry_run = ctx.obj.dry_run
    stack = ctx.obj.stack

    branches = []

    # TODO: branches file needs to be re-worked in the context of stacks
    if branches_file:
        if verbose:
            print(f"loading branches from: {branches_file}")
        with open(branches_file) as branches_file_open:
            branches = branches_file_open.read().splitlines()
        if verbose:
            print(f"Branches are: {branches}")

    local_stack = ctx.obj.local_stack

    if local_stack:
        dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
        print(f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}")
    else:
        dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))

    if not quiet:
        print(f"Dev Root is: {dev_root_path}")

    if not os.path.isdir(dev_root_path):
        if not quiet:
            print('Dev root directory doesn\'t exist, creating')
        os.makedirs(dev_root_path)

    # See: https://stackoverflow.com/a/20885799/1701505
    from . import data
    with importlib.resources.open_text(data, "repository-list.txt") as repository_list_file:
        all_repos = repository_list_file.read().splitlines()

    repos_in_scope = []
    if stack:
        # In order to be compatible with Python 3.8 we need to use this hack to get the path:
        # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
        stack_file_path = Path(__file__).absolute().parent.joinpath("data", "stacks", stack, "stack.yml")
        # FIX: the original did `with stack_file_path:` (pathlib.Path's
        # deprecated context-manager protocol, removed in Python 3.13) and
        # leaked the open() handle; open the file in a managed context instead.
        with open(stack_file_path, "r") as stack_file:
            stack_config = yaml.safe_load(stack_file)
        # TODO: syntax check the input here
        repos_in_scope = stack_config['repos']
    else:
        repos_in_scope = all_repos

    if verbose:
        print(f"Repos: {repos_in_scope}")
        if stack:
            print(f"Stack: {stack}")

    repos = []
    for repo in repos_in_scope:
        if include_exclude_check(repo, include, exclude):
            repos.append(repo)
        else:
            if verbose:
                print(f"Excluding: {repo}")

    def process_repo(repo):
        # Clone, pull or just report on one "org/name" repository entry.
        git_ssh_prefix = "git@github.com:"
        git_http_prefix = "https://github.com/"
        full_github_repo_path = f"{git_ssh_prefix if git_ssh else git_http_prefix}{repo}"
        repoName = repo.split("/")[-1]
        full_filesystem_repo_path = os.path.join(dev_root_path, repoName)
        is_present = os.path.isdir(full_filesystem_repo_path)
        if not quiet:
            present_text = f"already exists active branch: {git.Repo(full_filesystem_repo_path).active_branch}" if is_present \
                else 'Needs to be fetched'
            print(f"Checking: {full_filesystem_repo_path}: {present_text}")
        # Quick check that it's actually a repo
        if is_present:
            if not is_git_repo(full_filesystem_repo_path):
                print(f"Error: {full_filesystem_repo_path} does not contain a valid git repository")
                sys.exit(1)
            else:
                if pull:
                    if verbose:
                        print(f"Running git pull for {full_filesystem_repo_path}")
                    if not check_only:
                        git_repo = git.Repo(full_filesystem_repo_path)
                        origin = git_repo.remotes.origin
                        origin.pull(progress=None if quiet else GitProgress())
                    else:
                        print("(git pull skipped)")
        if not is_present:
            # Clone
            if verbose:
                print(f'Running git clone for {full_github_repo_path} into {full_filesystem_repo_path}')
            if not dry_run:
                git.Repo.clone_from(full_github_repo_path,
                                    full_filesystem_repo_path,
                                    progress=None if quiet else GitProgress())
            else:
                print("(git clone skipped)")
        # Checkout the requested branch, if one was specified
        if branches:
            # Find the current repo in the branches list
            # (each line has the form "org/repo branch-name")
            for repo_branch in branches:
                repo_branch_tuple = repo_branch.split(" ")
                if repo_branch_tuple[0] == repo:
                    # checkout specified branch
                    branch_to_checkout = repo_branch_tuple[1]
                    if verbose:
                        print(f"checking out branch {branch_to_checkout} in repo {repo}")
                    git_repo = git.Repo(full_filesystem_repo_path)
                    git_repo.git.checkout(branch_to_checkout)

    for repo in repos:
        try:
            process_repo(repo)
        except git.exc.GitCommandError as error:
            print(f"\n******* git command returned error exit status:\n{error}")
            sys.exit(1)
# Copyright © 2022, 2023 Cerc

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

import os.path
import sys
from pathlib import Path


def include_exclude_check(s, include, exclude):
    """Return True if item s is selected by the include/exclude CSV filters.

    - both None: everything is selected
    - include given: only items in the include list are selected
      (NOTE: when both are given, exclude is ignored)
    - exclude only: everything not in the exclude list is selected
    """
    if include is None and exclude is None:
        return True
    if include is not None:
        include_list = include.split(",")
        return s in include_list
    if exclude is not None:
        exclude_list = exclude.split(",")
        return s not in exclude_list


def get_parsed_stack_config(stack):
    """Load and parse data/stacks/<stack>/stack.yml; exit(1) with a diagnostic if missing."""
    # In order to be compatible with Python 3.8 we need to use this hack to get the path:
    # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
    stack_file_path = Path(__file__).absolute().parent.joinpath("data", "stacks", stack, "stack.yml")
    try:
        # Deferred import keeps this module importable without PyYAML installed.
        import yaml
        # FIX: the original did `with stack_file_path:` (pathlib.Path's
        # deprecated context-manager protocol, removed in Python 3.13) and
        # leaked the open() handle; open the file in a managed context instead.
        with open(stack_file_path, "r") as stack_file:
            stack_config = yaml.safe_load(stack_file)
        return stack_config
    except FileNotFoundError as error:
        # We try here to generate a useful diagnostic error
        # First check if the stack directory is present
        stack_directory = stack_file_path.parent
        if os.path.exists(stack_directory):
            print(f"Error: stack.yml file is missing from stack: {stack}")
        else:
            print(f"Error: stack: {stack} does not exist")
        print(f"Exiting, error: {error}")
        sys.exit(1)
# Copyright © 2023 Cerc

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

import click
import importlib.resources


def _parse_version_string(lines):
    """Return the first non-blank, non-comment line (stripped), or None.

    Resolves the original TODO: the old code hard-coded splitlines()[1],
    which assumed exactly one leading comment line in build_tag.txt.
    """
    for line in lines:
        stripped = line.strip()
        if stripped and not stripped.startswith("#"):
            return stripped
    return None


@click.command()
@click.pass_context
def command(ctx):
    '''print tool version'''

    # See: https://stackoverflow.com/a/20885799/1701505
    from . import data
    with importlib.resources.open_text(data, "build_tag.txt") as version_file:
        version_string = _parse_version_string(version_file.read().splitlines())

    print(f"Version: {version_string}")
import click

from app import setup_repositories
from app import build_containers
from app import build_npms
from app import deploy_system
from app import version

CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])


# TODO: this seems kind of weird and heavy on boilerplate -- check it is
# the best Python can do for us.
class Options(object):
    """Carrier for the global CLI flags; an instance is stored in ctx.obj
    so every subcommand can read them."""

    def __init__(self, stack, quiet, verbose, dry_run, local_stack, debug, continue_on_error):
        self.stack = stack
        self.quiet = quiet
        self.verbose = verbose
        self.dry_run = dry_run
        self.local_stack = local_stack
        self.debug = debug
        self.continue_on_error = continue_on_error


@click.group(context_settings=CONTEXT_SETTINGS)
@click.option('--stack', help="specify a stack to build/deploy")
@click.option('--quiet', is_flag=True, default=False)
@click.option('--verbose', is_flag=True, default=False)
@click.option('--dry-run', is_flag=True, default=False)
@click.option('--local-stack', is_flag=True, default=False)
@click.option('--debug', is_flag=True, default=False)
@click.option('--continue-on-error', is_flag=True, default=False)
# See: https://click.palletsprojects.com/en/8.1.x/complex/#building-a-git-clone
@click.pass_context
def cli(ctx, stack, quiet, verbose, dry_run, local_stack, debug, continue_on_error):
    """Laconic Stack Orchestrator"""
    ctx.obj = Options(stack, quiet, verbose, dry_run, local_stack, debug, continue_on_error)


# Register every subcommand under its CLI name; "deploy" is kept as an
# alias for "deploy-system".
_subcommand_table = [
    (setup_repositories.command, "setup-repositories"),
    (build_containers.command, "build-containers"),
    (build_npms.command, "build-npms"),
    (deploy_system.command, "deploy"),
    (deploy_system.command, "deploy-system"),
    (version.command, "version"),
]

for _subcommand, _cli_name in _subcommand_table:
    cli.add_command(_subcommand, _cli_name)