Lotus #392

Merged
iskay merged 11 commits from lotus into main 2023-05-17 21:11:56 +00:00
243 changed files with 0 additions and 12394 deletions
Showing only changes of commit 09b101e730

View File

@@ -1,71 +0,0 @@
# Copyright © 2022, 2023 Cerc
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from abc import ABC, abstractmethod
from .deploy_system import get_stack_status
def get_stack(config, stack):
if stack == "package-registry":
return package_registry_stack(config, stack)
else:
return base_stack(config, stack)
class base_stack(ABC):
def __init__(self, config, stack):
self.config = config
self.stack = stack
@abstractmethod
def ensure_available(self):
pass
@abstractmethod
def get_url(self):
pass
class package_registry_stack(base_stack):
def ensure_available(self):
self.url = "<no registry url set>"
# Check if we were given an external registry URL
url_from_environment = os.environ.get("CERC_NPM_REGISTRY_URL")
if url_from_environment:
if self.config.verbose:
print(f"Using package registry url from CERC_NPM_REGISTRY_URL: {url_from_environment}")
self.url = url_from_environment
else:
# Otherwise we expect to use the local package-registry stack
# First check if the stack is up
registry_running = get_stack_status(self.config, "package-registry")
if registry_running:
# If it is available, get its mapped port and construct its URL
if self.config.debug:
print("Found local package registry stack is up")
# TODO: get url from deploy-stack
self.url = "http://gitea.local:3000/api/packages/cerc-io/npm/"
else:
# If not, print a message about how to start it and return fail to the caller
print("ERROR: The package-registry stack is not running, and no external registry specified with CERC_NPM_REGISTRY_URL")
print("ERROR: Start the local package registry with: laconic-so --stack package-registry deploy-system up")
return False
return True
def get_url(self):
return self.url
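Callers are expected to use this interface in two steps: ensure_available() to verify the registry (or fail fast with instructions), then get_url() for the endpoint. A minimal sketch of that flow as another module in this package might use it, mirroring build_npms.py below; the config object only needs the verbose/debug flags read above:

import sys
from types import SimpleNamespace
from .base import get_stack

# Stand-in for ctx.obj: base_stack only reads .verbose and .debug from it.
config = SimpleNamespace(verbose=True, debug=False)

registry_stack = get_stack(config, "package-registry")
if not registry_stack.ensure_available():
    # ensure_available() has already printed how to start the stack
    sys.exit(1)
print(f"Publishing packages to: {registry_stack.get_url()}")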

View File

@@ -1,141 +0,0 @@
# Copyright © 2022, 2023 Cerc
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Builds or pulls containers for the system components
# env vars:
# CERC_REPO_BASE_DIR defaults to ~/cerc
# TODO: display the available list of containers; allow re-build of either all or specific containers
import os
import sys
from decouple import config
import subprocess
import click
import importlib.resources
from pathlib import Path
from .util import include_exclude_check, get_parsed_stack_config
# TODO: find a place for this
# epilog="Config provided either in .env or settings.ini or env vars: CERC_REPO_BASE_DIR (defaults to ~/cerc)"
@click.command()
@click.option('--include', help="only build these containers")
@click.option('--exclude', help="don\'t build these containers")
@click.option("--force-rebuild", is_flag=True, default=False, help="Override dependency checking -- always rebuild")
@click.option("--extra-build-args", help="Supply extra arguments to build")
@click.pass_context
def command(ctx, include, exclude, force_rebuild, extra_build_args):
'''build the set of containers required for a complete stack'''
quiet = ctx.obj.quiet
verbose = ctx.obj.verbose
dry_run = ctx.obj.dry_run
debug = ctx.obj.debug
local_stack = ctx.obj.local_stack
stack = ctx.obj.stack
continue_on_error = ctx.obj.continue_on_error
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
container_build_dir = Path(__file__).absolute().parent.joinpath("data", "container-build")
if local_stack:
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
else:
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
if not quiet:
print(f'Dev Root is: {dev_root_path}')
    if not os.path.isdir(dev_root_path):
        print('Dev root directory doesn\'t exist, creating')
        os.makedirs(dev_root_path)
# See: https://stackoverflow.com/a/20885799/1701505
from . import data
with importlib.resources.open_text(data, "container-image-list.txt") as container_list_file:
all_containers = container_list_file.read().splitlines()
containers_in_scope = []
if stack:
stack_config = get_parsed_stack_config(stack)
containers_in_scope = stack_config['containers']
else:
containers_in_scope = all_containers
if verbose:
print(f'Containers: {containers_in_scope}')
if stack:
print(f"Stack: {stack}")
# TODO: make this configurable
container_build_env = {
"CERC_NPM_REGISTRY_URL": config("CERC_NPM_REGISTRY_URL", default="http://gitea.local:3000/api/packages/cerc-io/npm/"),
"CERC_NPM_AUTH_TOKEN": config("CERC_NPM_AUTH_TOKEN", default=""),
"CERC_REPO_BASE_DIR": dev_root_path,
"CERC_CONTAINER_BASE_DIR": container_build_dir,
"CERC_HOST_UID": f"{os.getuid()}",
"CERC_HOST_GID": f"{os.getgid()}",
"DOCKER_BUILDKIT": "0"
}
container_build_env.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
container_build_env.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
container_build_env.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {})
docker_host_env = os.getenv("DOCKER_HOST")
if docker_host_env:
container_build_env.update({"DOCKER_HOST": docker_host_env})
def process_container(container):
if not quiet:
print(f"Building: {container}")
build_dir = os.path.join(container_build_dir, container.replace("/", "-"))
build_script_filename = os.path.join(build_dir, "build.sh")
if verbose:
print(f"Build script filename: {build_script_filename}")
if os.path.exists(build_script_filename):
build_command = build_script_filename
else:
if verbose:
print(f"No script file found: {build_script_filename}, using default build script")
repo_dir = container.split('/')[1]
# TODO: make this less of a hack -- should be specified in some metadata somewhere
# Check if we have a repo for this container. If not, set the context dir to the container-build subdir
repo_full_path = os.path.join(dev_root_path, repo_dir)
repo_dir_or_build_dir = repo_dir if os.path.exists(repo_full_path) else build_dir
build_command = os.path.join(container_build_dir, "default-build.sh") + f" {container}:local {repo_dir_or_build_dir}"
if not dry_run:
if verbose:
print(f"Executing: {build_command} with environment: {container_build_env}")
build_result = subprocess.run(build_command, shell=True, env=container_build_env)
if verbose:
print(f"Return code is: {build_result.returncode}")
if build_result.returncode != 0:
print(f"Error running build for {container}")
if not continue_on_error:
print("FATAL Error: container build failed and --continue-on-error not set, exiting")
sys.exit(1)
else:
print("****** Container Build Error, continuing because --continue-on-error is set")
else:
print("Skipped")
for container in containers_in_scope:
if include_exclude_check(container, include, exclude):
process_container(container)
else:
if verbose:
print(f"Excluding: {container}")

View File

@@ -1,171 +0,0 @@
# Copyright © 2022, 2023 Cerc
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Builds or pulls containers for the system components
# env vars:
# CERC_REPO_BASE_DIR defaults to ~/cerc
import os
import sys
from shutil import rmtree, copytree
from decouple import config
import click
import importlib.resources
from python_on_whales import docker, DockerException
from .base import get_stack
from .util import include_exclude_check, get_parsed_stack_config
builder_js_image_name = "cerc/builder-js:local"
@click.command()
@click.option('--include', help="only build these packages")
@click.option('--exclude', help="don\'t build these packages")
@click.option("--force-rebuild", is_flag=True, default=False, help="Override existing target package version check -- force rebuild")
@click.option("--extra-build-args", help="Supply extra arguments to build")
@click.pass_context
def command(ctx, include, exclude, force_rebuild, extra_build_args):
'''build the set of npm packages required for a complete stack'''
quiet = ctx.obj.quiet
verbose = ctx.obj.verbose
dry_run = ctx.obj.dry_run
local_stack = ctx.obj.local_stack
debug = ctx.obj.debug
stack = ctx.obj.stack
continue_on_error = ctx.obj.continue_on_error
_ensure_prerequisites()
# build-npms depends on having access to a writable package registry
# so we check here that it is available
package_registry_stack = get_stack(ctx.obj, "package-registry")
registry_available = package_registry_stack.ensure_available()
if not registry_available:
print("FATAL: no npm registry available for build-npms command")
sys.exit(1)
npm_registry_url = package_registry_stack.get_url()
npm_registry_url_token = config("CERC_NPM_AUTH_TOKEN", default=None)
if not npm_registry_url_token:
print("FATAL: CERC_NPM_AUTH_TOKEN is not defined")
sys.exit(1)
if local_stack:
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
else:
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
build_root_path = os.path.join(dev_root_path, "build-trees")
if verbose:
print(f'Dev Root is: {dev_root_path}')
if not os.path.isdir(dev_root_path):
print('Dev root directory doesn\'t exist, creating')
os.makedirs(dev_root_path)
    if not os.path.isdir(build_root_path):
        print('Build root directory doesn\'t exist, creating')
        os.makedirs(build_root_path)
# See: https://stackoverflow.com/a/20885799/1701505
from . import data
with importlib.resources.open_text(data, "npm-package-list.txt") as package_list_file:
all_packages = package_list_file.read().splitlines()
packages_in_scope = []
if stack:
stack_config = get_parsed_stack_config(stack)
# TODO: syntax check the input here
packages_in_scope = stack_config['npms']
else:
packages_in_scope = all_packages
if verbose:
print(f'Packages: {packages_in_scope}')
def build_package(package):
if not quiet:
print(f"Building npm package: {package}")
repo_dir = package
repo_full_path = os.path.join(dev_root_path, repo_dir)
# Copy the repo and build that to avoid propagating JS tooling file changes back into the cloned repo
repo_copy_path = os.path.join(build_root_path, repo_dir)
# First delete any old build tree
if os.path.isdir(repo_copy_path):
if verbose:
print(f"Deleting old build tree: {repo_copy_path}")
if not dry_run:
rmtree(repo_copy_path)
# Now copy the repo into the build tree location
if verbose:
print(f"Copying build tree from: {repo_full_path} to: {repo_copy_path}")
if not dry_run:
copytree(repo_full_path, repo_copy_path)
build_command = ["sh", "-c", f"cd /workspace && build-npm-package-local-dependencies.sh {npm_registry_url}"]
if not dry_run:
if verbose:
print(f"Executing: {build_command}")
# Originally we used the PEP 584 merge operator:
# envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token} | ({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
# but that isn't available in Python 3.8 (default in Ubuntu 20) so for now we use dict.update:
envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token,
"LACONIC_HOSTED_CONFIG_FILE": "config-hosted.yml" # Convention used by our web app packages
}
envs.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
envs.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
envs.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {})
try:
docker.run(builder_js_image_name,
remove=True,
interactive=True,
tty=True,
user=f"{os.getuid()}:{os.getgid()}",
envs=envs,
# TODO: detect this host name in npm_registry_url rather than hard-wiring it
add_hosts=[("gitea.local", "host-gateway")],
volumes=[(repo_copy_path, "/workspace")],
command=build_command
)
# Note that although the docs say that build_result should contain
# the command output as a string, in reality it is always the empty string.
# Since we detect errors via catching exceptions below, we can safely ignore it here.
except DockerException as e:
print(f"Error executing build for {package} in container:\n {e}")
if not continue_on_error:
print("FATAL Error: build failed and --continue-on-error not set, exiting")
sys.exit(1)
else:
print("****** Build Error, continuing because --continue-on-error is set")
else:
print("Skipped")
for package in packages_in_scope:
if include_exclude_check(package, include, exclude):
build_package(package)
else:
if verbose:
print(f"Excluding: {package}")
def _ensure_prerequisites():
# Check that the builder-js container is available and
# Tell the user how to build it if not
images = docker.image.list(builder_js_image_name)
if len(images) == 0:
print(f"FATAL: builder image: {builder_js_image_name} is required but was not found")
print("Please run this command to create it: laconic-so --stack build-support build-containers")
sys.exit(1)
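As the comment in build_package() above notes, the dict.update() chain stands in for the PEP 584 merge operator (|), which requires Python 3.9. The two forms side by side, on illustrative values:

base = {"CERC_NPM_AUTH_TOKEN": "token"}
extra = {"CERC_SCRIPT_DEBUG": "true"}

# Python 3.8-compatible form used above:
envs = dict(base)
envs.update(extra)

# PEP 584 equivalent, once 3.8 support can be dropped (Python 3.9+):
assert envs == base | extra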

View File

@@ -1,2 +0,0 @@
# This file should be re-generated by running: scripts/create_build_tag_file.sh
1.1.0-20884d8-202305100258

View File

@@ -1,11 +0,0 @@
version: "3.2"
services:
contract:
depends_on:
go-ethereum:
condition: service_healthy
image: cerc/test-contract:local
environment:
ETH_ADDR: "http://go-ethereum:8545"
ports:
- "127.0.0.1:3000:3000"

View File

@@ -1,59 +0,0 @@
version: '3.2'
services:
eth-probe-db:
restart: always
image: timescale/timescaledb:latest-pg14
environment:
POSTGRES_USER: "probe"
POSTGRES_DB: "probe"
POSTGRES_PASSWORD: "probe"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "5432"]
interval: 20s
timeout: 5s
retries: 15
start_period: 10s
volumes:
- ../../eth-probe/db/schema.sql:/docker-entrypoint-initdb.d/init.sql
ports:
- 5432
eth-probe-mq:
restart: always
image: cerc/eth-probe:local
environment:
MODE: "mq"
PROBE_DEV: "false"
PGPORT: 5432
PGPASSWORD: "probe"
DB_USER: "probe"
PROBE_DB_NAME: "probe"
PROBE_DB_LOCATION: "eth-probe-db"
MQ_HOST: "0.0.0.0"
MQ_PORT: 33333
DEBUG: "vulcanize:*,cerc:*"
healthcheck:
test: [ "CMD", "nc", "-v", "localhost", "33333" ]
interval: 20s
timeout: 5s
retries: 15
start_period: 10s
depends_on:
eth-probe-db:
condition: service_healthy
eth-probe-probe:
restart: always
image: cerc/eth-probe:local
environment:
MODE: "probe"
PROBE_DEV: "false"
MQ_HOST: "eth-probe-mq"
MQ_PORT: 33333
PROBE_ID: 0
GETH_HOST: "fixturenet-eth-geth-1"
GETH_MIN_BLOCK: 5
GETHJSON_URL: "http://fixturenet-eth-geth-1:9898/geth.json"
DEBUG: "vulcanize:*,cerc:*,-vulcanize:sniffer:dpt:error"
depends_on:
eth-probe-mq:
condition: service_healthy

View File

@@ -1,20 +0,0 @@
version: "3.2"
services:
eth-statediff-fill-service:
restart: unless-stopped
depends_on:
ipld-eth-db:
condition: service_healthy
image: cerc/eth-statediff-fill-service:local
environment:
ETH_SERVER_HTTPPATH: 0.0.0.0:8085
VDB_COMMAND: "serve"
DATABASE_NAME: "cerc_testing"
DATABASE_HOSTNAME: "ipld-eth-db"
DATABASE_PORT: 5432
DATABASE_USER: "vdbm"
DATABASE_PASSWORD: "password"
ETH_HTTP_PATH: $eth_http_path
WATCHED_ADDRESS_GAP_FILLER_INTERVAL: $watched_address_gap_filler_interval
ports:
- "127.0.0.1:8085:8085"

View File

@@ -1,23 +0,0 @@
version: "3.2"
services:
prometheus:
restart: always
image: prom/prometheus
depends_on:
fixturenet-eth-geth-1:
condition: service_healthy
volumes:
- ../config/fixturenet-eth-metrics/prometheus/etc:/etc/prometheus
ports:
- "9090"
grafana:
restart: always
image: grafana/grafana
environment:
- GF_SECURITY_ADMIN_PASSWORD=changeme6325
volumes:
- ../config/fixturenet-eth-metrics/grafana/etc/provisioning/dashboards:/etc/grafana/provisioning/dashboards
- ../config/fixturenet-eth-metrics/grafana/etc/provisioning/datasources:/etc/grafana/provisioning/datasources
- ../config/fixturenet-eth-metrics/grafana/etc/dashboards:/etc/grafana/dashboards
ports:
- "3000"

View File

@@ -1,123 +0,0 @@
version: '3.7'
services:
fixturenet-eth-bootnode-geth:
hostname: fixturenet-eth-bootnode-geth
env_file:
- ../config/fixturenet-eth/fixturenet-eth.env
environment:
RUN_BOOTNODE: "true"
image: cerc/fixturenet-eth-geth:local
volumes:
- fixturenet_eth_bootnode_geth_data:/root/ethdata
ports:
- "9898"
- "30303"
fixturenet-eth-geth-1:
hostname: fixturenet-eth-geth-1
cap_add:
- SYS_PTRACE
environment:
CERC_REMOTE_DEBUG: "true"
CERC_RUN_STATEDIFF: "detect"
CERC_STATEDIFF_DB_NODE_ID: 1
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
env_file:
- ../config/fixturenet-eth/fixturenet-eth.env
image: cerc/fixturenet-eth-geth:local
volumes:
- fixturenet_eth_geth_1_data:/root/ethdata
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "8545"]
interval: 30s
timeout: 10s
retries: 10
start_period: 3s
depends_on:
- fixturenet-eth-bootnode-geth
ports:
- "8545"
- "40000"
- "6060"
fixturenet-eth-geth-2:
hostname: fixturenet-eth-geth-2
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "8545"]
interval: 30s
timeout: 10s
retries: 10
start_period: 3s
env_file:
- ../config/fixturenet-eth/fixturenet-eth.env
image: cerc/fixturenet-eth-geth:local
depends_on:
- fixturenet-eth-bootnode-geth
volumes:
- fixturenet_eth_geth_2_data:/root/ethdata
fixturenet-eth-bootnode-lighthouse:
hostname: fixturenet-eth-bootnode-lighthouse
environment:
RUN_BOOTNODE: "true"
image: cerc/fixturenet-eth-lighthouse:local
volumes:
- fixturenet_eth_bootnode_lighthouse_data:/opt/testnet/build/cl
fixturenet-eth-lighthouse-1:
hostname: fixturenet-eth-lighthouse-1
healthcheck:
test: ["CMD", "wget", "--tries=1", "--connect-timeout=1", "--quiet", "-O", "-", "http://localhost:8001/eth/v2/beacon/blocks/head"]
interval: 30s
timeout: 10s
retries: 10
start_period: 30s
env_file:
- ../config/fixturenet-eth/fixturenet-eth.env
environment:
NODE_NUMBER: "1"
ETH1_ENDPOINT: "http://fixturenet-eth-geth-1:8545"
EXECUTION_ENDPOINT: "http://fixturenet-eth-geth-1:8551"
image: cerc/fixturenet-eth-lighthouse:local
volumes:
- fixturenet_eth_lighthouse_1_data:/opt/testnet/build/cl
depends_on:
fixturenet-eth-bootnode-lighthouse:
condition: service_started
fixturenet-eth-geth-1:
condition: service_healthy
ports:
- "8001"
fixturenet-eth-lighthouse-2:
hostname: fixturenet-eth-lighthouse-2
healthcheck:
test: ["CMD", "wget", "--tries=1", "--connect-timeout=1", "--quiet", "-O", "-", "http://localhost:8001/eth/v2/beacon/blocks/head"]
interval: 30s
timeout: 10s
retries: 10
start_period: 30s
env_file:
- ../config/fixturenet-eth/fixturenet-eth.env
environment:
NODE_NUMBER: "2"
ETH1_ENDPOINT: "http://fixturenet-eth-geth-2:8545"
EXECUTION_ENDPOINT: "http://fixturenet-eth-geth-2:8551"
LIGHTHOUSE_GENESIS_STATE_URL: "http://fixturenet-eth-lighthouse-1:8001/eth/v2/debug/beacon/states/0"
image: cerc/fixturenet-eth-lighthouse:local
volumes:
- fixturenet_eth_lighthouse_2_data:/opt/testnet/build/cl
depends_on:
fixturenet-eth-bootnode-lighthouse:
condition: service_started
fixturenet-eth-geth-2:
condition: service_healthy
volumes:
fixturenet_eth_bootnode_geth_data:
fixturenet_eth_geth_1_data:
fixturenet_eth_geth_2_data:
fixturenet_eth_bootnode_lighthouse_data:
fixturenet_eth_lighthouse_1_data:
fixturenet_eth_lighthouse_2_data:
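The healthchecks above run inside the containers, and the published ports (8545 for geth, 8001 for lighthouse) get ephemeral host mappings, so from the host they must first be resolved, e.g. with docker port. A sketch that performs the same two probes the healthchecks do, assuming the mapped ports have been resolved into GETH_PORT and LIGHTHOUSE_PORT:

import json
import urllib.request

GETH_PORT = 8545        # replace with the host port mapped to fixturenet-eth-geth-1:8545
LIGHTHOUSE_PORT = 8001  # replace with the host port mapped to fixturenet-eth-lighthouse-1:8001

# geth: query the same 8545 endpoint the nc healthcheck targets, via JSON-RPC.
req = urllib.request.Request(
    f"http://localhost:{GETH_PORT}",
    data=json.dumps({"jsonrpc": "2.0", "method": "eth_blockNumber", "params": [], "id": 1}).encode(),
    headers={"Content-Type": "application/json"})
print("geth head block:", json.load(urllib.request.urlopen(req))["result"])

# lighthouse: fetch the same beacon endpoint the wget healthcheck uses.
with urllib.request.urlopen(f"http://localhost:{LIGHTHOUSE_PORT}/eth/v2/beacon/blocks/head") as resp:
    print("beacon head slot:", json.load(resp)["data"]["message"]["slot"])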

View File

@@ -1,8 +0,0 @@
services:
laconic-console:
restart: unless-stopped
image: cerc/laconic-console-host:local
environment:
- LACONIC_HOSTED_ENDPOINT=${LACONIC_HOSTED_ENDPOINT:-http://localhost}
ports:
- "80"

View File

@@ -1,26 +0,0 @@
version: "3.2"
services:
laconicd:
restart: unless-stopped
image: cerc/laconicd:local
command: ["sh", "/docker-entrypoint-scripts.d/create-fixturenet.sh"]
volumes:
# TODO: look at folding these scripts into the container
- ../config/fixturenet-laconicd/create-fixturenet.sh:/docker-entrypoint-scripts.d/create-fixturenet.sh
- ../config/fixturenet-laconicd/export-mykey.sh:/docker-entrypoint-scripts.d/export-mykey.sh
- ../config/fixturenet-laconicd/export-myaddress.sh:/docker-entrypoint-scripts.d/export-myaddress.sh
# TODO: determine which of the ports below is really needed
ports:
- "6060"
- "26657"
- "26656"
- "9473:9473"
- "8545"
- "8546"
- "9090"
- "9091"
- "1317"
cli:
image: cerc/laconic-registry-cli:local
volumes:
- ../config/fixturenet-laconicd/registry-cli-config-template.yml:/registry-cli-config-template.yml

View File

@@ -1,68 +0,0 @@
version: "3.8"
services:
lotus-miner:
hostname: lotus-miner
env_file:
- ../config/fixturenet-lotus/lotus-env.env
image: cerc/lotus:local
volumes:
- ../config/fixturenet-lotus/setup-miner.sh:/docker-entrypoint-scripts.d/setup-miner.sh
- ../config/fixturenet-lotus/genesis/devgen.car:/devgen.car
- $HOME/stack-orchestrator/app/data/config/fixturenet-lotus/genesis/.genesis-sectors:/root/.genesis-sectors
- lotus-shared:/root/.lotus-shared
healthcheck:
# test: ["CMD-SHELL", "grep 'started ChainNotify channel' /var/log/lotus.log"]
# test: ["CMD-SHELL", "[ -f /root/.lotus-shared/miner.addr ]"]
test: ["CMD-SHELL", "[ -d /root/.lotus-miner-local-net ]"]
interval: 10s
timeout: 10s
retries: 10
start_period: 60s
entrypoint: ["sh", "/docker-entrypoint-scripts.d/setup-miner.sh"]
ports:
- "1234"
- "2345"
- "3456"
- "1777"
lotus-node-1:
hostname: lotus-node-1
env_file:
- ../config/fixturenet-lotus/lotus-env.env
image: cerc/lotus:local
volumes:
- ../config/fixturenet-lotus/setup-node.sh:/docker-entrypoint-scripts.d/setup-node.sh
- ../config/fixturenet-lotus/genesis/devgen.car:/devgen.car
- lotus-shared:/root/.lotus-shared
depends_on:
lotus-miner:
condition: service_healthy
entrypoint: ["sh", "/docker-entrypoint-scripts.d/setup-node.sh"]
ports:
- "1234"
- "2345"
- "3456"
- "1777"
lotus-node-2:
hostname: lotus-node-2
env_file:
- ../config/fixturenet-lotus/lotus-env.env
image: cerc/lotus:local
volumes:
- ../config/fixturenet-lotus/setup-node.sh:/docker-entrypoint-scripts.d/setup-node.sh
- ../config/fixturenet-lotus/genesis/devgen.car:/devgen.car
- lotus-shared:/root/.lotus-shared
depends_on:
lotus-miner:
condition: service_healthy
entrypoint: ["sh", "/docker-entrypoint-scripts.d/setup-node.sh"]
ports:
- "1234"
- "2345"
- "3456"
- "1777"
volumes:
lotus-shared:

View File

@@ -1,155 +0,0 @@
version: '3.7'
services:
# Generates and funds the accounts required when setting up the L2 chain (outputs to volume l2_accounts)
# Creates / updates the configuration for L1 contracts deployment
# Deploys the L1 smart contracts (outputs to volume l1_deployment)
fixturenet-optimism-contracts:
hostname: fixturenet-optimism-contracts
image: cerc/optimism-contracts:local
env_file:
- ../config/fixturenet-optimism/l1-params.env
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_L1_CHAIN_ID: ${CERC_L1_CHAIN_ID}
CERC_L1_RPC: ${CERC_L1_RPC}
CERC_L1_ACCOUNTS_CSV_URL: ${CERC_L1_ACCOUNTS_CSV_URL}
CERC_L1_ADDRESS: ${CERC_L1_ADDRESS}
CERC_L1_PRIV_KEY: ${CERC_L1_PRIV_KEY}
CERC_L1_ADDRESS_2: ${CERC_L1_ADDRESS_2}
CERC_L1_PRIV_KEY_2: ${CERC_L1_PRIV_KEY_2}
# Waits for L1 endpoint to be up before running the script
command: |
"./wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- ./run.sh"
volumes:
- ../config/wait-for-it.sh:/app/packages/contracts-bedrock/wait-for-it.sh
- ../container-build/cerc-optimism-contracts/hardhat-tasks/verify-contract-deployment.ts:/app/packages/contracts-bedrock/tasks/verify-contract-deployment.ts
- ../container-build/cerc-optimism-contracts/hardhat-tasks/rekey-json.ts:/app/packages/contracts-bedrock/tasks/rekey-json.ts
- ../container-build/cerc-optimism-contracts/hardhat-tasks/send-balance.ts:/app/packages/contracts-bedrock/tasks/send-balance.ts
- ../config/fixturenet-optimism/optimism-contracts/update-config.js:/app/packages/contracts-bedrock/update-config.js
- ../config/fixturenet-optimism/optimism-contracts/run.sh:/app/packages/contracts-bedrock/run.sh
- l2_accounts:/l2-accounts
- l1_deployment:/app/packages/contracts-bedrock
extra_hosts:
- "host.docker.internal:host-gateway"
# Generates the config files required for L2 (outputs to volume l2_config)
op-node-l2-config-gen:
image: cerc/optimism-op-node:local
depends_on:
fixturenet-optimism-contracts:
condition: service_completed_successfully
env_file:
- ../config/fixturenet-optimism/l1-params.env
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_L1_RPC: ${CERC_L1_RPC}
volumes:
- ../config/fixturenet-optimism/generate-l2-config.sh:/app/generate-l2-config.sh
- l1_deployment:/contracts-bedrock:ro
- l2_config:/app
command: ["sh", "/app/generate-l2-config.sh"]
extra_hosts:
- "host.docker.internal:host-gateway"
# Initializes and runs the L2 execution client (outputs to volume l2_geth_data)
op-geth:
image: cerc/optimism-l2geth:local
depends_on:
op-node-l2-config-gen:
condition: service_started
volumes:
- ../config/fixturenet-optimism/run-op-geth.sh:/run-op-geth.sh
- l2_config:/op-node:ro
- l2_accounts:/l2-accounts:ro
- l2_geth_data:/datadir
entrypoint: "sh"
command: "/run-op-geth.sh"
ports:
- "0.0.0.0:8545:8545"
healthcheck:
test: ["CMD", "nc", "-vz", "localhost:8545"]
interval: 30s
timeout: 10s
retries: 10
start_period: 10s
# Runs the L2 consensus client (Sequencer node)
op-node:
image: cerc/optimism-op-node:local
depends_on:
op-geth:
condition: service_healthy
env_file:
- ../config/fixturenet-optimism/l1-params.env
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_L1_RPC: ${CERC_L1_RPC}
volumes:
- ../config/fixturenet-optimism/run-op-node.sh:/app/run-op-node.sh
- l2_config:/op-node-data:ro
- l2_accounts:/l2-accounts:ro
command: ["sh", "/app/run-op-node.sh"]
ports:
- "0.0.0.0:8547:8547"
healthcheck:
test: ["CMD", "nc", "-vz", "localhost:8547"]
interval: 30s
timeout: 10s
retries: 10
start_period: 10s
extra_hosts:
- "host.docker.internal:host-gateway"
# Runs the batcher (takes transactions from the Sequencer and publishes them to L1)
op-batcher:
image: cerc/optimism-op-batcher:local
depends_on:
op-node:
condition: service_healthy
op-geth:
condition: service_healthy
env_file:
- ../config/fixturenet-optimism/l1-params.env
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_L1_RPC: ${CERC_L1_RPC}
volumes:
- ../config/wait-for-it.sh:/wait-for-it.sh
- ../config/fixturenet-optimism/run-op-batcher.sh:/run-op-batcher.sh
- l2_accounts:/l2-accounts:ro
entrypoint: ["sh", "-c"]
# Waits for L1 endpoint to be up before running the batcher
command: |
"/wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- /run-op-batcher.sh"
extra_hosts:
- "host.docker.internal:host-gateway"
# Runs the proposer (periodically submits new state roots to L1)
op-proposer:
image: cerc/optimism-op-proposer:local
depends_on:
op-node:
condition: service_healthy
env_file:
- ../config/fixturenet-optimism/l1-params.env
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_L1_RPC: ${CERC_L1_RPC}
volumes:
- ../config/wait-for-it.sh:/wait-for-it.sh
- ../config/fixturenet-optimism/run-op-proposer.sh:/run-op-proposer.sh
- l1_deployment:/contracts-bedrock:ro
- l2_accounts:/l2-accounts:ro
entrypoint: ["sh", "-c"]
# Waits for L1 endpoint to be up before running the proposer
command: |
"/wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- /run-op-proposer.sh"
extra_hosts:
- "host.docker.internal:host-gateway"
volumes:
l1_deployment:
l2_accounts:
l2_config:
l2_geth_data:
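Two details worth noting in the command strings above: ${CERC_L1_HOST:-...} is interpolated by docker compose from the host environment, while the $$ in $${DEFAULT_CERC_L1_HOST} escapes the dollar sign so expansion happens inside the container, where the default comes from l1-params.env. And since op-geth publishes a fixed host mapping (0.0.0.0:8545), the L2 endpoint can be sanity-checked directly once its healthcheck passes; a minimal probe:

import json
import urllib.request

payload = {"jsonrpc": "2.0", "method": "eth_chainId", "params": [], "id": 1}
req = urllib.request.Request(
    "http://localhost:8545",  # op-geth maps 8545 verbatim in this compose file
    data=json.dumps(payload).encode(),
    headers={"Content-Type": "application/json"})
chain_id = json.load(urllib.request.urlopen(req))["result"]
print("L2 chain id:", int(chain_id, 16))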

View File

@@ -1,8 +0,0 @@
# Add-on pod to include foundry tooling within a fixturenet
services:
foundry:
image: cerc/foundry:local
command: ["while :; do sleep 600; done"]
volumes:
- ../config/foundry/foundry.toml:/foundry.toml
- ./foundry/workspace:/workspace

View File

@@ -1,30 +0,0 @@
version: "3.2"
services:
go-ethereum:
restart: unless-stopped
depends_on:
ipld-eth-db:
condition: service_healthy
image: cerc/go-ethereum-foundry:local
healthcheck:
test: ["CMD", "nc", "-vz", "localhost", "8545"]
interval: 30s
timeout: 3s
retries: 10
environment:
DB_USER: vdbm
DB_NAME: cerc_testing
DB_HOST: ipld-eth-db
DB_PORT: 5432
DB_PASSWORD: password
DB_WRITE: "true"
DB_TYPE: postgres
DB_DRIVER: sqlx
DB_WAIT_FOR_SYNC: "true"
CHAIN_ID: "99"
USE_GENESIS: "true"
EXTRA_START_ARGS: "--metrics --metrics.expensive --metrics.addr 0.0.0.0 --metrics.port 6060"
ports:
- "127.0.0.1:8545:8545"
- "127.0.0.1:8546:8546"

View File

@@ -1,17 +0,0 @@
version: "3.2"
services:
ipld-eth-beacon-db:
restart: always
image: cerc/ipld-eth-beacon-db:local
environment:
POSTGRES_USER: vdbm
POSTGRES_DB: cerc_testing
POSTGRES_PASSWORD: password
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "5432"]
interval: 30s
timeout: 10s
retries: 10
start_period: 3s
ports:
- "127.0.0.1:8076:5432"

View File

@@ -1,12 +0,0 @@
version: "3.2"
services:
ipld-eth-beacon-indexer:
restart: unless-stopped
depends_on:
ipld-eth-beacon-db:
condition: service_healthy
fixturenet-eth-lighthouse-1:
condition: service_healthy
image: cerc/ipld-eth-beacon-indexer:local
env_file:
- ../config/ipld-eth-beacon-indexer/indexer.env

View File

@@ -1,31 +0,0 @@
version: "3.2"
services:
migrations:
restart: on-failure
depends_on:
ipld-eth-db:
condition: service_healthy
image: cerc/ipld-eth-db:local
environment:
DATABASE_USER: "vdbm"
DATABASE_NAME: "cerc_testing"
DATABASE_PASSWORD: "password"
DATABASE_HOSTNAME: "ipld-eth-db"
DATABASE_PORT: 5432
ipld-eth-db:
image: timescale/timescaledb:2.8.1-pg14
restart: always
environment:
POSTGRES_USER: "vdbm"
POSTGRES_DB: "cerc_testing"
POSTGRES_PASSWORD: "password"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "5432"]
interval: 30s
timeout: 10s
retries: 10
start_period: 3s
ports:
- "127.0.0.1:8077:5432"

View File

@@ -1,46 +0,0 @@
version: "3.2"
services:
ipld-eth-server:
restart: unless-stopped
depends_on:
ipld-eth-db:
condition: service_healthy
image: cerc/ipld-eth-server:local
environment:
IPLD_SERVER_GRAPHQL: "true"
IPLD_POSTGRAPHILEPATH: http://graphql:5000
ETH_SERVER_HTTPPATH: 0.0.0.0:8081
ETH_SERVER_GRAPHQL: "true"
ETH_SERVER_GRAPHQLPATH: 0.0.0.0:8082
VDB_COMMAND: "serve"
ETH_CHAIN_CONFIG: "/tmp/chain.json"
DATABASE_NAME: cerc_testing
DATABASE_HOSTNAME: ipld-eth-db
DATABASE_PORT: 5432
DATABASE_USER: "vdbm"
DATABASE_PASSWORD: "password"
ETH_CHAIN_ID: 99
ETH_FORWARD_ETH_CALLS: "false"
ETH_FORWARD_GET_STORAGE_AT: "false"
ETH_PROXY_ON_ERROR: "false"
METRICS: "true"
PROM_HTTP: "true"
PROM_HTTP_ADDR: "0.0.0.0"
PROM_HTTP_PORT: "8090"
LOGRUS_LEVEL: "debug"
CERC_REMOTE_DEBUG: "true"
volumes:
- type: bind
source: ../config/ipld-eth-server/chain.json
target: /tmp/chain.json
ports:
- "8081"
- "8082"
- "8090"
- "40000"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "8081"]
interval: 20s
timeout: 5s
retries: 15
start_period: 5s

View File

@@ -1,45 +0,0 @@
version: '3.8'
services:
keycloak-db:
image: postgres:14-alpine
env_file:
- ../config/keycloak/keycloak.env
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "5432"]
interval: 30s
timeout: 10s
retries: 10
start_period: 3s
ports:
- 5432
keycloak:
image: cerc/keycloak:local
env_file:
- ../config/keycloak/keycloak.env
environment:
JAVA_OPTS_APPEND: "-Dkeycloak.migration.action=import -Dkeycloak.migration.provider=dir -Dkeycloak.migration.dir=/import -Dkeycloak.migration.strategy=IGNORE_EXISTING"
volumes:
- ../config/keycloak/import:/import
ports:
- 8080
command: ["start"]
depends_on:
keycloak-db:
condition: service_healthy
keycloak-nginx:
image: nginx:1.23-alpine
restart: always
volumes:
- ../config/keycloak/nginx:/etc/nginx/conf.d
ports:
- 80
depends_on:
- keycloak
keycloak-nginx-prometheus-exporter:
image: nginx/nginx-prometheus-exporter
restart: always
environment:
- SCRAPE_URI=http://keycloak-nginx:80/stub_status
depends_on:
- keycloak-nginx

View File

@@ -1,13 +0,0 @@
version: "3.2"
# See: https://docs.ipfs.tech/install/run-ipfs-inside-docker/#set-up
services:
ipfs:
image: ipfs/kubo:master-2023-02-20-714a968
restart: always
volumes:
- ./ipfs/import:/import
- ./ipfs/data:/data/ipfs
ports:
- "0.0.0.0:8080:8080"
- "0.0.0.0:4001:4001"
- "0.0.0.0:5001:5001"

View File

@@ -1,17 +0,0 @@
version: "3.2"
services:
laconicd:
restart: unless-stopped
image: cerc/laconicd:local
# TODO: determine which of the ports below is really needed
ports:
- "6060"
- "26657"
- "26656"
- "9473"
- "8545"
- "8546"
- "9090"
- "9091"
- "1317"

View File

@@ -1,68 +0,0 @@
version: '3.2'
services:
# Builds and serves the MobyMask react-app
mobymask-app:
restart: unless-stopped
image: cerc/mobymask-ui:local
env_file:
- ../config/watcher-mobymask-v2/mobymask-params.env
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_CHAIN_ID: ${CERC_CHAIN_ID}
CERC_DEPLOYED_CONTRACT: ${CERC_DEPLOYED_CONTRACT}
CERC_APP_WATCHER_URL: ${CERC_APP_WATCHER_URL}
CERC_RELAY_NODES: ${CERC_RELAY_NODES}
CERC_BUILD_DIR: "@cerc-io/mobymask-ui/build"
working_dir: /scripts
command: ["sh", "mobymask-app-start.sh"]
volumes:
- ../config/wait-for-it.sh:/scripts/wait-for-it.sh
- ../config/watcher-mobymask-v2/mobymask-app-start.sh:/scripts/mobymask-app-start.sh
- peers_ids:/peers
- mobymask_deployment:/server
ports:
- "0.0.0.0:3002:80"
healthcheck:
test: ["CMD", "nc", "-vz", "localhost", "80"]
interval: 20s
timeout: 5s
retries: 15
start_period: 10s
extra_hosts:
- "host.docker.internal:host-gateway"
# Builds and serves the LXDAO version of MobyMask react-app
lxdao-mobymask-app:
restart: unless-stopped
image: cerc/mobymask-ui:local
env_file:
- ../config/watcher-mobymask-v2/mobymask-params.env
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_CHAIN_ID: ${CERC_CHAIN_ID}
CERC_DEPLOYED_CONTRACT: ${CERC_DEPLOYED_CONTRACT}
CERC_APP_WATCHER_URL: ${CERC_APP_WATCHER_URL}
CERC_RELAY_NODES: ${CERC_RELAY_NODES}
CERC_BUILD_DIR: "@cerc-io/mobymask-ui-lxdao/build"
working_dir: /scripts
command: ["sh", "mobymask-app-start.sh"]
volumes:
- ../config/wait-for-it.sh:/scripts/wait-for-it.sh
- ../config/watcher-mobymask-v2/mobymask-app-start.sh:/scripts/mobymask-app-start.sh
- peers_ids:/peers
- mobymask_deployment:/server
ports:
- "0.0.0.0:3004:80"
healthcheck:
test: ["CMD", "nc", "-vz", "localhost", "80"]
interval: 20s
timeout: 5s
retries: 15
start_period: 10s
extra_hosts:
- "host.docker.internal:host-gateway"
volumes:
mobymask_deployment:
peers_ids:

View File

@@ -1,30 +0,0 @@
version: '3.2'
services:
peer-test-app:
# Builds and serves the peer-test react-app
image: cerc/react-peer:local
working_dir: /scripts
env_file:
- ../config/watcher-mobymask-v2/mobymask-params.env
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_RELAY_NODES: ${CERC_RELAY_NODES}
command: ["sh", "test-app-start.sh"]
volumes:
- ../config/wait-for-it.sh:/scripts/wait-for-it.sh
- ../config/watcher-mobymask-v2/test-app-start.sh:/scripts/test-app-start.sh
- peers_ids:/peers
ports:
- "0.0.0.0:3003:80"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "80"]
interval: 20s
timeout: 5s
retries: 15
start_period: 10s
extra_hosts:
- "host.docker.internal:host-gateway"
volumes:
peers_ids:

View File

@@ -1,7 +0,0 @@
version: "3.2"
services:
test:
image: cerc/test-container:local
restart: always
ports:
- "80"

View File

@@ -1,18 +0,0 @@
version: '3.2'
services:
tx-spammer:
restart: always
image: cerc/tx-spammer:local
env_file:
- ../config/tx-spammer/tx-spammer.env
environment:
ACCOUNTS_CSV_URL: http://fixturenet-eth-bootnode-geth:9898/accounts.csv
ETH_HTTP_PATH: http://fixturenet-eth-geth-1:8545
LOG_LEVEL: debug
SPAMMER_COMMAND: autoSend
depends_on:
fixturenet-eth-bootnode-geth:
condition: service_started
fixturenet-eth-geth-1:
condition: service_healthy

View File

@@ -1,49 +0,0 @@
version: '3.2'
services:
erc20-watcher-db:
restart: unless-stopped
image: postgres:14-alpine
environment:
- POSTGRES_USER=vdbm
- POSTGRES_MULTIPLE_DATABASES=erc20-watcher,erc20-watcher-job-queue
- POSTGRES_EXTENSION=erc20-watcher-job-queue:pgcrypto
- POSTGRES_PASSWORD=password
volumes:
- ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh
- erc20_watcher_db_data:/var/lib/postgresql/data
ports:
- "0.0.0.0:15433:5432"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "5432"]
interval: 20s
timeout: 5s
retries: 15
start_period: 10s
erc20-watcher:
restart: unless-stopped
depends_on:
ipld-eth-server:
condition: service_healthy
erc20-watcher-db:
condition: service_healthy
image: cerc/watcher-erc20:local
environment:
- ETH_RPC_URL=http://go-ethereum:8545
command: ["sh", "-c", "yarn server"]
volumes:
- ../config/watcher-erc20/erc20-watcher.toml:/app/packages/erc20-watcher/environments/local.toml
ports:
- "0.0.0.0:3002:3001"
- "0.0.0.0:9002:9001"
healthcheck:
test: ["CMD", "nc", "-vz", "localhost", "3001"]
interval: 20s
timeout: 5s
retries: 15
start_period: 5s
volumes:
erc20_watcher_db_data:

View File

@@ -1,49 +0,0 @@
version: '3.2'
services:
erc721-watcher-db:
restart: unless-stopped
image: postgres:14-alpine
environment:
- POSTGRES_USER=vdbm
- POSTGRES_MULTIPLE_DATABASES=erc721-watcher,erc721-watcher-job-queue
- POSTGRES_EXTENSION=erc721-watcher-job-queue:pgcrypto
- POSTGRES_PASSWORD=password
volumes:
- ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh
- erc721_watcher_db_data:/var/lib/postgresql/data
ports:
- "0.0.0.0:15434:5432"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "5432"]
interval: 20s
timeout: 5s
retries: 15
start_period: 10s
erc721-watcher:
restart: unless-stopped
depends_on:
ipld-eth-server:
condition: service_healthy
erc721-watcher-db:
condition: service_healthy
image: cerc/watcher-erc721:local
environment:
- ETH_RPC_URL=http://go-ethereum:8545
command: ["sh", "-c", "yarn server"]
volumes:
- ../config/watcher-erc721/erc721-watcher.toml:/app/packages/erc721-watcher/environments/local.toml
ports:
- "0.0.0.0:3009:3009"
- "0.0.0.0:9003:9001"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "3009"]
interval: 20s
timeout: 5s
retries: 15
start_period: 5s
volumes:
erc721_watcher_db_data:

View File

@@ -1,134 +0,0 @@
version: '3.2'
services:
# Starts the PostgreSQL database for watcher
mobymask-watcher-db:
restart: unless-stopped
image: postgres:14-alpine
environment:
- POSTGRES_USER=vdbm
- POSTGRES_MULTIPLE_DATABASES=mobymask-watcher,mobymask-watcher-job-queue
- POSTGRES_EXTENSION=mobymask-watcher-job-queue:pgcrypto
- POSTGRES_PASSWORD=password
volumes:
- ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh
- mobymask_watcher_db_data:/var/lib/postgresql/data
ports:
- "0.0.0.0:15432:5432"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "5432"]
interval: 20s
timeout: 5s
retries: 15
start_period: 10s
# Deploys the MobyMask contract and generates an invite link
# Deployment is skipped if CERC_DEPLOYED_CONTRACT env is set
mobymask:
image: cerc/mobymask:local
working_dir: /app/packages/server
env_file:
- ../config/watcher-mobymask-v2/optimism-params.env
- ../config/watcher-mobymask-v2/mobymask-params.env
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
ENV: "PROD"
CERC_L2_GETH_RPC: ${CERC_L2_GETH_RPC}
CERC_L1_ACCOUNTS_CSV_URL: ${CERC_L1_ACCOUNTS_CSV_URL}
CERC_PRIVATE_KEY_DEPLOYER: ${CERC_PRIVATE_KEY_DEPLOYER}
CERC_MOBYMASK_APP_BASE_URI: ${CERC_MOBYMASK_APP_BASE_URI}
CERC_DEPLOYED_CONTRACT: ${CERC_DEPLOYED_CONTRACT}
CERC_L2_GETH_HOST: ${CERC_L2_GETH_HOST}
CERC_L2_GETH_PORT: ${CERC_L2_GETH_PORT}
CERC_L2_NODE_HOST: ${CERC_L2_NODE_HOST}
CERC_L2_NODE_PORT: ${CERC_L2_NODE_PORT}
command: ["sh", "deploy-and-generate-invite.sh"]
volumes:
- ../config/wait-for-it.sh:/app/packages/server/wait-for-it.sh
- ../config/watcher-mobymask-v2/secrets-template.json:/app/packages/server/secrets-template.json
- ../config/watcher-mobymask-v2/deploy-and-generate-invite.sh:/app/packages/server/deploy-and-generate-invite.sh
- mobymask_deployment:/app/packages/server
extra_hosts:
- "host.docker.internal:host-gateway"
# Creates peer-id files if they don't exist
peer-ids-gen:
image: cerc/watcher-ts:local
restart: on-failure
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
working_dir: /app/packages/peer
command: ["sh", "generate-peer-ids.sh"]
volumes:
- ../config/watcher-mobymask-v2/generate-peer-ids.sh:/app/packages/peer/generate-peer-ids.sh
- peers_ids:/peer-ids
# Starts the mobymask-v2-watcher server
mobymask-watcher-server:
image: cerc/watcher-mobymask-v2:local
restart: unless-stopped
depends_on:
mobymask-watcher-db:
condition: service_healthy
peer-ids-gen:
condition: service_completed_successfully
mobymask:
condition: service_completed_successfully
env_file:
- ../config/watcher-mobymask-v2/optimism-params.env
- ../config/watcher-mobymask-v2/mobymask-params.env
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_L2_GETH_RPC: ${CERC_L2_GETH_RPC}
CERC_L1_ACCOUNTS_CSV_URL: ${CERC_L1_ACCOUNTS_CSV_URL}
CERC_PRIVATE_KEY_PEER: ${CERC_PRIVATE_KEY_PEER}
CERC_RELAY_PEERS: ${CERC_RELAY_PEERS}
CERC_RELAY_ANNOUNCE_DOMAIN: ${CERC_RELAY_ANNOUNCE_DOMAIN}
CERC_ENABLE_PEER_L2_TXS: ${CERC_ENABLE_PEER_L2_TXS}
CERC_DEPLOYED_CONTRACT: ${CERC_DEPLOYED_CONTRACT}
command: ["sh", "start-server.sh"]
volumes:
- ../config/watcher-mobymask-v2/watcher-config-template.toml:/app/environments/watcher-config-template.toml
- ../config/watcher-mobymask-v2/start-server.sh:/app/start-server.sh
- peers_ids:/app/peers
- mobymask_deployment:/server
# Expose GQL, metrics and relay node ports
ports:
- "0.0.0.0:3001:3001"
- "0.0.0.0:9001:9001"
- "0.0.0.0:9090:9090"
healthcheck:
test: ["CMD", "busybox", "nc", "localhost", "9090"]
interval: 20s
timeout: 5s
retries: 15
start_period: 5s
extra_hosts:
- "host.docker.internal:host-gateway"
# Container to run peer tests
peer-tests:
image: cerc/watcher-ts:local
restart: on-failure
depends_on:
mobymask-watcher-server:
condition: service_healthy
peer-ids-gen:
condition: service_completed_successfully
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
working_dir: /app/packages/peer
command:
- sh
- -c
- |
./set-tests-env.sh && \
tail -f /dev/null
volumes:
- ../config/watcher-mobymask-v2/set-tests-env.sh:/app/packages/peer/set-tests-env.sh
- peers_ids:/peer-ids
volumes:
mobymask_watcher_db_data:
peers_ids:
mobymask_deployment:

View File

@@ -1,65 +0,0 @@
version: '3.2'
# TODO: remove hard-wired host ports
services:
mobymask-watcher-db:
restart: unless-stopped
image: postgres:14-alpine
environment:
- POSTGRES_USER=vdbm
- POSTGRES_MULTIPLE_DATABASES=mobymask-watcher,mobymask-watcher-job-queue
- POSTGRES_EXTENSION=mobymask-watcher-job-queue:pgcrypto
- POSTGRES_PASSWORD=password
volumes:
- ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh
- mobymask_watcher_db_data:/var/lib/postgresql/data
ports:
- "0.0.0.0:15432:5432"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "5432"]
interval: 20s
timeout: 5s
retries: 15
start_period: 10s
mobymask-watcher-server:
restart: unless-stopped
depends_on:
mobymask-watcher-db:
condition: service_healthy
image: cerc/watcher-mobymask:local
command: ["sh", "-c", "yarn server"]
volumes:
- ../config/watcher-mobymask/mobymask-watcher.toml:/app/packages/mobymask-watcher/environments/local.toml
ports:
- "0.0.0.0:3001:3001"
- "0.0.0.0:9001:9001"
extra_hosts:
- "ipld-eth-server:host-gateway"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "3001"]
interval: 20s
timeout: 5s
retries: 15
start_period: 5s
mobymask-watcher-job-runner:
restart: unless-stopped
depends_on:
mobymask-watcher-server:
condition: service_healthy
mobymask-watcher-db:
condition: service_healthy
image: cerc/watcher-mobymask:local
command: ["sh", "-c", "yarn job-runner"]
volumes:
- ../config/watcher-mobymask/mobymask-watcher.toml:/app/packages/mobymask-watcher/environments/local.toml
ports:
- "0.0.0.0:9000:9000"
extra_hosts:
- "ipld-eth-server:host-gateway"
volumes:
mobymask_watcher_db_data:

View File

@@ -1,170 +0,0 @@
version: '3.2'
services:
uniswap-watcher-db:
restart: unless-stopped
image: postgres:14-alpine
environment:
- POSTGRES_USER=vdbm
- POSTGRES_MULTIPLE_DATABASES=erc20-watcher,uni-watcher,uni-info-watcher,erc20-watcher-job-queue,uni-watcher-job-queue,uni-info-watcher-job-queue
- POSTGRES_EXTENSION=erc20-watcher-job-queue:pgcrypto,uni-watcher-job-queue:pgcrypto,uni-info-watcher-job-queue:pgcrypto
- POSTGRES_PASSWORD=password
command: ["postgres", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "work_mem=2GB"]
volumes:
- ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh
- ../config/postgresql/create-pg-stat-statements.sql:/docker-entrypoint-initdb.d/create-pg-stat-statements.sql
- uniswap_watcher_db_data:/var/lib/postgresql/data
ports:
- "0.0.0.0:15435:5432"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "5432"]
interval: 20s
timeout: 5s
retries: 15
start_period: 10s
shm_size: '8GB'
erc20-watcher-server:
restart: unless-stopped
depends_on:
uniswap-watcher-db:
condition: service_healthy
image: cerc/watcher-uniswap-v3:local
working_dir: /app/packages/erc20-watcher
environment:
- DEBUG=vulcanize:*
command: ["node", "--enable-source-maps", "dist/server.js"]
volumes:
- ../config/watcher-uniswap-v3/erc20-watcher.toml:/app/packages/erc20-watcher/environments/local.toml
ports:
- "0.0.0.0:3005:3001"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "3001"]
interval: 20s
timeout: 5s
retries: 15
start_period: 5s
extra_hosts:
- "host.docker.internal:host-gateway"
uni-watcher-job-runner:
restart: unless-stopped
depends_on:
uniswap-watcher-db:
condition: service_healthy
image: cerc/watcher-uniswap-v3:local
working_dir: /app/packages/uni-watcher
environment:
- DEBUG=vulcanize:*
command: ["sh", "-c", "./watch-contract.sh && node --enable-source-maps dist/job-runner.js"]
volumes:
- ../config/watcher-uniswap-v3/uni-watcher.toml:/app/packages/uni-watcher/environments/local.toml
- ../config/watcher-uniswap-v3/watch-contract.sh:/app/packages/uni-watcher/watch-contract.sh
ports:
- "0.0.0.0:9004:9000"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "9000"]
interval: 20s
timeout: 5s
retries: 15
start_period: 5s
extra_hosts:
- "host.docker.internal:host-gateway"
uni-watcher-server:
restart: unless-stopped
depends_on:
uniswap-watcher-db:
condition: service_healthy
uni-watcher-job-runner:
condition: service_healthy
image: cerc/watcher-uniswap-v3:local
environment:
- UNISWAP_START_BLOCK=12369621
- DEBUG=vulcanize:*
working_dir: /app/packages/uni-watcher
command: ["./run.sh"]
volumes:
- ../config/watcher-uniswap-v3/uni-watcher.toml:/app/packages/uni-watcher/environments/local.toml
- ../config/watcher-uniswap-v3/run.sh:/app/packages/uni-watcher/run.sh
ports:
- "0.0.0.0:3003:3003"
- "0.0.0.0:9005:9001"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "3003"]
interval: 20s
timeout: 5s
retries: 15
start_period: 5s
extra_hosts:
- "host.docker.internal:host-gateway"
uni-info-watcher-job-runner:
restart: unless-stopped
depends_on:
uniswap-watcher-db:
condition: service_healthy
erc20-watcher-server:
condition: service_healthy
uni-watcher-server:
condition: service_healthy
image: cerc/watcher-uniswap-v3:local
working_dir: /app/packages/uni-info-watcher
environment:
- DEBUG=vulcanize:*
command: ["node", "--enable-source-maps", "dist/job-runner.js"]
volumes:
- ../config/watcher-uniswap-v3/uni-info-watcher.toml:/app/packages/uni-info-watcher/environments/local.toml
ports:
- "0.0.0.0:9006:9002"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "9002"]
interval: 20s
timeout: 5s
retries: 15
start_period: 5s
extra_hosts:
- "host.docker.internal:host-gateway"
uni-info-watcher-server:
restart: unless-stopped
depends_on:
uniswap-watcher-db:
condition: service_healthy
erc20-watcher-server:
condition: service_healthy
uni-watcher-server:
condition: service_healthy
uni-info-watcher-job-runner:
condition: service_healthy
image: cerc/watcher-uniswap-v3:local
environment:
- UNISWAP_START_BLOCK=12369621
working_dir: /app/packages/uni-info-watcher
command: ["./run.sh"]
volumes:
- ../config/watcher-uniswap-v3/uni-info-watcher.toml:/app/packages/uni-info-watcher/environments/local.toml
- ../config/watcher-uniswap-v3/run.sh:/app/packages/uni-info-watcher/run.sh
ports:
- "0.0.0.0:3004:3004"
- "0.0.0.0:9007:9003"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "3004"]
interval: 20s
timeout: 5s
retries: 15
start_period: 5s
extra_hosts:
- "host.docker.internal:host-gateway"
uniswap-v3-info:
depends_on:
uni-info-watcher-server:
condition: service_healthy
image: cerc/uniswap-v3-info:local
ports:
- "0.0.0.0:3006:3000"
volumes:
uniswap_watcher_db_data:

View File

@@ -1,34 +0,0 @@
global:
scrape_interval: 5s
evaluation_interval: 15s
scrape_configs:
# ipld-eth-server
- job_name: 'ipld-eth-server'
metrics_path: /metrics
scrape_interval: 5s
static_configs:
- targets: ['ipld-eth-server:8090']
# geth
- job_name: 'geth'
metrics_path: /debug/metrics/prometheus
scheme: http
static_configs:
- targets: ['fixturenet-eth-geth-1:6060']
# nginx
- job_name: 'nginx'
scrape_interval: 5s
metrics_path: /metrics
scheme: http
static_configs:
- targets: ['keycloak-nginx-prometheus-exporter:9113']
# keycloak
- job_name: 'keycloak'
scrape_interval: 5s
metrics_path: /auth/realms/cerc/metrics
scheme: http
static_configs:
- targets: ['keycloak:8080']
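With these four scrape jobs in place, target health can be confirmed through Prometheus' HTTP API rather than the UI. A sketch against the instance from docker-compose-fixturenet-eth-metrics.yml above (port 9090 is published with an ephemeral host mapping, assumed resolved into PROM_PORT):

import json
import urllib.request

PROM_PORT = 9090  # replace with the host port mapped to prometheus:9090

# 'up' is 1 for every target Prometheus can scrape; one sample per job above.
with urllib.request.urlopen(f"http://localhost:{PROM_PORT}/api/v1/query?query=up") as resp:
    for sample in json.load(resp)["data"]["result"]:
        print(sample["metric"]["job"], "=>", sample["value"][1])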

View File

@@ -1,23 +0,0 @@
# The password used to access test accounts (e.g., via personal_unlockAccount). It is the same for all accounts.
ACCOUNT_PASSWORD=secret1212
# ENODE of the geth bootnode.
BOOTNODE_KEY="b0ac22adcad37213c7c565810a50f1772291e7b0ce53fb73e7ec2a3c75bc13b5"
ENODE="enode://af22c29c316ad069cf48a09a4ad5cf04a251b411e45098888d114c6dd7f489a13786620d5953738762afa13711d4ffb3b19aa5de772d8af72f851f7e9c5b164a@fixturenet-eth-bootnode-geth:30303"
# JWT shared by geth and lighthouse for authentication.
JWT="0x6cdcac3501046a08e186730dd8bd136cfaf0fdc1fc955f6e15ad3068c0ff2af0"
# URL to download the ENR of the lighthouse bootnode (generated at first start).
ENR_URL="http://fixturenet-eth-bootnode-lighthouse:3000/bootnode/enr.dat"
# DB connection settings for statediffing (see docker-compose-db.yml)
CERC_STATEDIFF_DB_HOST="ipld-eth-db"
CERC_STATEDIFF_DB_PORT=5432
CERC_STATEDIFF_DB_NAME="cerc_testing"
CERC_STATEDIFF_DB_USER="vdbm"
CERC_STATEDIFF_DB_PASSWORD="password"
CERC_STATEDIFF_DB_GOOSE_MIN_VER=23
CERC_STATEDIFF_DB_LOG_STATEMENTS="false"
CERC_GETH_VMODULE="statediff/*=5,rpc/*=5"

View File

@@ -1,118 +0,0 @@
#!/bin/bash
# TODO: this file is now an unmodified copy of cerc-io/laconicd/init.sh
# so we should have a mechanism to bundle it inside the container rather than link from here
# at deploy time.
KEY="mykey"
CHAINID="laconic_9000-1"
MONIKER="localtestnet"
KEYRING="test"
KEYALGO="eth_secp256k1"
LOGLEVEL="info"
# trace evm
TRACE="--trace"
# TRACE=""
# validate dependencies are installed
command -v jq > /dev/null 2>&1 || { echo >&2 "jq not installed. More info: https://stedolan.github.io/jq/download/"; exit 1; }
# remove existing daemon and client
rm -rf ~/.laconic*
make install
laconicd config keyring-backend $KEYRING
laconicd config chain-id $CHAINID
# if $KEY exists it should be deleted
laconicd keys add $KEY --keyring-backend $KEYRING --algo $KEYALGO
# Set moniker and chain-id for Ethermint (Moniker can be anything, chain-id must be an integer)
laconicd init $MONIKER --chain-id $CHAINID
# Change parameter token denominations to aphoton
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["staking"]["params"]["bond_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["crisis"]["constant_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["gov"]["deposit_params"]["min_deposit"][0]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["mint"]["params"]["mint_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
# Custom modules
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["record_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_commit_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_reveal_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_minimum_bid"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
if [[ "$TEST_REGISTRY_EXPIRY" == "true" ]]; then
echo "Setting timers for expiry tests."
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["record_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_grace_period"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
fi
if [[ "$TEST_AUCTION_ENABLED" == "true" ]]; then
echo "Enabling auction and setting timers."
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_enabled"]=true' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_grace_period"]="300s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_commits_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_reveals_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
fi
# increase block time (?)
cat $HOME/.laconicd/config/genesis.json | jq '.consensus_params["block"]["time_iota_ms"]="1000"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
# Set gas limit in genesis
cat $HOME/.laconicd/config/genesis.json | jq '.consensus_params["block"]["max_gas"]="10000000"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
# Disable production of empty blocks
if [[ "$OSTYPE" == "darwin"* ]]; then
sed -i '' 's/create_empty_blocks = true/create_empty_blocks = false/g' $HOME/.laconicd/config/config.toml
else
sed -i 's/create_empty_blocks = true/create_empty_blocks = false/g' $HOME/.laconicd/config/config.toml
fi
if [[ $1 == "pending" ]]; then
if [[ "$OSTYPE" == "darwin"* ]]; then
sed -i '' 's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_propose = "3s"/timeout_propose = "30s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' $HOME/.laconicd/config/config.toml
else
sed -i 's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_propose = "3s"/timeout_propose = "30s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' $HOME/.laconicd/config/config.toml
fi
fi
# Allocate genesis accounts (cosmos formatted addresses)
laconicd add-genesis-account $KEY 100000000000000000000000000aphoton --keyring-backend $KEYRING
# Sign genesis transaction
laconicd gentx $KEY 1000000000000000000000aphoton --keyring-backend $KEYRING --chain-id $CHAINID
# Collect genesis tx
laconicd collect-gentxs
# Run this to ensure everything worked and that the genesis file is setup correctly
laconicd validate-genesis
if [[ $1 == "pending" ]]; then
echo "pending mode is on, please wait for the first block committed."
fi
# Start the node (remove the --pruning=nothing flag if historical queries are not needed)
laconicd start --pruning=nothing --evm.tracer=json $TRACE --log_level $LOGLEVEL --minimum-gas-prices=0.0001aphoton --json-rpc.api eth,txpool,personal,net,debug,web3,miner --api.enable --gql-server --gql-playground

View File

@ -1,2 +0,0 @@
#!/bin/sh
laconicd keys show mykey | grep address | cut -d ' ' -f 3

View File

@ -1,2 +0,0 @@
#!/bin/sh
echo y | laconicd keys export mykey --unarmored-hex --unsafe

View File

@ -1,9 +0,0 @@
services:
cns:
restEndpoint: 'http://laconicd:1317'
gqlEndpoint: 'http://laconicd:9473/api'
userKey: REPLACE_WITH_MYKEY
bondId:
chainId: laconic_9000-1
gas: 250000
fees: 200000aphoton

View File

@ -1,108 +0,0 @@
{
"NetworkVersion": 18,
"Accounts": [
{
"Type": "account",
"Balance": "50000000000000000000000000",
"Meta": {
"Owner": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q"
}
}
],
"Miners": [
{
"ID": "t01000",
"Owner": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q",
"Worker": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q",
"PeerId": "12D3KooWG5q6pWJVdPBhDBv9AjWVbUh4xxTAZ7xvgZSjczWuD2Z9",
"MarketBalance": "0",
"PowerBalance": "0",
"SectorSize": 2048,
"Sectors": [
{
"CommR": {
"/": "bagboea4b5abcboxypcewlkmrat2myu4vthk3ii2pcomak7nhqmdbb6sxlolp2wdf"
},
"CommD": {
"/": "baga6ea4seaqn3jfixthmdgksv4vhfeuyvr6upw6tvaqbmzmsyxnzosm4pwgnmlq"
},
"SectorID": 0,
"Deal": {
"PieceCID": {
"/": "baga6ea4seaqn3jfixthmdgksv4vhfeuyvr6upw6tvaqbmzmsyxnzosm4pwgnmlq"
},
"PieceSize": 2048,
"VerifiedDeal": false,
"Client": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q",
"Provider": "t01000",
"Label": "0",
"StartEpoch": 0,
"EndEpoch": 9001,
"StoragePricePerEpoch": "0",
"ProviderCollateral": "0",
"ClientCollateral": "0"
},
"DealClientKey": {
"Type": "bls",
"PrivateKey": "tFvSRiSg2G3Ssgg0PSYy23XyjaIMXpsmdyG2B7UFLT4="
},
"ProofType": 5
},
{
"CommR": {
"/": "bagboea4b5abcb6krzypqcczhcnbeyjcqkeo6omfergm336o3kitugh3jgjog2yqq"
},
"CommD": {
"/": "baga6ea4seaqhondpb2373hjasjplxvbjzi5n5mm4fbbhjxp5ptnbq4cibapkeii"
},
"SectorID": 1,
"Deal": {
"PieceCID": {
"/": "baga6ea4seaqhondpb2373hjasjplxvbjzi5n5mm4fbbhjxp5ptnbq4cibapkeii"
},
"PieceSize": 2048,
"VerifiedDeal": false,
"Client": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q",
"Provider": "t01000",
"Label": "1",
"StartEpoch": 0,
"EndEpoch": 9001,
"StoragePricePerEpoch": "0",
"ProviderCollateral": "0",
"ClientCollateral": "0"
},
"DealClientKey": {
"Type": "bls",
"PrivateKey": "tFvSRiSg2G3Ssgg0PSYy23XyjaIMXpsmdyG2B7UFLT4="
},
"ProofType": 5
}
]
}
],
"NetworkName": "localnet-6d52dae5-ff29-4bac-a45d-f84e6c07564c",
"VerifregRootKey": {
"Type": "multisig",
"Balance": "0",
"Meta": {
"Signers": [
"t1ceb34gnsc6qk5dt6n7xg6ycwzasjhbxm3iylkiy"
],
"Threshold": 1,
"VestingDuration": 0,
"VestingStart": 0
}
},
"RemainderAccount": {
"Type": "multisig",
"Balance": "0",
"Meta": {
"Signers": [
"t1ceb34gnsc6qk5dt6n7xg6ycwzasjhbxm3iylkiy"
],
"Threshold": 1,
"VestingDuration": 0,
"VestingStart": 0
}
}
}

View File

@ -1,5 +0,0 @@
LOTUS_PATH=~/.lotus-local-net
LOTUS_MINER_PATH=~/.lotus-miner-local-net
LOTUS_SKIP_GENESIS_CHECK=_yes_
CGO_CFLAGS_ALLOW="-D__BLST_PORTABLE__"
CGO_CFLAGS="-D__BLST_PORTABLE__"

View File

@ -1,31 +0,0 @@
#!/bin/bash
lotus --version
# # remove old bootnode peer info if present
# [ -f /root/.lotus-shared/miner.addr ] && rm /root/.lotus-shared/miner.addr
# start daemon
nohup lotus daemon --genesis=/devgen.car --profile=bootstrapper --bootstrap=false > /var/log/lotus.log 2>&1 &
# Loop until the daemon is started
echo "Waiting for daemon to start..."
while ! grep -q "started ChainNotify channel" /var/log/lotus.log ; do
sleep 5
done
echo "Daemon started."
# publish bootnode peer info to shared volume
lotus net listen | awk 'NR==1{print}' > /root/.lotus-shared/miner.addr
# if miner not already initialized
if [ ! -d /root/.lotus-miner-local-net ]; then
# initialize miner
lotus wallet import --as-default ~/.genesis-sectors/pre-seal-t01000.key
lotus-miner init --genesis-miner --actor=t01000 --sector-size=2KiB --pre-sealed-sectors=~/.genesis-sectors --pre-sealed-metadata=~/.genesis-sectors/pre-seal-t01000.json --nosync
fi
# start miner
nohup lotus-miner run --nosync &
tail -f /dev/null

View File

@ -1,24 +0,0 @@
#!/bin/bash
lotus --version
## TODO: paths could use values from the lotus-env.env file
# if not already initialized
if [ ! -f /root/.lotus-local-net/config.toml ]; then
# init node config
mkdir $HOME/.lotus-local-net
lotus config default > $HOME/.lotus-local-net/config.toml
# add bootstrap peer info if available
if [ -f /root/.lotus-shared/miner.addr ]; then
MINER_ADDR=\"$(cat /root/.lotus-shared/miner.addr)\"
# add bootstrap peer id to config file
sed -i "/^\[Libp2p\]/a \ \ BootstrapPeers = [$MINER_ADDR]" $HOME/.lotus-local-net/config.toml
else
echo "Bootstrap peer info not found, unable to configure. Manual peering will be required."
fi
fi
# start node
lotus daemon --genesis=/devgen.car

View File

@ -1,37 +0,0 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}"
# Check existing config if it exists
if [ -f /app/jwt.txt ] && [ -f /app/rollup.json ]; then
echo "Found existing L2 config, cross-checking with L1 deployment config"
SOURCE_L1_CONF=$(cat /contracts-bedrock/deploy-config/getting-started.json)
EXP_L1_BLOCKHASH=$(echo "$SOURCE_L1_CONF" | jq -r '.l1StartingBlockTag')
EXP_BATCHER=$(echo "$SOURCE_L1_CONF" | jq -r '.batchSenderAddress')
GEN_L2_CONF=$(cat /app/rollup.json)
GEN_L1_BLOCKHASH=$(echo "$GEN_L2_CONF" | jq -r '.genesis.l1.hash')
GEN_BATCHER=$(echo "$GEN_L2_CONF" | jq -r '.genesis.system_config.batcherAddr')
if [ "$EXP_L1_BLOCKHASH" = "$GEN_L1_BLOCKHASH" ] && [ "$EXP_BATCHER" = "$GEN_BATCHER" ]; then
echo "Config cross-checked, exiting"
exit 0
fi
echo "Existing L2 config doesn't match the L1 deployment config, please clear L2 config volume before starting"
exit 1
fi
op-node genesis l2 \
--deploy-config /contracts-bedrock/deploy-config/getting-started.json \
--deployment-dir /contracts-bedrock/deployments/getting-started/ \
--outfile.l2 /app/genesis.json \
--outfile.rollup /app/rollup.json \
--l1-rpc $CERC_L1_RPC
openssl rand -hex 32 > /app/jwt.txt

View File

@ -1,12 +0,0 @@
# Defaults
# L1 endpoint
DEFAULT_CERC_L1_CHAIN_ID=1212
DEFAULT_CERC_L1_RPC="http://fixturenet-eth-geth-1:8545"
DEFAULT_CERC_L1_HOST="fixturenet-eth-geth-1"
DEFAULT_CERC_L1_PORT=8545
# URL to get CSV with credentials for accounts on L1
# that are used to send balance to Optimism Proxy contract
# (enables them to do transactions on L2)
DEFAULT_CERC_L1_ACCOUNTS_CSV_URL="http://fixturenet-eth-bootnode-geth:9898/accounts.csv"

View File

@ -1,131 +0,0 @@
#!/bin/bash
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
CERC_L1_CHAIN_ID="${CERC_L1_CHAIN_ID:-${DEFAULT_CERC_L1_CHAIN_ID}}"
CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}"
CERC_L1_ACCOUNTS_CSV_URL="${CERC_L1_ACCOUNTS_CSV_URL:-${DEFAULT_CERC_L1_ACCOUNTS_CSV_URL}}"
echo "Using L1 RPC endpoint ${CERC_L1_RPC}"
IMPORT_1="import './verify-contract-deployment'"
IMPORT_2="import './rekey-json'"
IMPORT_3="import './send-balance'"
# Append mounted tasks to tasks/index.ts file if not present
if ! grep -Fxq "$IMPORT_1" tasks/index.ts; then
echo "$IMPORT_1" >> tasks/index.ts
echo "$IMPORT_2" >> tasks/index.ts
echo "$IMPORT_3" >> tasks/index.ts
fi
# Update the chainId in the hardhat config
sed -i "/getting-started/ {n; s/.*chainId.*/ chainId: $CERC_L1_CHAIN_ID,/}" hardhat.config.ts
# Exit if a deployment already exists (on restarts)
# Note: fixturenet-eth-geth currently starts fresh on a restart
if [ -d "deployments/getting-started" ]; then
echo "Deployment directory deployments/getting-started found, checking SystemDictator deployment"
# Read JSON file into variable
SYSTEM_DICTATOR_DETAILS=$(cat deployments/getting-started/SystemDictator.json)
# Parse JSON into variables
SYSTEM_DICTATOR_ADDRESS=$(echo "$SYSTEM_DICTATOR_DETAILS" | jq -r '.address')
SYSTEM_DICTATOR_TXHASH=$(echo "$SYSTEM_DICTATOR_DETAILS" | jq -r '.transactionHash')
if yarn hardhat verify-contract-deployment --contract "${SYSTEM_DICTATOR_ADDRESS}" --transaction-hash "${SYSTEM_DICTATOR_TXHASH}"; then
echo "Deployment verfication successful, exiting"
exit 0
else
echo "Deployment verfication failed, please clear L1 deployment volume before starting"
exit 1
fi
fi
# Generate the L2 account addresses
yarn hardhat rekey-json --output /l2-accounts/keys.json
# Read JSON file into variable
KEYS_JSON=$(cat /l2-accounts/keys.json)
# Parse JSON into variables
ADMIN_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Admin.address')
ADMIN_PRIV_KEY=$(echo "$KEYS_JSON" | jq -r '.Admin.privateKey')
PROPOSER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Proposer.address')
BATCHER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Batcher.address')
SEQUENCER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Sequencer.address')
# Get the private keys of L1 accounts
if [ -n "$CERC_L1_ACCOUNTS_CSV_URL" ] && \
l1_accounts_response=$(curl -L --write-out '%{http_code}' --silent --output /dev/null "$CERC_L1_ACCOUNTS_CSV_URL") && \
[ "$l1_accounts_response" -eq 200 ];
then
echo "Fetching L1 account credentials using provided URL"
mkdir -p /geth-accounts
wget -O /geth-accounts/accounts.csv "$CERC_L1_ACCOUNTS_CSV_URL"
CERC_L1_ADDRESS=$(head -n 1 /geth-accounts/accounts.csv | cut -d ',' -f 2)
CERC_L1_PRIV_KEY=$(head -n 1 /geth-accounts/accounts.csv | cut -d ',' -f 3)
CERC_L1_ADDRESS_2=$(awk -F, 'NR==2{print $(NF-1)}' /geth-accounts/accounts.csv)
CERC_L1_PRIV_KEY_2=$(awk -F, 'NR==2{print $NF}' /geth-accounts/accounts.csv)
else
echo "Couldn't fetch L1 account credentials, using them from env"
fi
# Send balances to the above L2 addresses
yarn hardhat send-balance --to "${ADMIN_ADDRESS}" --amount 2 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started
yarn hardhat send-balance --to "${PROPOSER_ADDRESS}" --amount 5 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started
yarn hardhat send-balance --to "${BATCHER_ADDRESS}" --amount 1000 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started
echo "Balances sent to L2 accounts"
# Select a finalized L1 block as the starting point for roll ups
until FINALIZED_BLOCK=$(cast block finalized --rpc-url "$CERC_L1_RPC"); do
echo "Waiting for a finalized L1 block to exist, retrying after 10s"
sleep 10
done
L1_BLOCKNUMBER=$(echo "$FINALIZED_BLOCK" | awk '/number/{print $2}')
L1_BLOCKHASH=$(echo "$FINALIZED_BLOCK" | awk '/hash/{print $2}')
L1_BLOCKTIMESTAMP=$(echo "$FINALIZED_BLOCK" | awk '/timestamp/{print $2}')
echo "Selected L1 block ${L1_BLOCKNUMBER} as the starting block for roll ups"
# Update the deployment config
sed -i 's/"l2OutputOracleStartingTimestamp": TIMESTAMP/"l2OutputOracleStartingTimestamp": '"$L1_BLOCKTIMESTAMP"'/g' deploy-config/getting-started.json
jq --arg chainid "$CERC_L1_CHAIN_ID" '.l1ChainID = ($chainid | tonumber)' deploy-config/getting-started.json > tmp.json && mv tmp.json deploy-config/getting-started.json
node update-config.js deploy-config/getting-started.json "$ADMIN_ADDRESS" "$PROPOSER_ADDRESS" "$BATCHER_ADDRESS" "$SEQUENCER_ADDRESS" "$L1_BLOCKHASH"
echo "Updated the deployment config"
# Create a .env file
echo "L1_RPC=$CERC_L1_RPC" > .env
echo "PRIVATE_KEY_DEPLOYER=$ADMIN_PRIV_KEY" >> .env
echo "Deploying the L1 smart contracts, this will take a while..."
# Deploy the L1 smart contracts
yarn hardhat deploy --network getting-started --tags l1
echo "Deployed the L1 smart contracts"
# Read Proxy contract's JSON and get the address
PROXY_JSON=$(cat deployments/getting-started/Proxy__OVM_L1StandardBridge.json)
PROXY_ADDRESS=$(echo "$PROXY_JSON" | jq -r '.address')
# Send balance to the above Proxy contract in L1 for reflecting balance in L2
# First account
yarn hardhat send-balance --to "${PROXY_ADDRESS}" --amount 1 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started
# Second account
yarn hardhat send-balance --to "${PROXY_ADDRESS}" --amount 1 --private-key "${CERC_L1_PRIV_KEY_2}" --network getting-started
echo "Balance sent to Proxy L2 contract"
echo "Use following accounts for transactions in L2:"
echo "${CERC_L1_ADDRESS}"
echo "${CERC_L1_ADDRESS_2}"
echo "Done"

View File

@ -1,36 +0,0 @@
const fs = require('fs')
// Get the command-line argument
const configFile = process.argv[2]
const adminAddress = process.argv[3]
const proposerAddress = process.argv[4]
const batcherAddress = process.argv[5]
const sequencerAddress = process.argv[6]
const blockHash = process.argv[7]
// Read the JSON file
const configData = fs.readFileSync(configFile)
const configObj = JSON.parse(configData)
// Update the finalSystemOwner property with the ADMIN_ADDRESS value
configObj.finalSystemOwner =
configObj.portalGuardian =
configObj.controller =
configObj.l2OutputOracleChallenger =
configObj.proxyAdminOwner =
configObj.baseFeeVaultRecipient =
configObj.l1FeeVaultRecipient =
configObj.sequencerFeeVaultRecipient =
configObj.governanceTokenOwner =
adminAddress
configObj.l2OutputOracleProposer = proposerAddress
configObj.batchSenderAddress = batcherAddress
configObj.p2pSequencerAddress = sequencerAddress
configObj.l1StartingBlockTag = blockHash
// Write the updated JSON object back to the file
fs.writeFileSync(configFile, JSON.stringify(configObj, null, 2))

View File

@ -1,39 +0,0 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}"
# Get Batcher key from keys.json
BATCHER_KEY=$(jq -r '.Batcher.privateKey' /l2-accounts/keys.json | tr -d '"')
cleanup() {
echo "Signal received, cleaning up..."
kill ${batcher_pid}
wait
echo "Done"
}
trap 'cleanup' INT TERM
# Run op-batcher
op-batcher \
--l2-eth-rpc=http://op-geth:8545 \
--rollup-rpc=http://op-node:8547 \
--poll-interval=1s \
--sub-safety-margin=6 \
--num-confirmations=1 \
--safe-abort-nonce-too-low-count=3 \
--resubmission-timeout=30s \
--rpc.addr=0.0.0.0 \
--rpc.port=8548 \
--rpc.enable-admin \
--max-channel-duration=1 \
--l1-eth-rpc=$CERC_L1_RPC \
--private-key=$BATCHER_KEY \
&
batcher_pid=$!
wait $batcher_pid

View File

@ -1,90 +0,0 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
# TODO: Add in container build or use other tool
echo "Installing jq"
apk update && apk add jq
# Get Sequencer key from keys.json
SEQUENCER_KEY=$(jq -r '.Sequencer.privateKey' /l2-accounts/keys.json | tr -d '"')
# Initialize op-geth if datadir/geth not found
if [ -f /op-node/jwt.txt ] && [ -d datadir/geth ]; then
echo "Found existing datadir, checking block signer key"
BLOCK_SIGNER_KEY=$(cat datadir/block-signer-key)
if [ "$SEQUENCER_KEY" = "$BLOCK_SIGNER_KEY" ]; then
echo "Sequencer and block signer keys match, skipping initialization"
else
echo "Sequencer and block signer keys don't match, please clear L2 geth data volume before starting"
exit 1
fi
else
echo "Initializing op-geth"
mkdir -p datadir
echo "pwd" > datadir/password
echo $SEQUENCER_KEY > datadir/block-signer-key
geth account import --datadir=datadir --password=datadir/password datadir/block-signer-key
while [ ! -f "/op-node/jwt.txt" ]
do
echo "Config files not created. Checking after 5 seconds."
sleep 5
done
echo "Config files created by op-node, proceeding with the initialization..."
geth init --datadir=datadir /op-node/genesis.json
echo "Node Initialized"
fi
SEQUENCER_ADDRESS=$(jq -r '.Sequencer.address' /l2-accounts/keys.json | tr -d '"')
echo "SEQUENCER_ADDRESS: ${SEQUENCER_ADDRESS}"
cleanup() {
echo "Signal received, cleaning up..."
kill ${geth_pid}
wait
echo "Done"
}
trap 'cleanup' INT TERM
# Run op-geth
geth \
--datadir ./datadir \
--http \
--http.corsdomain="*" \
--http.vhosts="*" \
--http.addr=0.0.0.0 \
--http.api=web3,debug,eth,txpool,net,engine \
--ws \
--ws.addr=0.0.0.0 \
--ws.port=8546 \
--ws.origins="*" \
--ws.api=debug,eth,txpool,net,engine \
--syncmode=full \
--gcmode=archive \
--nodiscover \
--maxpeers=0 \
--networkid=42069 \
--authrpc.vhosts="*" \
--authrpc.addr=0.0.0.0 \
--authrpc.port=8551 \
--authrpc.jwtsecret=/op-node/jwt.txt \
--rollup.disabletxpoolgossip=true \
--password=./datadir/password \
--allow-insecure-unlock \
--mine \
--miner.etherbase=$SEQUENCER_ADDRESS \
--unlock=$SEQUENCER_ADDRESS \
&
geth_pid=$!
wait $geth_pid

View File

@ -1,26 +0,0 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}"
# Get Sequencer key from keys.json
SEQUENCER_KEY=$(jq -r '.Sequencer.privateKey' /l2-accounts/keys.json | tr -d '"')
# Run op-node
op-node \
--l2=http://op-geth:8551 \
--l2.jwt-secret=/op-node-data/jwt.txt \
--sequencer.enabled \
--sequencer.l1-confs=3 \
--verifier.l1-confs=3 \
--rollup.config=/op-node-data/rollup.json \
--rpc.addr=0.0.0.0 \
--rpc.port=8547 \
--p2p.disable \
--rpc.enable-admin \
--p2p.sequencer.key=$SEQUENCER_KEY \
--l1=$CERC_L1_RPC \
--l1.rpckind=any

View File

@ -1,36 +0,0 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}"
# Read the L2OutputOracle contract address from the deployment
L2OO_DEPLOYMENT=$(cat /contracts-bedrock/deployments/getting-started/L2OutputOracle.json)
L2OO_ADDR=$(echo "$L2OO_DEPLOYMENT" | jq -r '.address')
# Get Proposer key from keys.json
PROPOSER_KEY=$(jq -r '.Proposer.privateKey' /l2-accounts/keys.json | tr -d '"')
cleanup() {
echo "Signal received, cleaning up..."
kill ${proposer_pid}
wait
echo "Done"
}
trap 'cleanup' INT TERM
# Run op-proposer
op-proposer \
--poll-interval 12s \
--rpc.port 8560 \
--rollup-rpc http://op-node:8547 \
--l2oo-address $L2OO_ADDR \
--private-key $PROPOSER_KEY \
--l1-eth-rpc $CERC_L1_RPC \
&
proposer_pid=$!
wait $proposer_pid

View File

@ -1,2 +0,0 @@
[profile.default]
eth-rpc-url = "http://fixturenet-eth-geth-1:8545"

View File

@ -1,31 +0,0 @@
# Match compose/docker-compose-ipld-eth-beacon-db.yml
POSTGRES_HOST=ipld-eth-beacon-db
POSTGRES_PORT=5432
POSTGRES_DB=cerc_testing
POSTGRES_USER=vdbm
POSTGRES_PASSWORD=password
# Match compose/docker-compose-fixturenet-eth.yml
LIGHTHOUSE_HOST=fixturenet-eth-lighthouse-1
LIGHTHOUSE_PORT=8001
LIGHTHOUSE_PROTOCOL=http
CAPTURE_MODE=head
LOG_LEVEL=debug
BC_MAX_HISTORIC_PROCESS_WORKER=2
BC_UNIQUE_NODE_IDENTIFIER=1001
BC_CHECK_DB=true
BC_BEACON_STATE_PROCESSING_ENABLED=false
BC_BEACON_BLOCK_PROCESSING_ENABLED=true
BC_MINIMUM_SLOT=0
KG_INCREMENT=10000
KG_PROCESS_KNOWN_GAPS_ENABLED=true
KG_MAX_KNOWN_GAPS_WORKER=2
KG_MINIMUM_SLOT=0
# Match compose/docker-compose-prometheus-grafana.yml
PROM_HOST=prometheus
PROM_PORT=9000
PROM_METRICS_ENABLED=false

View File

@ -1,15 +0,0 @@
{
"chainId": 99,
"homesteadBlock": 0,
"eip150Block": 0,
"eip155Block": 0,
"eip158Block": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 0,
"petersburgBlock": 0,
"istanbulBlock": 0,
"clique": {
"period": 0,
"epoch": 3000
}
}

File diff suppressed because it is too large

View File

@ -1,17 +0,0 @@
POSTGRES_DB=keycloak
POSTGRES_USER=keycloak
POSTGRES_PASSWORD=keycloak
KC_DB=postgres
KC_DB_URL_HOST=keycloak-db
KC_DB_URL_DATABASE=${POSTGRES_DB}
KC_DB_USERNAME=${POSTGRES_USER}
KC_DB_PASSWORD=${POSTGRES_PASSWORD}
KC_DB_SCHEMA=public
KC_HOSTNAME=localhost
KC_HTTP_ENABLED="true"
KC_HTTP_RELATIVE_PATH="/auth"
KC_HOSTNAME_STRICT_HTTPS="false"
KEYCLOAK_ADMIN=admin
KEYCLOAK_ADMIN_PASSWORD=admin
X_API_CHECK_REALM=cerc
X_API_CHECK_CLIENT_ID="%api_key%"

View File

@ -1,72 +0,0 @@
server {
listen 80;
listen [::]:80;
server_name localhost;
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
### geth
location ~ ^/eth/?([^/]*)$ {
set $apiKey $1;
if ($apiKey = '') {
set $apiKey $http_X_API_KEY;
}
auth_request /auth;
proxy_buffering off;
rewrite /.*$ / break;
proxy_pass http://fixturenet-eth-geth-1:8545;
}
## ipld-eth-server
# location ~ ^/ipld/eth/([^/]*)$ {
# set $apiKey $1;
# if ($apiKey = '') {
# set $apiKey $http_X_API_KEY;
# }
# auth_request /auth;
# auth_request_set $user_id $sent_http_x_user_id;
# proxy_buffering off;
# rewrite /.*$ / break;
# proxy_pass http://ipld-eth-server:8081;
# proxy_set_header X-Original-Remote-Addr $remote_addr;
# proxy_set_header X-User-Id $user_id;
# }
#
# location ~ ^/ipld/gql/([^/]*)$ {
# set $apiKey $1;
# if ($apiKey = '') {
# set $apiKey $http_X_API_KEY;
# }
# auth_request /auth;
# proxy_buffering off;
# rewrite /.*$ / break;
# proxy_pass http://ipld-eth-server:8082;
# }
## lighthouse
location /beacon/ {
set $apiKey $http_X_API_KEY;
auth_request /auth;
proxy_buffering off;
proxy_pass http://fixturenet-eth-lighthouse-1:8001/;
}
location = /auth {
internal;
proxy_buffering off;
resolver 127.0.0.11 ipv6=off;
proxy_pass http://keycloak:8080/auth/realms/cerc/check?apiKey=$apiKey;
proxy_pass_request_body off;
proxy_set_header Content-Length "";
proxy_set_header X-Original-URI $request_uri;
proxy_set_header X-Original-Remote-Addr $remote_addr;
proxy_set_header X-Original-Host $host;
}
location = /stub_status {
stub_status;
}
}

View File

@ -1 +0,0 @@
CREATE EXTENSION pg_stat_statements;

View File

@ -1,38 +0,0 @@
#!/bin/bash
set -e
set -u
function create_user_and_database() {
local database=$1
echo " Creating user and database '$database'"
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" <<-EOSQL
CREATE DATABASE "$database";
GRANT ALL PRIVILEGES ON DATABASE "$database" TO $POSTGRES_USER;
EOSQL
}
function create_extension() {
local database=$(echo $1 | tr ':' ' ' | awk '{print $1}')
local extension=$(echo $1 | tr ':' ' ' | awk '{print $2}')
echo " Creating database '$database' extension '$extension'"
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" "$database" <<-EOSQL
CREATE EXTENSION "$extension";
EOSQL
}
if [ -n "$POSTGRES_MULTIPLE_DATABASES" ]; then
echo "Multiple database creation requested: $POSTGRES_MULTIPLE_DATABASES"
for db in $(echo $POSTGRES_MULTIPLE_DATABASES | tr ',' ' '); do
create_user_and_database $db
done
echo "Multiple databases created"
fi
if [ -n "$POSTGRES_EXTENSION" ]; then
echo "Extension database creation requested: $POSTGRES_EXTENSION"
for db in $(echo $POSTGRES_EXTENSION | tr ',' ' '); do
create_extension $db
done
echo "Extensions created"
fi
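# Example environment for the two modes above (illustrative values only,
# matching the comma-separated and db:extension formats parsed above):
#   POSTGRES_MULTIPLE_DATABASES=cerc_testing,keycloak
#   POSTGRES_EXTENSION=cerc_testing:pg_stat_statements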

View File

@ -1,2 +0,0 @@
ETH_CALL_FREQ=1000
ETH_SEND_FREQ=1000

View File

@ -1,182 +0,0 @@
#!/usr/bin/env bash
# Use this script to test if a given TCP host/port are available
WAITFORIT_cmdname=${0##*/}
echoerr() { if [[ $WAITFORIT_QUIET -ne 1 ]]; then echo "$@" 1>&2; fi }
usage()
{
cat << USAGE >&2
Usage:
$WAITFORIT_cmdname host:port [-s] [-t timeout] [-- command args]
-h HOST | --host=HOST Host or IP under test
-p PORT | --port=PORT TCP port under test
Alternatively, you can specify the host and port as host:port
-s | --strict Only execute subcommand if the test succeeds
-q | --quiet Don't output any status messages
-t TIMEOUT | --timeout=TIMEOUT
Timeout in seconds, zero for no timeout
-- COMMAND ARGS Execute command with args after the test finishes
USAGE
exit 1
}
wait_for()
{
if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then
echoerr "$WAITFORIT_cmdname: waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT"
else
echoerr "$WAITFORIT_cmdname: waiting for $WAITFORIT_HOST:$WAITFORIT_PORT without a timeout"
fi
WAITFORIT_start_ts=$(date +%s)
while :
do
if [[ $WAITFORIT_ISBUSY -eq 1 ]]; then
nc -z $WAITFORIT_HOST $WAITFORIT_PORT
WAITFORIT_result=$?
else
(echo -n > /dev/tcp/$WAITFORIT_HOST/$WAITFORIT_PORT) >/dev/null 2>&1
WAITFORIT_result=$?
fi
if [[ $WAITFORIT_result -eq 0 ]]; then
WAITFORIT_end_ts=$(date +%s)
echoerr "$WAITFORIT_cmdname: $WAITFORIT_HOST:$WAITFORIT_PORT is available after $((WAITFORIT_end_ts - WAITFORIT_start_ts)) seconds"
break
fi
sleep 1
done
return $WAITFORIT_result
}
wait_for_wrapper()
{
# In order to support SIGINT during timeout: http://unix.stackexchange.com/a/57692
if [[ $WAITFORIT_QUIET -eq 1 ]]; then
timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --quiet --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT &
else
timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT &
fi
WAITFORIT_PID=$!
trap "kill -INT -$WAITFORIT_PID" INT
wait $WAITFORIT_PID
WAITFORIT_RESULT=$?
if [[ $WAITFORIT_RESULT -ne 0 ]]; then
echoerr "$WAITFORIT_cmdname: timeout occurred after waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT"
fi
return $WAITFORIT_RESULT
}
# process arguments
while [[ $# -gt 0 ]]
do
case "$1" in
*:* )
WAITFORIT_hostport=(${1//:/ })
WAITFORIT_HOST=${WAITFORIT_hostport[0]}
WAITFORIT_PORT=${WAITFORIT_hostport[1]}
shift 1
;;
--child)
WAITFORIT_CHILD=1
shift 1
;;
-q | --quiet)
WAITFORIT_QUIET=1
shift 1
;;
-s | --strict)
WAITFORIT_STRICT=1
shift 1
;;
-h)
WAITFORIT_HOST="$2"
if [[ $WAITFORIT_HOST == "" ]]; then break; fi
shift 2
;;
--host=*)
WAITFORIT_HOST="${1#*=}"
shift 1
;;
-p)
WAITFORIT_PORT="$2"
if [[ $WAITFORIT_PORT == "" ]]; then break; fi
shift 2
;;
--port=*)
WAITFORIT_PORT="${1#*=}"
shift 1
;;
-t)
WAITFORIT_TIMEOUT="$2"
if [[ $WAITFORIT_TIMEOUT == "" ]]; then break; fi
shift 2
;;
--timeout=*)
WAITFORIT_TIMEOUT="${1#*=}"
shift 1
;;
--)
shift
WAITFORIT_CLI=("$@")
break
;;
--help)
usage
;;
*)
echoerr "Unknown argument: $1"
usage
;;
esac
done
if [[ "$WAITFORIT_HOST" == "" || "$WAITFORIT_PORT" == "" ]]; then
echoerr "Error: you need to provide a host and port to test."
usage
fi
WAITFORIT_TIMEOUT=${WAITFORIT_TIMEOUT:-15}
WAITFORIT_STRICT=${WAITFORIT_STRICT:-0}
WAITFORIT_CHILD=${WAITFORIT_CHILD:-0}
WAITFORIT_QUIET=${WAITFORIT_QUIET:-0}
# Check to see if the timeout command is provided by busybox
WAITFORIT_TIMEOUT_PATH=$(type -p timeout)
WAITFORIT_TIMEOUT_PATH=$(realpath $WAITFORIT_TIMEOUT_PATH 2>/dev/null || readlink -f $WAITFORIT_TIMEOUT_PATH)
WAITFORIT_BUSYTIMEFLAG=""
if [[ $WAITFORIT_TIMEOUT_PATH =~ "busybox" ]]; then
WAITFORIT_ISBUSY=1
# Check if busybox timeout uses -t flag
# (recent Alpine versions don't support -t anymore)
if timeout &>/dev/stdout | grep -q -e '-t '; then
WAITFORIT_BUSYTIMEFLAG="-t"
fi
else
WAITFORIT_ISBUSY=0
fi
if [[ $WAITFORIT_CHILD -gt 0 ]]; then
wait_for
WAITFORIT_RESULT=$?
exit $WAITFORIT_RESULT
else
if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then
wait_for_wrapper
WAITFORIT_RESULT=$?
else
wait_for
WAITFORIT_RESULT=$?
fi
fi
if [[ $WAITFORIT_CLI != "" ]]; then
if [[ $WAITFORIT_RESULT -ne 0 && $WAITFORIT_STRICT -eq 1 ]]; then
echoerr "$WAITFORIT_cmdname: strict mode, refusing to execute subprocess"
exit $WAITFORIT_RESULT
fi
exec "${WAITFORIT_CLI[@]}"
else
exit $WAITFORIT_RESULT
fi
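# Example invocations (a sketch; host names, ports, and the trailing command
# are placeholders, following the usage text above):
#   ./wait-for-it.sh db:5432 -t 30 -- echo "db is up"
#   ./wait-for-it.sh -h op-geth -p 8545 -s -t 0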

View File

@ -1,41 +0,0 @@
[server]
host = "0.0.0.0"
port = 3001
mode = "storage"
kind = "lazy"
[metrics]
host = "127.0.0.1"
port = 9000
[metrics.gql]
port = 9001
[database]
type = "postgres"
host = "erc20-watcher-db"
port = 5432
database = "erc20-watcher"
username = "vdbm"
password = "password"
synchronize = true
logging = false
maxQueryExecutionTime = 100
[upstream]
[upstream.ethServer]
gqlApiEndpoint = "http://ipld-eth-server:8082/graphql"
rpcProviderEndpoint = "http://ipld-eth-server:8081"
[upstream.cache]
name = "requests"
enabled = false
deleteOnStart = false
[jobQueue]
dbConnectionString = "postgres://vdbm:password@erc20-watcher-db:5432/erc20-watcher-job-queue"
maxCompletionLagInSecs = 300
jobDelayInMilliSecs = 100
eventsInBatch = 50
blockDelayInMilliSecs = 2000
prefetchBlocksInMem = true
prefetchBlockCount = 10

View File

@ -1,56 +0,0 @@
[server]
host = "0.0.0.0"
port = 3009
kind = "lazy"
# Checkpointing state.
checkpointing = true
# Checkpoint interval in number of blocks.
checkpointInterval = 2000
# Enable state creation
enableState = true
# Boolean to filter logs by contract.
filterLogs = false
# Max block range for which to return events in eventsInRange GQL query.
# Use -1 for skipping check on block range.
maxEventsBlockRange = 1000
[metrics]
host = "127.0.0.1"
port = 9000
[metrics.gql]
port = 9001
[database]
type = "postgres"
host = "erc721-watcher-db"
port = 5432
database = "erc721-watcher"
username = "vdbm"
password = "password"
synchronize = true
logging = false
maxQueryExecutionTime = 100
[upstream]
[upstream.ethServer]
gqlApiEndpoint = "http://ipld-eth-server:8082/graphql"
rpcProviderEndpoint = "http://ipld-eth-server:8081"
[upstream.cache]
name = "requests"
enabled = false
deleteOnStart = false
[jobQueue]
dbConnectionString = "postgres://vdbm:password@erc721-watcher-db:5432/erc721-watcher-job-queue"
maxCompletionLagInSecs = 300
jobDelayInMilliSecs = 100
eventsInBatch = 50
blockDelayInMilliSecs = 2000
prefetchBlocksInMem = true
prefetchBlockCount = 10

View File

@ -1,89 +0,0 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
CERC_L2_GETH_RPC="${CERC_L2_GETH_RPC:-${DEFAULT_CERC_L2_GETH_RPC}}"
CERC_L1_ACCOUNTS_CSV_URL="${CERC_L1_ACCOUNTS_CSV_URL:-${DEFAULT_CERC_L1_ACCOUNTS_CSV_URL}}"
CERC_MOBYMASK_APP_BASE_URI="${CERC_MOBYMASK_APP_BASE_URI:-${DEFAULT_CERC_MOBYMASK_APP_BASE_URI}}"
CERC_DEPLOYED_CONTRACT="${CERC_DEPLOYED_CONTRACT:-${DEFAULT_CERC_DEPLOYED_CONTRACT}}"
# If the CERC_DEPLOYED_CONTRACT environment variable is set, skip contract deployment
if [ -n "$CERC_DEPLOYED_CONTRACT" ]; then
echo "CERC_DEPLOYED_CONTRACT is set to '$CERC_DEPLOYED_CONTRACT'"
echo "Skipping contract deployment"
exit 0
fi
echo "Using L2 RPC endpoint ${CERC_L2_GETH_RPC}"
if [ -n "$CERC_L1_ACCOUNTS_CSV_URL" ] && \
l1_accounts_response=$(curl -L --write-out '%{http_code}' --silent --output /dev/null "$CERC_L1_ACCOUNTS_CSV_URL") && \
[ "$l1_accounts_response" -eq 200 ];
then
echo "Fetching L1 account credentials using provided URL"
mkdir -p /geth-accounts
wget -O /geth-accounts/accounts.csv "$CERC_L1_ACCOUNTS_CSV_URL"
# Read the private key of an L1 account to deploy contract
CERC_PRIVATE_KEY_DEPLOYER=$(head -n 1 /geth-accounts/accounts.csv | cut -d ',' -f 3)
else
echo "Couldn't fetch L1 account credentials, using CERC_PRIVATE_KEY_DEPLOYER from env"
fi
# Set the private key
jq --arg privateKey "$CERC_PRIVATE_KEY_DEPLOYER" '.privateKey = $privateKey' secrets-template.json > secrets.json
# Set the RPC URL
jq --arg rpcUrl "$CERC_L2_GETH_RPC" '.rpcUrl = $rpcUrl' secrets.json > secrets_updated.json && mv secrets_updated.json secrets.json
# Set the MobyMask app base URI
jq --arg baseURI "$CERC_MOBYMASK_APP_BASE_URI" '.baseURI = $baseURI' secrets.json > secrets_updated.json && mv secrets_updated.json secrets.json
# Wait for L2 Optimism Geth and Node servers to be up before deploying contract
CERC_L2_GETH_HOST="${CERC_L2_GETH_HOST:-${DEFAULT_CERC_L2_GETH_HOST}}"
CERC_L2_GETH_PORT="${CERC_L2_GETH_PORT:-${DEFAULT_CERC_L2_GETH_PORT}}"
CERC_L2_NODE_HOST="${CERC_L2_NODE_HOST:-${DEFAULT_CERC_L2_NODE_HOST}}"
CERC_L2_NODE_PORT="${CERC_L2_NODE_PORT:-${DEFAULT_CERC_L2_NODE_PORT}}"
./wait-for-it.sh -h "${CERC_L2_GETH_HOST}" -p "${CERC_L2_GETH_PORT}" -s -t 0
./wait-for-it.sh -h "${CERC_L2_NODE_HOST}" -p "${CERC_L2_NODE_PORT}" -s -t 0
export RPC_URL="${CERC_L2_GETH_RPC}"
# Check and exit if a deployment already exists (on restarts)
if [ -f ./config.json ]; then
echo "config.json already exists, checking the contract deployment"
# Read JSON file
DEPLOYMENT_DETAILS=$(cat config.json)
CONTRACT_ADDRESS=$(echo "$DEPLOYMENT_DETAILS" | jq -r '.address')
cd ../hardhat
if yarn verifyDeployment --network optimism --contract "${CONTRACT_ADDRESS}"; then
echo "Deployment verfication successful"
cd ../server
else
echo "Deployment verfication failed, please clear MobyMask deployment volume before starting"
exit 1
fi
fi
# Wait until balance for deployer account is updated
cd ../hardhat
while true; do
ACCOUNT_BALANCE=$(yarn balance --network optimism "$CERC_PRIVATE_KEY_DEPLOYER" | grep ETH)
if [ "$ACCOUNT_BALANCE" != "0.0 ETH" ]; then
echo "Account balance updated: $ACCOUNT_BALANCE"
break # exit the loop
fi
echo "Account balance not updated: $ACCOUNT_BALANCE"
echo "Checking after 2 seconds"
sleep 2
done
cd ../server
npm run deployAndGenerateInvite

View File

@ -1,20 +0,0 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
# Check for peer ids in ./peers folder, create if not present
if [ -f /peer-ids/relay-id.json ]; then
echo "Using peer id for relay node from the mounted volume"
else
echo "Creating a new peer id for relay node"
yarn create-peer -f /peer-ids/relay-id.json
fi
if [ -f /peer-ids/peer-id.json ]; then
echo "Using peer id for peer node from the mounted volume"
else
echo "Creating a new peer id for peer node"
yarn create-peer -f /peer-ids/peer-id.json
fi

View File

@ -1,41 +0,0 @@
#!/usr/bin/env bash
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
CERC_CHAIN_ID="${CERC_CHAIN_ID:-${DEFAULT_CERC_CHAIN_ID}}"
CERC_DEPLOYED_CONTRACT="${CERC_DEPLOYED_CONTRACT:-${DEFAULT_CERC_DEPLOYED_CONTRACT}}"
CERC_RELAY_NODES="${CERC_RELAY_NODES:-${DEFAULT_CERC_RELAY_NODES}}"
CERC_APP_WATCHER_URL="${CERC_APP_WATCHER_URL:-${DEFAULT_CERC_APP_WATCHER_URL}}"
# If not set (or []), check the mounted volume for relay peer id
if [ -z "$CERC_RELAY_NODES" ] || [ "$CERC_RELAY_NODES" = "[]" ]; then
echo "CERC_RELAY_NODES not provided, taking from the mounted volume"
CERC_RELAY_NODES="[\"/ip4/127.0.0.1/tcp/9090/ws/p2p/$(jq -r '.id' /peers/relay-id.json)\"]"
fi
echo "Using CERC_RELAY_NODES $CERC_RELAY_NODES"
if [ -z "$CERC_DEPLOYED_CONTRACT" ]; then
# Use config from mounted volume (when running web-app along with watcher stack)
echo "Taking config for deployed contract from mounted volume"
while [ ! -f /server/config.json ]; do
echo "Config not found, retrying after 5 seconds"
sleep 5
done
# Get deployed contract address and chain id
CERC_DEPLOYED_CONTRACT=$(jq -r '.address' /server/config.json | tr -d '"')
CERC_CHAIN_ID=$(jq -r '.chainId' /server/config.json)
else
echo "Taking deployed contract details from env"
fi
# Use yq to create config.yml with environment variables
yq -n ".address = env(CERC_DEPLOYED_CONTRACT)" > /config/config.yml
yq ".watcherUrl = env(CERC_APP_WATCHER_URL)" -i /config/config.yml
yq ".chainId = env(CERC_CHAIN_ID)" -i /config/config.yml
yq ".relayNodes = strenv(CERC_RELAY_NODES)" -i /config/config.yml
/scripts/start-serving-app.sh

View File

@ -1,26 +0,0 @@
# Defaults
# Watcher endpoint
DEFAULT_CERC_APP_WATCHER_URL="http://localhost:3001"
# Set of relay peers to connect to from the relay node
DEFAULT_CERC_RELAY_PEERS=[]
# Domain to be used in the relay node's announce address
DEFAULT_CERC_RELAY_ANNOUNCE_DOMAIN=
# Base URI for mobymask-app (used for generating invite)
DEFAULT_CERC_MOBYMASK_APP_BASE_URI="http://127.0.0.1:3002/#"
# Set to false to disable the watcher peer from sending txs to L2
DEFAULT_CERC_ENABLE_PEER_L2_TXS=true
# Set deployed MobyMask contract address to avoid deploying contract in stack
# mobymask-app will use this contract address in config if run separately
DEFAULT_CERC_DEPLOYED_CONTRACT=
# Chain ID is used by mobymask web-app for txs
DEFAULT_CERC_CHAIN_ID=42069
# Set of relay nodes to be used by web-apps
DEFAULT_CERC_RELAY_NODES=[]

View File

@ -1,14 +0,0 @@
# Defaults
# L2 endpoints
DEFAULT_CERC_L2_GETH_RPC="http://op-geth:8545"
# Endpoints waited on before contract deployment
DEFAULT_CERC_L2_GETH_HOST="op-geth"
DEFAULT_CERC_L2_GETH_PORT=8545
DEFAULT_CERC_L2_NODE_HOST="op-node"
DEFAULT_CERC_L2_NODE_PORT=8547
# URL to get CSV with credentials for accounts on L1 to perform txs on L2
DEFAULT_CERC_L1_ACCOUNTS_CSV_URL="http://fixturenet-eth-bootnode-geth:9898/accounts.csv"

View File

@ -1,5 +0,0 @@
{
"rpcUrl": "",
"privateKey": "",
"baseURI": ""
}

View File

@ -1,10 +0,0 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
CERC_RELAY_MULTIADDR="/dns4/mobymask-watcher-server/tcp/9090/ws/p2p/$(jq -r '.id' /peer-ids/relay-id.json)"
# Write the relay node's multiaddr to /app/packages/peer/.env for running tests
echo "RELAY=\"$CERC_RELAY_MULTIADDR\"" > ./.env

View File

@ -1,56 +0,0 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
CERC_L2_GETH_RPC="${CERC_L2_GETH_RPC:-${DEFAULT_CERC_L2_GETH_RPC}}"
CERC_L1_ACCOUNTS_CSV_URL="${CERC_L1_ACCOUNTS_CSV_URL:-${DEFAULT_CERC_L1_ACCOUNTS_CSV_URL}}"
CERC_RELAY_PEERS="${CERC_RELAY_PEERS:-${DEFAULT_CERC_RELAY_PEERS}}"
CERC_RELAY_ANNOUNCE_DOMAIN="${CERC_RELAY_ANNOUNCE_DOMAIN:-${DEFAULT_CERC_RELAY_ANNOUNCE_DOMAIN}}"
CERC_ENABLE_PEER_L2_TXS="${CERC_ENABLE_PEER_L2_TXS:-${DEFAULT_CERC_ENABLE_PEER_L2_TXS}}"
CERC_DEPLOYED_CONTRACT="${CERC_DEPLOYED_CONTRACT:-${DEFAULT_CERC_DEPLOYED_CONTRACT}}"
echo "Using L2 RPC endpoint ${CERC_L2_GETH_RPC}"
CERC_RELAY_MULTIADDR="/dns4/mobymask-watcher-server/tcp/9090/ws/p2p/$(jq -r '.id' /app/peers/relay-id.json)"
# Use contract address from environment variable or set from config.json in mounted volume
if [ -n "$CERC_DEPLOYED_CONTRACT" ]; then
CONTRACT_ADDRESS="${CERC_DEPLOYED_CONTRACT}"
else
# Assign deployed contract address from server config (created by mobymask container after deploying contract)
CONTRACT_ADDRESS=$(jq -r '.address' /server/config.json | tr -d '"')
fi
if [ -n "$CERC_L1_ACCOUNTS_CSV_URL" ] && \
l1_accounts_response=$(curl -L --write-out '%{http_code}' --silent --output /dev/null "$CERC_L1_ACCOUNTS_CSV_URL") && \
[ "$l1_accounts_response" -eq 200 ];
then
echo "Fetching L1 account credentials using provided URL"
mkdir -p /geth-accounts
wget -O /geth-accounts/accounts.csv "$CERC_L1_ACCOUNTS_CSV_URL"
# Read the private key of an L1 account for sending txs from peer
CERC_PRIVATE_KEY_PEER=$(awk -F, 'NR==2{print $NF}' /geth-accounts/accounts.csv)
else
echo "Couldn't fetch L1 account credentials, using CERC_PRIVATE_KEY_PEER from env"
fi
# Read in the config template TOML file and modify it
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
sed -E "s|REPLACE_WITH_CERC_RELAY_PEERS|${CERC_RELAY_PEERS}|g; \
s/REPLACE_WITH_CERC_RELAY_ANNOUNCE_DOMAIN/${CERC_RELAY_ANNOUNCE_DOMAIN}/g; \
s|REPLACE_WITH_CERC_RELAY_MULTIADDR|${CERC_RELAY_MULTIADDR}|g; \
s/REPLACE_WITH_CERC_ENABLE_PEER_L2_TXS/${CERC_ENABLE_PEER_L2_TXS}/g; \
s/REPLACE_WITH_CERC_PRIVATE_KEY_PEER/${CERC_PRIVATE_KEY_PEER}/g; \
s/REPLACE_WITH_CONTRACT_ADDRESS/${CONTRACT_ADDRESS}/g; \
s|REPLACE_WITH_CERC_L2_GETH_RPC_ENDPOINT|${CERC_L2_GETH_RPC}| ")
# Write the modified content to a new file
echo "$WATCHER_CONFIG" > environments/local.toml
echo 'yarn server'
yarn server

View File

@ -1,6 +0,0 @@
{
"relayNodes": [],
"peer": {
"enableDebugInfo": true
}
}

View File

@ -1,20 +0,0 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
CERC_RELAY_NODES="${CERC_RELAY_NODES:-${DEFAULT_CERC_RELAY_NODES}}"
# If not set (or []), check the mounted volume for relay peer id
if [ -z "$CERC_RELAY_NODES" ] || [ "$CERC_RELAY_NODES" = "[]" ]; then
echo "CERC_RELAY_NODES not provided, taking from the mounted volume"
CERC_RELAY_NODES="[\"/ip4/127.0.0.1/tcp/9090/ws/p2p/$(jq -r '.id' /peers/relay-id.json)\"]"
fi
echo "Using CERC_RELAY_NODES $CERC_RELAY_NODES"
# Use yq to create config.yml with environment variables
yq -n ".relayNodes = strenv(CERC_RELAY_NODES)" > /config/config.yml
/scripts/start-serving-app.sh

View File

@ -1,76 +0,0 @@
[server]
host = "0.0.0.0"
port = 3001
kind = "lazy"
# Checkpointing state.
checkpointing = true
# Checkpoint interval in number of blocks.
checkpointInterval = 2000
# Enable state creation
enableState = true
# Boolean to filter logs by contract.
filterLogs = true
# Max block range for which to return events in eventsInRange GQL query.
# Use -1 for skipping check on block range.
maxEventsBlockRange = -1
[server.p2p]
enableRelay = true
enablePeer = true
[server.p2p.relay]
host = "0.0.0.0"
port = 9090
relayPeers = REPLACE_WITH_CERC_RELAY_PEERS
peerIdFile = './peers/relay-id.json'
announce = 'REPLACE_WITH_CERC_RELAY_ANNOUNCE_DOMAIN'
enableDebugInfo = true
[server.p2p.peer]
relayMultiaddr = 'REPLACE_WITH_CERC_RELAY_MULTIADDR'
pubSubTopic = 'mobymask'
peerIdFile = './peers/peer-id.json'
enableDebugInfo = true
enableL2Txs = REPLACE_WITH_CERC_ENABLE_PEER_L2_TXS
[server.p2p.peer.l2TxsConfig]
privateKey = 'REPLACE_WITH_CERC_PRIVATE_KEY_PEER'
contractAddress = 'REPLACE_WITH_CONTRACT_ADDRESS'
[metrics]
host = "0.0.0.0"
port = 9000
[metrics.gql]
port = 9001
[database]
type = "postgres"
host = "mobymask-watcher-db"
port = 5432
database = "mobymask-watcher"
username = "vdbm"
password = "password"
synchronize = true
logging = false
[upstream]
[upstream.ethServer]
gqlApiEndpoint = "http://ipld-eth-server:8083/graphql"
rpcProviderEndpoint = "REPLACE_WITH_CERC_L2_GETH_RPC_ENDPOINT"
blockDelayInMilliSecs = 60000
[upstream.cache]
name = "requests"
enabled = false
deleteOnStart = false
[jobQueue]
dbConnectionString = "postgres://vdbm:password@mobymask-watcher-db/mobymask-watcher-job-queue"
maxCompletionLagInSecs = 300
jobDelayInMilliSecs = 100
eventsInBatch = 50

View File

@ -1,53 +0,0 @@
[server]
host = "0.0.0.0"
port = 3001
kind = "active"
# Checkpointing state.
checkpointing = true
# Checkpoint interval in number of blocks.
checkpointInterval = 2000
# IPFS API address (can be taken from the output on running the IPFS daemon).
# ipfsApiAddr = "/ip4/127.0.0.1/tcp/5001"
# Boolean to filter logs by contract.
filterLogs = true
# Max block range for which to return events in eventsInRange GQL query.
# Use -1 for skipping check on block range.
maxEventsBlockRange = -1
[metrics]
host = "0.0.0.0"
port = 9000
[metrics.gql]
port = 9001
[database]
type = "postgres"
host = "mobymask-watcher-db"
port = 5432
database = "mobymask-watcher"
username = "vdbm"
password = "password"
synchronize = true
logging = false
[upstream]
[upstream.ethServer]
gqlApiEndpoint = "http://ipld-eth-server:8083/graphql"
rpcProviderEndpoint = "http://ipld-eth-server:8082"
blockDelayInMilliSecs = 60000
[upstream.cache]
name = "requests"
enabled = false
deleteOnStart = false
[jobQueue]
dbConnectionString = "postgres://vdbm:password@mobymask-watcher-db/mobymask-watcher-job-queue"
maxCompletionLagInSecs = 300
jobDelayInMilliSecs = 100
eventsInBatch = 50

View File

@ -1,39 +0,0 @@
[server]
host = "0.0.0.0"
port = 3001
mode = "eth_call"
kind = "lazy"
[metrics]
host = "127.0.0.1"
port = 9000
[metrics.gql]
port = 9001
[database]
type = "postgres"
host = "uniswap-watcher-db"
port = 5432
database = "erc20-watcher"
username = "vdbm"
password = "password"
synchronize = true
logging = false
maxQueryExecutionTime = 100
[upstream]
[upstream.ethServer]
gqlApiEndpoint = "http://ipld-eth-server.example.com:8083/graphql"
rpcProviderEndpoint = "http://ipld-eth-server.example.com:8082"
[upstream.cache]
name = "requests"
enabled = false
deleteOnStart = false
[jobQueue]
dbConnectionString = "postgres://vdbm:password@uniswap-watcher-db:5432/erc20-watcher-job-queue"
maxCompletionLagInSecs = 300
jobDelayInMilliSecs = 100
eventsInBatch = 50
blockDelayInMilliSecs = 2000

View File

@ -1,10 +0,0 @@
#!/bin/sh
set -e
set -u
echo "Initializing watcher..."
yarn fill --start-block $UNISWAP_START_BLOCK --end-block $((UNISWAP_START_BLOCK + 1))
echo "Running active server"
DEBUG=vulcanize:* exec node --enable-source-maps dist/server.js

View File

@ -1,90 +0,0 @@
[server]
host = "0.0.0.0"
port = 3004
mode = "prod"
kind = "active"
# Checkpointing state.
checkpointing = true
# Checkpoint interval in number of blocks.
checkpointInterval = 50000
# Enable state creation
enableState = false
# Max block range for which to return events in eventsInRange GQL query.
# Use -1 for skipping check on block range.
maxEventsBlockRange = 1000
# Interval in number of blocks at which to clear entities cache.
clearEntitiesCacheInterval = 1000
# Boolean to skip updating entity fields required in state creation and not required in the frontend.
skipStateFieldsUpdate = false
# Boolean to load GQL query nested entity relations sequentially.
loadRelationsSequential = false
# Max GQL API requests to process simultaneously (defaults to 1).
maxSimultaneousRequests = 1
# GQL cache settings
[server.gqlCache]
enabled = true
# Max in-memory cache size (in bytes) (default 8 MB)
# maxCacheSize
# GQL cache-control max-age settings (in seconds)
maxAge = 15
timeTravelMaxAge = 86400 # 1 day
[metrics]
host = "0.0.0.0"
port = 9002
[metrics.gql]
port = 9003
[database]
type = "postgres"
host = "uniswap-watcher-db"
port = 5432
database = "uni-info-watcher"
username = "vdbm"
password = "password"
synchronize = true
logging = false
maxQueryExecutionTime = 100
[database.extra]
# maximum number of clients the pool should contain
max = 20
[upstream]
[upstream.ethServer]
gqlApiEndpoint = "http://ipld-eth-server.example.com:8083/graphql"
rpcProviderEndpoint = "http://ipld-eth-server.example.com:8082"
[upstream.cache]
name = "requests"
enabled = false
deleteOnStart = false
[upstream.uniWatcher]
gqlEndpoint = "http://uni-watcher-server:3003/graphql"
gqlSubscriptionEndpoint = "ws://uni-watcher-server:3003/graphql"
[upstream.tokenWatcher]
gqlEndpoint = "http://erc20-watcher-server:3001/graphql"
gqlSubscriptionEndpoint = "ws://erc20-watcher-server:3001/graphql"
[jobQueue]
dbConnectionString = "postgres://vdbm:password@uniswap-watcher-db:5432/uni-info-watcher-job-queue"
maxCompletionLagInSecs = 300
jobDelayInMilliSecs = 1000
eventsInBatch = 50
subgraphEventsOrder = true
blockDelayInMilliSecs = 2000
prefetchBlocksInMem = true
prefetchBlockCount = 10

View File

@ -1,41 +0,0 @@
[server]
host = "0.0.0.0"
port = 3003
kind = "active"
[metrics]
host = "0.0.0.0"
port = 9000
[metrics.gql]
port = 9001
[database]
type = "postgres"
host = "uniswap-watcher-db"
port = 5432
database = "uni-watcher"
username = "vdbm"
password = "password"
synchronize = true
logging = false
maxQueryExecutionTime = 100
[upstream]
[upstream.ethServer]
gqlApiEndpoint = "http://ipld-eth-server.example.com:8083/graphql"
rpcProviderEndpoint = "http://ipld-eth-server.example.com:8082"
[upstream.cache]
name = "requests"
enabled = false
deleteOnStart = false
[jobQueue]
dbConnectionString = "postgres://vdbm:password@uniswap-watcher-db:5432/uni-watcher-job-queue"
maxCompletionLagInSecs = 300
jobDelayInMilliSecs = 0
eventsInBatch = 50
lazyUpdateBlockProgress = true
blockDelayInMilliSecs = 2000
prefetchBlocksInMem = true
prefetchBlockCount = 10

View File

@ -1,10 +0,0 @@
#!/bin/sh
set -e
set -u
echo "Watching factory contract 0x1F98431c8aD98523631AE4a59f267346ea31F984"
yarn watch:contract --address 0x1F98431c8aD98523631AE4a59f267346ea31F984 --kind factory --startingBlock 12369621 --checkpoint
echo "Watching nfpm contract 0xC36442b4a4522E871399CD717aBDD847Ab11FE88"
yarn watch:contract --address 0xC36442b4a4522E871399CD717aBDD847Ab11FE88 --kind nfpm --startingBlock 12369651 --checkpoint

View File

@ -1,13 +0,0 @@
# Sourced into container build scripts to do generic command setup
if [[ -n "$CERC_SCRIPT_DEBUG" ]]; then
set -x
echo "Build environment variables:"
env
fi
build_command_args=""
if [[ ${CERC_FORCE_REBUILD} == "true" ]]; then
build_command_args="${build_command_args} --no-cache"
fi
if [[ -n "$CERC_CONTAINER_EXTRA_BUILD_ARGS" ]]; then
build_command_args="${build_command_args} ${CERC_CONTAINER_EXTRA_BUILD_ARGS}"
fi

View File

@ -1,5 +0,0 @@
#!/usr/bin/env bash
# Build a local version of the task executor for act-runner
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
docker build -t cerc/act-runner-task-executor:local -f ${CERC_REPO_BASE_DIR}/hosting/gitea/Dockerfile.task-executor ${build_command_args} ${SCRIPT_DIR}

View File

@ -1,5 +0,0 @@
#!/usr/bin/env bash
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
# Build a local version of the act-runner image
# TODO: enhance the default build code path to cope with this container (repo has an _ which needs to be converted to - in the image tag)
docker build -t cerc/act-runner:local -f ${CERC_REPO_BASE_DIR}/act_runner/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/act_runner

View File

@ -1,31 +0,0 @@
# From: https://github.com/vyzo/gerbil/blob/master/docker/Dockerfile
FROM gerbil/ubuntu
# Install the Solidity compiler (latest stable version)
# and guile
# and libsecp256k1-dev
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive && export DEBCONF_NOWARNINGS="yes" && \
apt-get install -y software-properties-common && \
add-apt-repository ppa:ethereum/ethereum && \
apt-get update && \
apt-get install -y solc && \
apt-get install -y guile-3.0 && \
apt-get install -y libsecp256k1-dev && \
apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
RUN mkdir /scripts
COPY install-dependencies.sh /scripts
# Override the definition of GERBIL_PATH in the base image; this is safe
# because (at present) no gerbil packages are installed in the base image.
# We do this in order to allow a set of pre-installed packages from the container
# to be used with an arbitrary, potentially different set of projects bind mounted
# at /src
ENV GERBIL_PATH=/.gerbil
RUN bash /scripts/install-dependencies.sh
# Needed to prevent git from raging about /src
RUN git config --global --add safe.directory /src
COPY entrypoint.sh /scripts
ENTRYPOINT ["/scripts/entrypoint.sh"]

View File

@ -1,21 +0,0 @@
## Gerbil Scheme Builder
This container is designed to be used as a simple "build runner" environment for building and running Scheme projects using Gerbil and gerbil-ethereum. Its primary purpose is to allow Gerbil code to be built, tested, and run without installing and configuring all the necessary prerequisites and dependencies on the host system.
### Usage
First build the container with:
```
$ laconic-so build-containers --include cerc/builder-gerbil
```
Now, assuming a gerbil project located at `~/projects/my-project`, run bash in the container mounting the project with:
```
$ docker run -it -v $HOME/projects/my-project:/src cerc/builder-gerbil:latest bash
root@7c4124bb09e3:/src#
```
Now Gerbil commands can be run inside the container.
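For example, a project dependency can be fetched and built with `gxpkg`, in the same way the container's own `install-dependencies.sh` does (a sketch; the package name below is illustrative):
```
root@7c4124bb09e3:/src# gxpkg install github.com/fare/gerbil-utils
root@7c4124bb09e3:/src# gxpkg build github.com/fare/gerbil-utils
```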

View File

@ -1,2 +0,0 @@
#!/bin/sh
exec "$@"

View File

@ -1,16 +0,0 @@
DEPS=(github.com/fare/gerbil-utils
github.com/fare/gerbil-poo
github.com/fare/gerbil-crypto
github.com/fare/gerbil-persist
github.com/fare/gerbil-ethereum
github.com/drewc/gerbil-swank
github.com/drewc/drewc-r7rs-swank
github.com/drewc/smug-gerbil
github.com/drewc/ftw
github.com/vyzo/gerbil-libp2p
) ;
for i in ${DEPS[@]} ; do
echo "Installing gerbil package: $i"
gxpkg install $i
gxpkg build $i
done

View File

@ -1,72 +0,0 @@
# Originally from: https://github.com/devcontainers/images/blob/main/src/javascript-node/.devcontainer/Dockerfile
# Which depends on: https://github.com/nodejs/docker-node/blob/main/Dockerfile-debian.template
# [Choice] Node.js version (use -bullseye variants on local arm64/Apple Silicon): 18, 16, 14, 18-bullseye, 16-bullseye, 14-bullseye, 18-buster, 16-buster, 14-buster
ARG VARIANT=18-bullseye
FROM node:${VARIANT}
# Set these args to change the uid/gid for the base container's "node" user to match that of the host user (so bind mounts work as expected).
ARG CERC_HOST_UID=1000
ARG CERC_HOST_GID=1000
# Make these values available at runtime to allow a consistency check.
ENV HOST_UID=${CERC_HOST_UID}
ENV HOST_GID=${CERC_HOST_GID}
ARG USERNAME=node
ARG NPM_GLOBAL=/usr/local/share/npm-global
# Add NPM global to PATH.
ENV PATH=${NPM_GLOBAL}/bin:${PATH}
SHELL ["/bin/bash", "-c"]
RUN \
# Don't switch container uid/gid if the host uid/gid is 1000 (which means it's already correct),
# or root (which won't work anyway) or <= 100 (which also won't work).
if [[ ${CERC_HOST_GID} -ne 1000 && ${CERC_HOST_GID} -ne 0 && ${CERC_HOST_GID} -gt 100 ]]; then \
groupmod -g ${CERC_HOST_GID} ${USERNAME}; \
fi \
&& if [[ ${CERC_HOST_UID} -ne 1000 && ${CERC_HOST_UID} -ne 0 && ${CERC_HOST_UID} -gt 100 ]]; then \
usermod -u ${CERC_HOST_UID} -g ${CERC_HOST_GID} ${USERNAME} && chown ${CERC_HOST_UID}:${CERC_HOST_GID} /home/${USERNAME}; \
fi
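# Example build invocation (an illustrative sketch, not part of this Dockerfile;
# the cerc/builder-js:local tag is an assumption) that matches the container's
# "node" user to the invoking host user:
#   docker build \
#     --build-arg CERC_HOST_UID=$(id -u) \
#     --build-arg CERC_HOST_GID=$(id -g) \
#     -t cerc/builder-js:local .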
# Prevents npm from printing version warnings
ENV NPM_CONFIG_UPDATE_NOTIFIER=false
RUN \
# Configure global npm install location, use group to adapt to UID/GID changes
if ! cat /etc/group | grep -e "^npm:" > /dev/null 2>&1; then groupadd -r npm; fi \
&& usermod -a -G npm ${USERNAME} \
&& umask 0002 \
&& mkdir -p ${NPM_GLOBAL} \
&& touch /usr/local/etc/npmrc \
&& chown ${USERNAME}:npm ${NPM_GLOBAL} /usr/local/etc/npmrc \
&& chmod g+s ${NPM_GLOBAL} \
&& npm config -g set prefix ${NPM_GLOBAL} \
&& su ${USERNAME} -c "npm config -g set prefix ${NPM_GLOBAL}" \
# Install eslint
&& su ${USERNAME} -c "umask 0002 && npm install -g eslint" \
&& npm cache clean --force > /dev/null 2>&1
# [Optional] Uncomment this section to install additional OS packages.
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install --no-install-recommends jq
# [Optional] Uncomment if you want to install an additional version of node using nvm
# ARG EXTRA_NODE_VERSION=10
# RUN su node -c "source /usr/local/share/nvm/nvm.sh && nvm install ${EXTRA_NODE_VERSION}"
# [Optional] Uncomment if you want to install more global node modules
# RUN su node -c "npm install -g <your-package-list-here>"
RUN mkdir /scripts
COPY build-npm-package.sh /scripts
COPY yarn-local-registry-fixup.sh /scripts
COPY build-npm-package-local-dependencies.sh /scripts
COPY check-uid.sh /scripts
ENV PATH="${PATH}:/scripts"
COPY entrypoint.sh .
ENTRYPOINT ["./entrypoint.sh"]
# Placeholder CMD : generally this will be overridden at run time like :
# docker run -it -v /home/builder/cerc/laconic-sdk:/workspace cerc/builder-js sh -c 'cd /workspace && yarn && yarn build'
CMD node --version

View File

@ -1,17 +0,0 @@
## JS/TS Package Builder
This container is designed to be used as a simple "build runner" environment for building and publishing JS/TS projects
using `yarn`.
### Running a build
As a temporary measure while the necessary functionality is being added to Stack Orchestrator,
it is possible to build packages manually by invoking `docker run`, for example as follows:
```
$ docker run --rm -it --add-host gitea.local:host-gateway \
-v ${HOME}/cerc/laconic-registry-cli:/workspace cerc/builder-js \
sh -c 'cd /workspace && CERC_NPM_AUTH_TOKEN=6613572a28ebebaee20ccd90064251fa8c2b94f6 \
build-npm-package-local-dependencies.sh http://gitea.local:3000/api/packages/cerc-io/npm/ 0.1.8'
```
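A package with no local scoped dependencies can be built and published directly with `build-npm-package.sh` (a sketch against the same local registry; the mounted project and the token value are placeholders):
```
$ docker run --rm -it --add-host gitea.local:host-gateway \
-v ${HOME}/cerc/laconic-sdk:/workspace cerc/builder-js \
sh -c 'cd /workspace && CERC_NPM_AUTH_TOKEN=<your-token> \
build-npm-package.sh http://gitea.local:3000/api/packages/cerc-io/npm/ 0.1.8'
```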

View File

@ -1,37 +0,0 @@
#!/bin/bash
# Usage: build-npm-package-local-dependencies.sh <registry-url> <publish-with-this-version>
# Runs build-npm-package.sh after first fixing up yarn.lock to use a local
# npm registry for all packages in a specific scope (currently @cerc-io, @lirewine and @muknsys)
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
if ! [[ $# -eq 1 || $# -eq 2 ]]; then
echo "Illegal number of parameters" >&2
exit 1
fi
if [[ -z "${CERC_NPM_AUTH_TOKEN}" ]]; then
echo "CERC_NPM_AUTH_TOKEN is not set" >&2
exit 1
fi
# Exit on error
set -e
local_npm_registry_url=$1
package_publish_version=$2
# If we need to handle an additional scope, add it to the list below:
npm_scopes_to_handle=("@cerc-io" "@lirewine" "@muknsys")
for npm_scope_for_local in "${npm_scopes_to_handle[@]}"
do
# We need to configure the local registry
npm config set ${npm_scope_for_local}:registry ${local_npm_registry_url}
npm config set -- ${local_npm_registry_url}:_authToken ${CERC_NPM_AUTH_TOKEN}
# Find the set of dependencies from the specified scope
mapfile -t dependencies_from_scope < <(jq -r '.dependencies | with_entries(if (.key|test("^'${npm_scope_for_local}'/.*$")) then ( {key: .key, value: .value } ) else empty end ) | keys[]' package.json)
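# The jq filter above selects dependency names under the current scope; for @cerc-io it
# would yield names like "@cerc-io/laconic-sdk" (illustrative example)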
echo "Fixing up dependencies in scope ${npm_scope_for_local}"
for package in "${dependencies_from_scope[@]}"
do
echo "Fixing up package ${package}"
yarn-local-registry-fixup.sh $package ${local_npm_registry_url}
done
done
echo "Running build"
build-npm-package.sh ${local_npm_registry_url} ${package_publish_version}

View File

@ -1,47 +0,0 @@
#!/bin/bash
# Usage: build-npm-package.sh <registry-url> [<publish-with-this-version>]
# Note: supply the registry auth token in CERC_NPM_AUTH_TOKEN
if [[ -n "$CERC_SCRIPT_DEBUG" ]]; then
set -x
fi
if ! [[ $# -eq 1 || $# -eq 2 ]]; then
echo "Illegal number of parameters" >&2
exit 1
fi
if [[ -z "${CERC_NPM_AUTH_TOKEN}" ]]; then
echo "CERC_NPM_AUTH_TOKEN is not set" >&2
exit 1
fi
if [[ $# -eq 2 ]]; then
package_publish_version=$2
else
package_publish_version=$( jq -r .version package.json )
fi
# Exit on error
set -e
# Get the name of this package from package.json since we weren't passed that
package_name=$( jq -r .name package.json )
local_npm_registry_url=$1
npm config set @cerc-io:registry ${local_npm_registry_url}
npm config set @lirewine:registry ${local_npm_registry_url}
npm config set @muknsys:registry ${local_npm_registry_url}
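# Note: scoped registry config only redirects packages under these scopes to the local
# registry; all other packages still resolve from the default npm registry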
# Work around a bug in npm unpublish: it needs the url to be of the form //<foo> and not http://<foo>
local_npm_registry_url_fixed=$( echo ${local_npm_registry_url} | sed -e 's/^http[s]\{0,1\}://')
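# e.g. (illustrative) http://gitea.local:3000/api/packages/cerc-io/npm/ becomes //gitea.local:3000/api/packages/cerc-io/npm/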
npm config set -- ${local_npm_registry_url_fixed}:_authToken ${CERC_NPM_AUTH_TOKEN}
# First check if the version of this package we're trying to build already exists in the registry
package_exists=$( yarn info --json ${package_name}@${package_publish_version} 2>/dev/null | jq -r .data.dist.tarball )
if [[ -n "$package_exists" && "$package_exists" != "null" ]]; then
echo "${package_publish_version} of ${package_name} already exists in the registry"
if [[ ${CERC_FORCE_REBUILD} == "true" ]]; then
# Attempt to unpublish the existing package
echo "NOTE: unpublishing existing package version since force rebuild is enabled"
npm unpublish --force ${package_name}@${package_publish_version}
else
echo "skipping build since target version already exists"
exit 0
fi
fi
echo "Build and publish ${package_name} version ${package_publish_version}"
yarn install
yarn build
yarn publish --non-interactive --new-version ${package_publish_version} --no-git-tag-version

View File

@ -1,21 +0,0 @@
#!/bin/bash
# Make the container usable for uid/gid != 1000
if [[ -n "$CERC_SCRIPT_DEBUG" ]]; then
set -x
fi
current_uid=$(id -u)
current_gid=$(id -g)
# Don't check if running as root
if [[ ${current_uid} == 0 ]]; then
exit 0
fi
# Check the current uid/gid vs the uid/gid used to build the container.
# We do this because both bind mounts and npm tooling require the uid/gid to match.
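# e.g. (illustrative) a workspace bind-mounted from a host user with uid 1001 is not
# writable by the container user if that user still has uid 1000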
if [[ ${current_gid} != ${HOST_GID} ]]; then
echo "Warning: running with gid: ${current_gid} which is not the gid for which this container was built (${HOST_GID})"
exit 0
fi
if [[ ${current_uid} != ${HOST_UID} ]]; then
echo "Warning: running with gid: ${current_uid} which is not the uid for which this container was built (${HOST_UID})"
exit 0
fi

View File

@ -1,3 +0,0 @@
#!/bin/sh
/scripts/check-uid.sh
exec "$@"

View File

@ -1,58 +0,0 @@
#!/bin/bash
# Usage: yarn-local-registry-fixup.sh <package-to-fix> <registry-url>
# Assumes package.json and yarn.lock are in the cwd
# The purpose of this script is to take a project cloned from git
# and "fixup" its yarn.lock file so that the specified dependency
# will be fetched from a registry other than the one used when
# yarn.lock was generated. It updates all checksums using data
# from the "new" registry (due to embedded timestamps etc., the
# same source code re-built later will not have the same checksum).
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
if [[ $# -ne 2 ]]; then
echo "Illegal number of parameters" >&2
exit 1
fi
# Exit on error
set -e
target_package=$1
local_npm_registry_url=$2
# Extract the actual version pinned in yarn.lock
# See: https://stackoverflow.com/questions/60454251/how-to-know-the-version-of-currently-installed-package-from-yarn-lock
versioned_target_package=$(yarn list --pattern ${target_package} --depth=0 --json --non-interactive --no-progress | jq -r '.data.trees[].name')
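# e.g. (illustrative) for target package @cerc-io/laconic-sdk this might yield "@cerc-io/laconic-sdk@0.1.8"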
# Use yarn info to get URL checksums etc from the new registry
yarn_info_output=$(yarn info --json $versioned_target_package 2>/dev/null)
# First check if the target version actually exists.
# If it doesn't exist there will be no .data.dist.tarball element,
# and jq will output the string "null"
package_tarball=$(echo $yarn_info_output | jq -r .data.dist.tarball)
if [[ $package_tarball == "null" ]]; then
echo "FATAL: Target package version ($versioned_target_package) not found" >&2
exit 1
fi
# Code below parses out the values we need
# When running inside a container, the registry can return a URL with the wrong host name due to proxying
# so we need to check if that has happened and fix the URL if so.
if ! [[ "${package_tarball}" =~ ^${local_npm_registry_url}.* ]]; then
# HACK: I've hard-wired the host names below. Replace with proper implementation
# TODO: remove the hack when proven no longer necessary
package_tarball=$( echo ${package_tarball} | sed -e 's/localhost/gitea.local/g' )
fi
package_integrity=$(echo $yarn_info_output | jq -r .data.dist.integrity)
package_shasum=$(echo $yarn_info_output | jq -r .data.dist.shasum)
package_resolved=${package_tarball}#${package_shasum}
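# yarn.lock's "resolved" field has the form <tarball-url>#<shasum>,
# e.g. (illustrative) http://gitea.local:3000/.../laconic-sdk-0.1.8.tgz#<shasum>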
# Some strings need to be escaped so they work when passed to sed later
escaped_package_integrity=$(printf '%s\n' "$package_integrity" | sed -e 's/[\/&]/\\&/g')
escaped_package_resolved=$(printf '%s\n' "$package_resolved" | sed -e 's/[\/&]/\\&/g')
escaped_target_package=$(printf '%s\n' "$target_package" | sed -e 's/[\/&]/\\&/g')
if [ -n "$CERC_SCRIPT_VERBOSE" ]; then
echo "Tarball: ${package_tarball}"
echo "Integrity: ${package_integrity}"
echo "Shasum: ${package_shasum}"
echo "Resolved: ${package_resolved}"
fi
# Use magic sed regex to replace the values in yarn.lock
# Note: yarn.lock is not json so we can not use jq for this
sed -i -e '/^\"'${escaped_target_package}'.*\":$/ , /^\".*$/ s/^\([[:space:]]\{1,\}resolved \).*$/\1'\"${escaped_package_resolved}\"'/' yarn.lock
sed -i -e '/^\"'${escaped_target_package}'.*\":$/ , /^\".*$/ s/^\([[:space:]]\{1,\}integrity \).*$/\1'${escaped_package_integrity}'/' yarn.lock

View File

@ -1,4 +0,0 @@
#!/usr/bin/env bash
# Build cerc/eth-probe
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
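# build-base.sh is presumed to set ${build_command_args} (e.g. common --build-arg flags)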
docker build -t cerc/eth-probe:local ${build_command_args} ${CERC_REPO_BASE_DIR}/eth-probe

View File

@ -1,4 +0,0 @@
#!/usr/bin/env bash
# Build cerc/eth-statediff-fill-service
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
docker build -t cerc/eth-statediff-fill-service:local ${build_command_args} ${CERC_REPO_BASE_DIR}/eth-statediff-fill-service

View File

@ -1,4 +0,0 @@
#!/usr/bin/env bash
# Build cerc/eth-statediff-service
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
docker build -t cerc/eth-statediff-service:local ${build_command_args} ${CERC_REPO_BASE_DIR}/eth-statediff-service

Some files were not shown because too many files have changed in this diff.