Compare commits

1 commit

bd8c78e1a3  update fixturenet-plugeth test  2023-11-02 09:26:47 -04:00

662 changed files with 1688 additions and 41858 deletions

View File

@@ -6,8 +6,6 @@ on:
   paths:
     - '!**'
     - '.gitea/workflows/triggers/fixturenet-eth-plugeth-test'
-  schedule: # Note: coordinate with other tests to not overload runners at the same time of day
-    - cron: '2 14 * * *'
 # Needed until we can incorporate docker startup into the executor container
 env:
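With the schedule removed on the right-hand side, this job fires only on a push that touches its trigger file (see the trigger files later in this diff). A hypothetical manual trigger, using the path from the paths: filter above:

    # Any committed change to the trigger file starts the job
    echo trigger >> .gitea/workflows/triggers/fixturenet-eth-plugeth-test
    git add .gitea/workflows/triggers/fixturenet-eth-plugeth-test
    git commit -m "Trigger fixturenet-eth-plugeth-test" && git push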

View File

@@ -6,8 +6,11 @@ on:
   paths:
     - '!**'
     - '.gitea/workflows/triggers/fixturenet-laconicd-test'
-  schedule:
-    - cron: '1 13 * * *'
+# Needed until we can incorporate docker startup into the executor container
+env:
+  DOCKER_HOST: unix:///var/run/dind.sock
 jobs:
   test:
@@ -44,5 +47,9 @@ jobs:
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
         run: ./scripts/build_shiv_package.sh
+      - name: Start dockerd # Also needed until we can incorporate into the executor
+        run: |
+          dockerd -H $DOCKER_HOST --userland-proxy=false &
+          sleep 5
       - name: "Run fixturenet-laconicd tests"
         run: ./tests/fixturenet-laconicd/run-test.sh
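The added "Start dockerd" step waits a fixed 5 seconds for the daemon. A sketch of a more defensive variant (not part of this diff; same DOCKER_HOST assumed) that polls until the daemon answers:

    dockerd -H $DOCKER_HOST --userland-proxy=false &
    tries=0
    # Wait up to ~30s for dockerd to respond instead of sleeping blindly
    until docker -H $DOCKER_HOST info > /dev/null 2>&1; do
        tries=$((tries + 1))
        [ "$tries" -gt 30 ] && { echo "dockerd did not start" >&2; exit 1; }
        sleep 1
    done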

View File

@@ -1,21 +0,0 @@
name: Lint Checks
on:
  pull_request:
    branches: '*'
  push:
    branches: '*'

jobs:
  test:
    name: "Run linter"
    runs-on: ubuntu-latest
    steps:
      - name: "Clone project repository"
        uses: actions/checkout@v3
      - name: "Install Python"
        uses: actions/setup-python@v4
        with:
          python-version: '3.8'
      - name : "Run flake8"
        uses: py-actions/flake8@v2
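The deleted workflow's substantive step is py-actions/flake8, so the same check can still be reproduced locally; a minimal equivalent (assuming Python 3.8 per the workflow's pin):

    pip install flake8
    flake8 .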

View File

@@ -1,54 +0,0 @@
name: Container Registry Test

on:
  push:
    branches: '*'
    paths:
      - '!**'
      - '.gitea/workflows/triggers/test-container-registry'
      - '.gitea/workflows/test-container-registry.yml'
      - 'tests/container-registry/run-test.sh'
  schedule: # Note: coordinate with other tests to not overload runners at the same time of day
    - cron: '6 19 * * *'

jobs:
  test:
    name: "Run container registry hosting test on kind/k8s"
    runs-on: ubuntu-22.04
    steps:
      - name: "Clone project repository"
        uses: actions/checkout@v3
      # At present the stock setup-python action fails on Linux/aarch64
      # Conditional steps below workaround this by using deadsnakes for that case only
      - name: "Install Python for ARM on Linux"
        if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
        uses: deadsnakes/action@v3.0.1
        with:
          python-version: '3.8'
      - name: "Install Python cases other than ARM on Linux"
        if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
        uses: actions/setup-python@v4
        with:
          python-version: '3.8'
      - name: "Print Python version"
        run: python3 --version
      - name: "Install shiv"
        run: pip install shiv
      - name: "Generate build version file"
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
        run: ./scripts/build_shiv_package.sh
      - name: "Check cgroups version"
        run: mount | grep cgroup
      - name: "Install kind"
        run: ./tests/scripts/install-kind.sh
      - name: "Install Kubectl"
        run: ./tests/scripts/install-kubectl.sh
      - name: "Install ed" # Only needed until we remove the need to edit the spec file
        run: apt update && apt install -y ed
      - name: "Run container registry deployment test"
        run: |
          source /opt/bash-utils/cgroup-helper.sh
          join_cgroup
          ./tests/container-registry/run-test.sh

View File

@@ -1,52 +0,0 @@
name: Database Test

on:
  push:
    branches: '*'
    paths:
      - '!**'
      - '.gitea/workflows/triggers/test-database'
      - '.gitea/workflows/test-database.yml'
      - 'tests/database/run-test.sh'
  schedule: # Note: coordinate with other tests to not overload runners at the same time of day
    - cron: '5 18 * * *'

jobs:
  test:
    name: "Run database hosting test on kind/k8s"
    runs-on: ubuntu-22.04
    steps:
      - name: "Clone project repository"
        uses: actions/checkout@v3
      # At present the stock setup-python action fails on Linux/aarch64
      # Conditional steps below workaround this by using deadsnakes for that case only
      - name: "Install Python for ARM on Linux"
        if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
        uses: deadsnakes/action@v3.0.1
        with:
          python-version: '3.8'
      - name: "Install Python cases other than ARM on Linux"
        if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
        uses: actions/setup-python@v4
        with:
          python-version: '3.8'
      - name: "Print Python version"
        run: python3 --version
      - name: "Install shiv"
        run: pip install shiv
      - name: "Generate build version file"
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
        run: ./scripts/build_shiv_package.sh
      - name: "Check cgroups version"
        run: mount | grep cgroup
      - name: "Install kind"
        run: ./tests/scripts/install-kind.sh
      - name: "Install Kubectl"
        run: ./tests/scripts/install-kubectl.sh
      - name: "Run database deployment test"
        run: |
          source /opt/bash-utils/cgroup-helper.sh
          join_cgroup
          ./tests/database/run-test.sh

View File

@@ -1,54 +0,0 @@
name: K8s Deploy Test

on:
  pull_request:
    branches: '*'
  push:
    branches: '*'
    paths:
      - '!**'
      - '.gitea/workflows/triggers/test-k8s-deploy'
      - '.gitea/workflows/test-k8s-deploy.yml'
      - 'tests/k8s-deploy/run-deploy-test.sh'
  schedule: # Note: coordinate with other tests to not overload runners at the same time of day
    - cron: '3 15 * * *'

jobs:
  test:
    name: "Run deploy test suite on kind/k8s"
    runs-on: ubuntu-22.04
    steps:
      - name: "Clone project repository"
        uses: actions/checkout@v3
      # At present the stock setup-python action fails on Linux/aarch64
      # Conditional steps below workaround this by using deadsnakes for that case only
      - name: "Install Python for ARM on Linux"
        if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
        uses: deadsnakes/action@v3.0.1
        with:
          python-version: '3.8'
      - name: "Install Python cases other than ARM on Linux"
        if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
        uses: actions/setup-python@v4
        with:
          python-version: '3.8'
      - name: "Print Python version"
        run: python3 --version
      - name: "Install shiv"
        run: pip install shiv
      - name: "Generate build version file"
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
        run: ./scripts/build_shiv_package.sh
      - name: "Check cgroups version"
        run: mount | grep cgroup
      - name: "Install kind"
        run: ./tests/scripts/install-kind.sh
      - name: "Install Kubectl"
        run: ./tests/scripts/install-kubectl.sh
      - name: "Run k8s deployment test"
        run: |
          source /opt/bash-utils/cgroup-helper.sh
          join_cgroup
          ./tests/k8s-deploy/run-deploy-test.sh

View File

@@ -1,51 +0,0 @@
name: Webapp Test

on:
  pull_request:
    branches: '*'
  push:
    branches:
      - main
      - ci-test
    paths-ignore:
      - '.gitea/workflows/triggers/*'

# Needed until we can incorporate docker startup into the executor container
env:
  DOCKER_HOST: unix:///var/run/dind.sock

jobs:
  test:
    name: "Run webapp test suite"
    runs-on: ubuntu-latest
    steps:
      - name: "Clone project repository"
        uses: actions/checkout@v3
      # At present the stock setup-python action fails on Linux/aarch64
      # Conditional steps below workaround this by using deadsnakes for that case only
      - name: "Install Python for ARM on Linux"
        if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
        uses: deadsnakes/action@v3.0.1
        with:
          python-version: '3.8'
      - name: "Install Python cases other than ARM on Linux"
        if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
        uses: actions/setup-python@v4
        with:
          python-version: '3.8'
      - name: "Print Python version"
        run: python3 --version
      - name: "Install shiv"
        run: pip install shiv
      - name: "Generate build version file"
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
        run: ./scripts/build_shiv_package.sh
      - name: "Install wget" # 20240109 - Only needed until the executors are updated.
        run: apt update && apt install -y wget
      - name: Start dockerd # Also needed until we can incorporate into the executor
        run: |
          dockerd -H $DOCKER_HOST --userland-proxy=false &
          sleep 5
      - name: "Run webapp tests"
        run: ./tests/webapp-test/run-webapp-test.sh

View File

@@ -1,3 +1,2 @@
 Change this file to trigger running the fixturenet-eth-plugeth-test CI job
 trigger
-trigger

View File

@@ -1,3 +1,2 @@
 Change this file to trigger running the fixturenet-laconicd-test CI job
 Trigger
-Trigger

View File

@@ -1 +0,0 @@
Change this file to trigger running the test-container-registry CI job

View File

@@ -1,2 +0,0 @@
Change this file to trigger running the test-database CI job
Trigger test run

View File

@@ -1,2 +0,0 @@
Change this file to trigger running the test-k8s-deploy CI job
Trigger test on PR branch

View File

@@ -1,14 +1,15 @@
-name: Webapp Test
+name: Fixturenet-Plugeth Test
 on:
-  pull_request:
-    branches: '*'
   push:
     branches: '*'
+    paths:
+      - '!**'
+      - '.github/workflows/triggers/fixturenet-plugeth-test'
 jobs:
   test:
-    name: "Run webapp test suite"
+    name: "Run fixturenet-plugeth test suite"
     runs-on: ubuntu-latest
     steps:
       - name: "Clone project repository"
@@ -25,5 +26,5 @@ jobs:
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
         run: ./scripts/build_shiv_package.sh
-      - name: "Run webapp tests"
-        run: ./tests/webapp-test/run-webapp-test.sh
+      - name: "Run fixturenet-plugeth tests"
+        run: ./tests/fixturenet-plugeth/run-test.sh

View File

@@ -0,0 +1,3 @@
Change this file to trigger running the fixturenet-plugeth-test CI job
trigger

.gitignore
View File

@@ -6,5 +6,5 @@ laconic_stack_orchestrator.egg-info
 __pycache__
 *~
 package
-stack_orchestrator/data/build_tag.txt
+app/data/build_tag.txt
 /build

View File

@@ -29,10 +29,10 @@ chmod +x ~/.docker/cli-plugins/docker-compose
 Next decide on a directory where you would like to put the stack-orchestrator program. Typically this would be
 a "user" binary directory such as `~/bin` or perhaps `/usr/local/laconic` or possibly just the current working directory.

-Now, having selected that directory, download the latest release from [this page](https://git.vdb.to/cerc-io/stack-orchestrator/tags) into it (we're using `~/bin` below for concreteness but edit to suit if you selected a different directory). Also be sure that the destination directory exists and is writable:
+Now, having selected that directory, download the latest release from [this page](https://github.com/cerc-io/stack-orchestrator/tags) into it (we're using `~/bin` below for concreteness but edit to suit if you selected a different directory). Also be sure that the destination directory exists and is writable:
 ```bash
-curl -L -o ~/bin/laconic-so https://git.vdb.to/cerc-io/stack-orchestrator/releases/download/latest/laconic-so
+curl -L -o ~/bin/laconic-so https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so
 ```

 Give it execute permissions:
@@ -52,7 +52,7 @@ Version: 1.1.0-7a607c2-202304260513
 Save the distribution url to `~/.laconic-so/config.yml`:
 ```bash
 mkdir ~/.laconic-so
-echo "distribution-url: https://git.vdb.to/cerc-io/stack-orchestrator/releases/download/latest/laconic-so" > ~/.laconic-so/config.yml
+echo "distribution-url: https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so" > ~/.laconic-so/config.yml
 ```

 ### Update
@@ -64,12 +64,12 @@ laconic-so update
 ## Usage

-The various [stacks](/stack_orchestrator/data/stacks) each contain instructions for running different stacks based on your use case. For example:
+The various [stacks](/app/data/stacks) each contain instructions for running different stacks based on your use case. For example:

-- [self-hosted Gitea](/stack_orchestrator/data/stacks/build-support)
-- [an Optimism Fixturenet](/stack_orchestrator/data/stacks/fixturenet-optimism)
-- [laconicd with console and CLI](stack_orchestrator/data/stacks/fixturenet-laconic-loaded)
-- [kubo (IPFS)](stack_orchestrator/data/stacks/kubo)
+- [self-hosted Gitea](/app/data/stacks/build-support)
+- [an Optimism Fixturenet](/app/data/stacks/fixturenet-optimism)
+- [laconicd with console and CLI](app/data/stacks/fixturenet-laconic-loaded)
+- [kubo (IPFS)](app/data/stacks/kubo)

 ## Contributing
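Assembled from the context and changed lines above, the right-hand side's install sequence reads roughly as follows (the chmod spelling is inferred from the "Give it execute permissions" step, which this diff shows only as context):

    mkdir -p ~/bin
    curl -L -o ~/bin/laconic-so https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so
    chmod +x ~/bin/laconic-so
    mkdir ~/.laconic-so
    echo "distribution-url: https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so" > ~/.laconic-so/config.yml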

View File

@@ -15,7 +15,7 @@
 import os
 from abc import ABC, abstractmethod
-from stack_orchestrator.deploy.deploy import get_stack_status
+from app.deploy.deploy import get_stack_status
 from decouple import config

View File

@@ -27,103 +27,13 @@ import subprocess
 import click
 import importlib.resources
 from pathlib import Path
-from stack_orchestrator.util import include_exclude_check, get_parsed_stack_config, stack_is_external, warn_exit
-from stack_orchestrator.base import get_npm_registry_url
+from app.util import include_exclude_check, get_parsed_stack_config
+from app.base import get_npm_registry_url
 # TODO: find a place for this
 # epilog="Config provided either in .env or settings.ini or env vars: CERC_REPO_BASE_DIR (defaults to ~/cerc)"
-def make_container_build_env(dev_root_path: str,
-                             container_build_dir: str,
-                             debug: bool,
-                             force_rebuild: bool,
-                             extra_build_args: str):
-    container_build_env = {
-        "CERC_NPM_REGISTRY_URL": get_npm_registry_url(),
-        "CERC_GO_AUTH_TOKEN": config("CERC_GO_AUTH_TOKEN", default=""),
-        "CERC_NPM_AUTH_TOKEN": config("CERC_NPM_AUTH_TOKEN", default=""),
-        "CERC_REPO_BASE_DIR": dev_root_path,
-        "CERC_CONTAINER_BASE_DIR": container_build_dir,
-        "CERC_HOST_UID": f"{os.getuid()}",
-        "CERC_HOST_GID": f"{os.getgid()}",
-        "DOCKER_BUILDKIT": config("DOCKER_BUILDKIT", default="0")
-    }
-    container_build_env.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
-    container_build_env.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
-    container_build_env.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {})
-    docker_host_env = os.getenv("DOCKER_HOST")
-    if docker_host_env:
-        container_build_env.update({"DOCKER_HOST": docker_host_env})
-    return container_build_env
-def process_container(stack: str,
-                      container,
-                      container_build_dir: str,
-                      container_build_env: dict,
-                      dev_root_path: str,
-                      quiet: bool,
-                      verbose: bool,
-                      dry_run: bool,
-                      continue_on_error: bool,
-                      ):
-    if not quiet:
-        print(f"Building: {container}")
-    default_container_tag = f"{container}:local"
-    container_build_env.update({"CERC_DEFAULT_CONTAINER_IMAGE_TAG": default_container_tag})
-    # Check if this is in an external stack
-    if stack_is_external(stack):
-        container_parent_dir = Path(stack).joinpath("container-build")
-        temp_build_dir = container_parent_dir.joinpath(container.replace("/", "-"))
-        temp_build_script_filename = temp_build_dir.joinpath("build.sh")
-        # Now check if the container exists in the external stack.
-        if not temp_build_script_filename.exists():
-            # If not, revert to building an internal container
-            container_parent_dir = container_build_dir
-    else:
-        container_parent_dir = container_build_dir
-    build_dir = container_parent_dir.joinpath(container.replace("/", "-"))
-    build_script_filename = build_dir.joinpath("build.sh")
-    if verbose:
-        print(f"Build script filename: {build_script_filename}")
-    if os.path.exists(build_script_filename):
-        build_command = build_script_filename.as_posix()
-    else:
-        if verbose:
-            print(f"No script file found: {build_script_filename}, using default build script")
-        repo_dir = container.split('/')[1]
-        # TODO: make this less of a hack -- should be specified in some metadata somewhere
-        # Check if we have a repo for this container. If not, set the context dir to the container-build subdir
-        repo_full_path = os.path.join(dev_root_path, repo_dir)
-        repo_dir_or_build_dir = repo_full_path if os.path.exists(repo_full_path) else build_dir
-        build_command = os.path.join(container_build_dir,
-                                     "default-build.sh") + f" {default_container_tag} {repo_dir_or_build_dir}"
-    if not dry_run:
-        # No PATH at all causes failures with podman.
-        if "PATH" not in container_build_env:
-            container_build_env["PATH"] = os.environ["PATH"]
-        if verbose:
-            print(f"Executing: {build_command} with environment: {container_build_env}")
-        build_result = subprocess.run(build_command, shell=True, env=container_build_env)
-        if verbose:
-            print(f"Return code is: {build_result.returncode}")
-        if build_result.returncode != 0:
-            print(f"Error running build for {container}")
-            if not continue_on_error:
-                print("FATAL Error: container build failed and --continue-on-error not set, exiting")
-                sys.exit(1)
-            else:
-                print("****** Container Build Error, continuing because --continue-on-error is set")
-    else:
-        print("Skipped")
 @click.command()
 @click.option('--include', help="only build these containers")
 @click.option('--exclude', help="don\'t build these containers")
@@ -157,15 +67,13 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
         print('Dev root directory doesn\'t exist, creating')
     # See: https://stackoverflow.com/a/20885799/1701505
-    from stack_orchestrator import data
+    from app import data
     with importlib.resources.open_text(data, "container-image-list.txt") as container_list_file:
         all_containers = container_list_file.read().splitlines()
     containers_in_scope = []
     if stack:
         stack_config = get_parsed_stack_config(stack)
-        if "containers" not in stack_config or stack_config["containers"] is None:
-            warn_exit(f"stack {stack} does not define any containers")
         containers_in_scope = stack_config['containers']
     else:
         containers_in_scope = all_containers
@@ -175,16 +83,61 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
     if stack:
         print(f"Stack: {stack}")
-    container_build_env = make_container_build_env(dev_root_path,
-                                                   container_build_dir,
-                                                   debug,
-                                                   force_rebuild,
-                                                   extra_build_args)
+    # TODO: make this configurable
+    container_build_env = {
+        "CERC_NPM_REGISTRY_URL": get_npm_registry_url(),
+        "CERC_GO_AUTH_TOKEN": config("CERC_GO_AUTH_TOKEN", default=""),
+        "CERC_NPM_AUTH_TOKEN": config("CERC_NPM_AUTH_TOKEN", default=""),
+        "CERC_REPO_BASE_DIR": dev_root_path,
+        "CERC_CONTAINER_BASE_DIR": container_build_dir,
+        "CERC_HOST_UID": f"{os.getuid()}",
+        "CERC_HOST_GID": f"{os.getgid()}",
+        "DOCKER_BUILDKIT": config("DOCKER_BUILDKIT", default="0")
+    }
+    container_build_env.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
+    container_build_env.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
+    container_build_env.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {})
+    docker_host_env = os.getenv("DOCKER_HOST")
+    if docker_host_env:
+        container_build_env.update({"DOCKER_HOST": docker_host_env})
+    def process_container(container):
+        if not quiet:
+            print(f"Building: {container}")
+        build_dir = os.path.join(container_build_dir, container.replace("/", "-"))
+        build_script_filename = os.path.join(build_dir, "build.sh")
+        if verbose:
+            print(f"Build script filename: {build_script_filename}")
+        if os.path.exists(build_script_filename):
+            build_command = build_script_filename
+        else:
+            if verbose:
+                print(f"No script file found: {build_script_filename}, using default build script")
+            repo_dir = container.split('/')[1]
+            # TODO: make this less of a hack -- should be specified in some metadata somewhere
+            # Check if we have a repo for this container. If not, set the context dir to the container-build subdir
+            repo_full_path = os.path.join(dev_root_path, repo_dir)
+            repo_dir_or_build_dir = repo_full_path if os.path.exists(repo_full_path) else build_dir
+            build_command = os.path.join(container_build_dir, "default-build.sh") + f" {container}:local {repo_dir_or_build_dir}"
+        if not dry_run:
+            if verbose:
+                print(f"Executing: {build_command} with environment: {container_build_env}")
+            build_result = subprocess.run(build_command, shell=True, env=container_build_env)
+            if verbose:
+                print(f"Return code is: {build_result.returncode}")
+            if build_result.returncode != 0:
+                print(f"Error running build for {container}")
+                if not continue_on_error:
+                    print("FATAL Error: container build failed and --continue-on-error not set, exiting")
+                    sys.exit(1)
+                else:
+                    print("****** Container Build Error, continuing because --continue-on-error is set")
+        else:
+            print("Skipped")
     for container in containers_in_scope:
         if include_exclude_check(container, include, exclude):
-            process_container(stack, container, container_build_dir, container_build_env,
-                              dev_root_path, quiet, verbose, dry_run, continue_on_error)
+            process_container(container)
         else:
             if verbose:
                 print(f"Excluding: {container}")

View File

@@ -25,8 +25,8 @@ from decouple import config
 import click
 import importlib.resources
 from python_on_whales import docker, DockerException
-from stack_orchestrator.base import get_stack
-from stack_orchestrator.util import include_exclude_check, get_parsed_stack_config
+from app.base import get_stack
+from app.util import include_exclude_check, get_parsed_stack_config
 builder_js_image_name = "cerc/builder-js:local"
@@ -83,7 +83,7 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
         os.makedirs(build_root_path)
     # See: https://stackoverflow.com/a/20885799/1701505
-    from stack_orchestrator import data
+    from app import data
     with importlib.resources.open_text(data, "npm-package-list.txt") as package_list_file:
         all_packages = package_list_file.read().splitlines()

View File

@@ -4,6 +4,6 @@ services:
     image: cerc/laconic-console-host:local
     environment:
       - CERC_WEBAPP_FILES_DIR=${CERC_WEBAPP_FILES_DIR:-/usr/local/share/.config/yarn/global/node_modules/@cerc-io/console-app/dist/production}
-      - LACONIC_HOSTED_ENDPOINT=${LACONIC_HOSTED_ENDPOINT:-http://localhost:9473}
+      - LACONIC_HOSTED_ENDPOINT=${LACONIC_HOSTED_ENDPOINT:-http://localhost}
     ports:
       - "80"

View File

@@ -5,7 +5,7 @@ services:
     command: ["sh", "/docker-entrypoint-scripts.d/create-fixturenet.sh"]
     volumes:
       # The cosmos-sdk node's database directory:
-      - laconicd-data:/root/.laconicd
+      - laconicd-data:/root/.laconicd/data
       # TODO: look at folding these scripts into the container
       - ../config/fixturenet-laconicd/create-fixturenet.sh:/docker-entrypoint-scripts.d/create-fixturenet.sh
       - ../config/fixturenet-laconicd/export-mykey.sh:/docker-entrypoint-scripts.d/export-mykey.sh

View File

@@ -6,8 +6,8 @@ services:
   # Deploys the L1 smart contracts (outputs to volume l1_deployment)
   fixturenet-optimism-contracts:
     restart: on-failure
-    image: cerc/optimism-contracts:local
     hostname: fixturenet-optimism-contracts
+    image: cerc/optimism-contracts:local
     env_file:
       - ../config/fixturenet-optimism/l1-params.env
     environment:
@@ -17,49 +17,27 @@ services:
       CERC_L1_ACCOUNTS_CSV_URL: ${CERC_L1_ACCOUNTS_CSV_URL}
       CERC_L1_ADDRESS: ${CERC_L1_ADDRESS}
       CERC_L1_PRIV_KEY: ${CERC_L1_PRIV_KEY}
-      CERC_L1_ADDRESS_2: ${CERC_L1_ADDRESS_2}
-      CERC_L1_PRIV_KEY_2: ${CERC_L1_PRIV_KEY_2}
+    # Waits for L1 endpoint to be up before running the script
+    command: |
+      "./wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- ./run.sh"
     volumes:
       - ../config/network/wait-for-it.sh:/app/packages/contracts-bedrock/wait-for-it.sh
-      - ../config/fixturenet-optimism/optimism-contracts/deploy-contracts.sh:/app/packages/contracts-bedrock/deploy-contracts.sh
+      - ../config/optimism-contracts/hardhat-tasks/verify-contract-deployment.ts:/app/packages/contracts-bedrock/tasks/verify-contract-deployment.ts
+      - ../config/optimism-contracts/hardhat-tasks/rekey-json.ts:/app/packages/contracts-bedrock/tasks/rekey-json.ts
+      - ../config/optimism-contracts/hardhat-tasks/send-balance.ts:/app/packages/contracts-bedrock/tasks/send-balance.ts
+      - ../config/fixturenet-optimism/optimism-contracts/update-config.js:/app/packages/contracts-bedrock/update-config.js
+      - ../config/fixturenet-optimism/optimism-contracts/run.sh:/app/packages/contracts-bedrock/run.sh
       - l2_accounts:/l2-accounts
-      - l1_deployment:/l1-deployment
-      - l2_config:/l2-config
-    # Waits for L1 endpoint to be up before running the contract deploy script
-    command: |
-      "./wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- ./deploy-contracts.sh"
-  # Initializes and runs the L2 execution client (outputs to volume l2_geth_data)
-  op-geth:
-    restart: always
-    image: cerc/optimism-l2geth:local
-    hostname: op-geth
-    depends_on:
-      op-node:
-        condition: service_started
-    volumes:
-      - ../config/fixturenet-optimism/run-op-geth.sh:/run-op-geth.sh
-      - l2_config:/l2-config:ro
-      - l2_accounts:/l2-accounts:ro
-      - l2_geth_data:/datadir
-    entrypoint: "sh"
-    command: "/run-op-geth.sh"
-    ports:
-      - "8545"
-      - "8546"
-    healthcheck:
-      test: ["CMD", "nc", "-vz", "localhost:8545"]
-      interval: 30s
-      timeout: 10s
-      retries: 100
-      start_period: 10s
+      - l1_deployment:/app/packages/contracts-bedrock
     extra_hosts:
       - "host.docker.internal:host-gateway"
-  # Runs the L2 consensus client (Sequencer node)
-  # Generates the L2 config files if not already present (outputs to volume l2_config)
-  op-node:
-    restart: always
+  # Generates the config files required for L2 (outputs to volume l2_config)
+  op-node-l2-config-gen:
+    restart: on-failure
     image: cerc/optimism-op-node:local
-    hostname: op-node
     depends_on:
       fixturenet-optimism-contracts:
         condition: service_completed_successfully
@@ -69,19 +47,61 @@ services:
       CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
       CERC_L1_RPC: ${CERC_L1_RPC}
     volumes:
-      - ../config/fixturenet-optimism/run-op-node.sh:/run-op-node.sh
-      - l1_deployment:/l1-deployment:ro
-      - l2_config:/l2-config
+      - ../config/fixturenet-optimism/generate-l2-config.sh:/app/generate-l2-config.sh
+      - l1_deployment:/contracts-bedrock:ro
+      - l2_config:/app
+    command: ["sh", "/app/generate-l2-config.sh"]
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
+  # Initializes and runs the L2 execution client (outputs to volume l2_geth_data)
+  op-geth:
+    restart: always
+    image: cerc/optimism-l2geth:local
+    depends_on:
+      op-node-l2-config-gen:
+        condition: service_started
+    volumes:
+      - ../config/fixturenet-optimism/run-op-geth.sh:/run-op-geth.sh
+      - l2_config:/op-node:ro
       - l2_accounts:/l2-accounts:ro
+      - l2_geth_data:/datadir
     entrypoint: "sh"
-    command: "/run-op-node.sh"
+    command: "/run-op-geth.sh"
     ports:
-      - "8547"
+      - "0.0.0.0:8545:8545"
+      - "0.0.0.0:8546:8546"
+    healthcheck:
+      test: ["CMD", "nc", "-vz", "localhost:8545"]
+      interval: 30s
+      timeout: 10s
+      retries: 10
+      start_period: 10s
+  # Runs the L2 consensus client (Sequencer node)
+  op-node:
+    restart: always
+    image: cerc/optimism-op-node:local
+    depends_on:
+      op-geth:
+        condition: service_healthy
+    env_file:
+      - ../config/fixturenet-optimism/l1-params.env
+    environment:
+      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
+      CERC_L1_RPC: ${CERC_L1_RPC}
+    volumes:
+      - ../config/fixturenet-optimism/run-op-node.sh:/app/run-op-node.sh
+      - l2_config:/op-node-data:ro
+      - l2_accounts:/l2-accounts:ro
+    command: ["sh", "/app/run-op-node.sh"]
+    ports:
+      - "0.0.0.0:8547:8547"
     healthcheck:
       test: ["CMD", "nc", "-vz", "localhost:8547"]
       interval: 30s
       timeout: 10s
-      retries: 100
+      retries: 10
       start_period: 10s
     extra_hosts:
       - "host.docker.internal:host-gateway"
@@ -90,7 +110,6 @@ services:
   op-batcher:
     restart: always
     image: cerc/optimism-op-batcher:local
-    hostname: op-batcher
    depends_on:
      op-node:
        condition: service_healthy
@@ -110,7 +129,7 @@ services:
     command: |
       "/wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- /run-op-batcher.sh"
     ports:
-      - "8548"
+      - "127.0.0.1:8548:8548"
     extra_hosts:
       - "host.docker.internal:host-gateway"
@@ -118,29 +137,25 @@ services:
   op-proposer:
     restart: always
     image: cerc/optimism-op-proposer:local
-    hostname: op-proposer
     depends_on:
       op-node:
         condition: service_healthy
-      op-geth:
-        condition: service_healthy
     env_file:
       - ../config/fixturenet-optimism/l1-params.env
     environment:
       CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
       CERC_L1_RPC: ${CERC_L1_RPC}
-      CERC_L1_CHAIN_ID: ${CERC_L1_CHAIN_ID}
     volumes:
       - ../config/network/wait-for-it.sh:/wait-for-it.sh
       - ../config/fixturenet-optimism/run-op-proposer.sh:/run-op-proposer.sh
-      - l1_deployment:/l1-deployment:ro
+      - l1_deployment:/contracts-bedrock:ro
       - l2_accounts:/l2-accounts:ro
     entrypoint: ["sh", "-c"]
     # Waits for L1 endpoint to be up before running the proposer
     command: |
       "/wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- /run-op-proposer.sh"
     ports:
-      - "8560"
+      - "127.0.0.1:8560:8560"
     extra_hosts:
       - "host.docker.internal:host-gateway"

View File

@@ -0,0 +1,13 @@
version: "3.2"
# See: https://docs.ipfs.tech/install/run-ipfs-inside-docker/#set-up
services:
  ipfs:
    image: ipfs/kubo:master-2023-02-20-714a968
    restart: always
    volumes:
      - ./ipfs/import:/import
      - ./ipfs/data:/data/ipfs
    ports:
      - "0.0.0.0:8080:8080"
      - "0.0.0.0:4001:4001"
      - "0.0.0.0:5001:5001"

View File

@@ -5,15 +5,10 @@ services:
     environment:
       CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
       CERC_TEST_PARAM_1: ${CERC_TEST_PARAM_1:-FAILED}
-      CERC_TEST_PARAM_2: "CERC_TEST_PARAM_2_VALUE"
     volumes:
-      - test-data-bind:/data
-      - test-data-auto:/data2
-      - test-config:/config:ro
+      - test-data:/data
     ports:
       - "80"
 volumes:
-  test-data-bind:
-  test-data-auto:
-  test-config:
+  test-data:

View File

@@ -10,7 +10,6 @@ services:
       - POSTGRES_MULTIPLE_DATABASES=azimuth-watcher,azimuth-watcher-job-queue,censures-watcher,censures-watcher-job-queue,claims-watcher,claims-watcher-job-queue,conditional-star-release-watcher,conditional-star-release-watcher-job-queue,delegated-sending-watcher,delegated-sending-watcher-job-queue,ecliptic-watcher,ecliptic-watcher-job-queue,linear-star-release-watcher,linear-star-release-watcher-job-queue,polls-watcher,polls-watcher-job-queue
      - POSTGRES_EXTENSION=azimuth-watcher-job-queue:pgcrypto,censures-watcher-job-queue:pgcrypto,claims-watcher-job-queue:pgcrypto,conditional-star-release-watcher-job-queue:pgcrypto,delegated-sending-watcher-job-queue:pgcrypto,ecliptic-watcher-job-queue:pgcrypto,linear-star-release-watcher-job-queue:pgcrypto,polls-watcher-job-queue:pgcrypto,
      - POSTGRES_PASSWORD=password
-    command: ["postgres", "-c", "max_connections=200"]
     volumes:
       - ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh
       - watcher_db_data:/var/lib/postgresql/data
@@ -23,38 +22,6 @@ services:
       retries: 15
       start_period: 10s
-  # Starts the azimuth-watcher job runner
-  azimuth-watcher-job-runner:
-    image: cerc/watcher-azimuth:local
-    restart: unless-stopped
-    depends_on:
-      watcher-db:
-        condition: service_healthy
-    environment:
-      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-      CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
-      CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
-      CERC_HISTORICAL_BLOCK_RANGE: 500
-      CONTRACT_ADDRESS: 0x223c067F8CF28ae173EE5CafEa60cA44C335fecB
-      CONTRACT_NAME: Azimuth
-      STARTING_BLOCK: 6784880
-    working_dir: /app/packages/azimuth-watcher
-    command: "./start-job-runner.sh"
-    volumes:
-      - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/azimuth-watcher/environments/watcher-config-template.toml
-      - ../config/watcher-azimuth/merge-toml.js:/app/packages/azimuth-watcher/merge-toml.js
-      - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/azimuth-watcher/start-job-runner.sh
-    ports:
-      - "9000"
-    healthcheck:
-      test: ["CMD", "nc", "-vz", "localhost", "9000"]
-      interval: 20s
-      timeout: 5s
-      retries: 15
-      start_period: 5s
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
   # Starts the azimuth-watcher server
   azimuth-watcher-server:
     image: cerc/watcher-azimuth:local
@@ -62,8 +29,8 @@ services:
     depends_on:
       watcher-db:
         condition: service_healthy
-      azimuth-watcher-job-runner:
-        condition: service_healthy
+    env_file:
+      - ../config/watcher-azimuth/watcher-params.env
     environment:
       CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
       CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
@@ -85,37 +52,6 @@ services:
     extra_hosts:
       - "host.docker.internal:host-gateway"
-  # Starts the censures-watcher job runner
-  censures-watcher-job-runner:
-    image: cerc/watcher-azimuth:local
-    restart: unless-stopped
-    depends_on:
-      watcher-db:
-        condition: service_healthy
-    environment:
-      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-      CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
-      CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
-      CONTRACT_ADDRESS: 0x325f68d32BdEe6Ed86E7235ff2480e2A433D6189
-      CONTRACT_NAME: Censures
-      STARTING_BLOCK: 6784954
-    working_dir: /app/packages/censures-watcher
-    command: "./start-job-runner.sh"
-    volumes:
-      - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/censures-watcher/environments/watcher-config-template.toml
-      - ../config/watcher-azimuth/merge-toml.js:/app/packages/censures-watcher/merge-toml.js
-      - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/censures-watcher/start-job-runner.sh
-    ports:
-      - "9002"
-    healthcheck:
-      test: ["CMD", "nc", "-vz", "localhost", "9002"]
-      interval: 20s
-      timeout: 5s
-      retries: 15
-      start_period: 5s
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
   # Starts the censures-watcher server
   censures-watcher-server:
     image: cerc/watcher-azimuth:local
@@ -123,8 +59,8 @@ services:
     depends_on:
       watcher-db:
         condition: service_healthy
-      censures-watcher-job-runner:
-        condition: service_healthy
+    env_file:
+      - ../config/watcher-azimuth/watcher-params.env
     environment:
       CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
       CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
@@ -146,37 +82,6 @@ services:
     extra_hosts:
       - "host.docker.internal:host-gateway"
-  # Starts the claims-watcher job runner
-  claims-watcher-job-runner:
-    image: cerc/watcher-azimuth:local
-    restart: unless-stopped
-    depends_on:
-      watcher-db:
-        condition: service_healthy
-    environment:
-      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-      CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
-      CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
-      CONTRACT_ADDRESS: 0xe7e7f69b34D7d9Bd8d61Fb22C33b22708947971A
-      CONTRACT_NAME: Claims
-      STARTING_BLOCK: 6784941
-    working_dir: /app/packages/claims-watcher
-    command: "./start-job-runner.sh"
-    volumes:
-      - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/claims-watcher/environments/watcher-config-template.toml
-      - ../config/watcher-azimuth/merge-toml.js:/app/packages/claims-watcher/merge-toml.js
-      - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/claims-watcher/start-job-runner.sh
-    ports:
-      - "9004"
-    healthcheck:
-      test: ["CMD", "nc", "-vz", "localhost", "9004"]
-      interval: 20s
-      timeout: 5s
-      retries: 15
-      start_period: 5s
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
   # Starts the claims-watcher server
   claims-watcher-server:
     image: cerc/watcher-azimuth:local
@@ -184,8 +89,8 @@ services:
     depends_on:
       watcher-db:
         condition: service_healthy
-      claims-watcher-job-runner:
-        condition: service_healthy
+    env_file:
+      - ../config/watcher-azimuth/watcher-params.env
     environment:
       CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
       CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
@@ -207,37 +112,6 @@ services:
     extra_hosts:
       - "host.docker.internal:host-gateway"
-  # Starts the conditional-star-release-watcher job runner
-  conditional-star-release-watcher-job-runner:
-    image: cerc/watcher-azimuth:local
-    restart: unless-stopped
-    depends_on:
-      watcher-db:
-        condition: service_healthy
-    environment:
-      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-      CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
-      CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
-      CONTRACT_ADDRESS: 0x8C241098C3D3498Fe1261421633FD57986D74AeA
-      CONTRACT_NAME: ConditionalStarRelease
-      STARTING_BLOCK: 6828004
-    working_dir: /app/packages/conditional-star-release-watcher
-    command: "./start-job-runner.sh"
-    volumes:
-      - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/conditional-star-release-watcher/environments/watcher-config-template.toml
-      - ../config/watcher-azimuth/merge-toml.js:/app/packages/conditional-star-release-watcher/merge-toml.js
-      - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/conditional-star-release-watcher/start-job-runner.sh
-    ports:
-      - "9006"
-    healthcheck:
-      test: ["CMD", "nc", "-vz", "localhost", "9006"]
-      interval: 20s
-      timeout: 5s
-      retries: 15
-      start_period: 5s
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
   # Starts the conditional-star-release-watcher server
   conditional-star-release-watcher-server:
     image: cerc/watcher-azimuth:local
@@ -245,8 +119,8 @@ services:
     depends_on:
       watcher-db:
         condition: service_healthy
-      conditional-star-release-watcher-job-runner:
-        condition: service_healthy
+    env_file:
+      - ../config/watcher-azimuth/watcher-params.env
     environment:
       CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
       CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
@@ -268,37 +142,6 @@ services:
     extra_hosts:
       - "host.docker.internal:host-gateway"
-  # Starts the delegated-sending-watcher job runner
-  delegated-sending-watcher-job-runner:
-    image: cerc/watcher-azimuth:local
-    restart: unless-stopped
-    depends_on:
-      watcher-db:
-        condition: service_healthy
-    environment:
-      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-      CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
-      CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
-      CONTRACT_ADDRESS: 0xf6b461fE1aD4bd2ce25B23Fe0aff2ac19B3dFA76
-      CONTRACT_NAME: DelegatedSending
-      STARTING_BLOCK: 6784956
-    working_dir: /app/packages/delegated-sending-watcher
-    command: "./start-job-runner.sh"
-    volumes:
-      - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/delegated-sending-watcher/environments/watcher-config-template.toml
-      - ../config/watcher-azimuth/merge-toml.js:/app/packages/delegated-sending-watcher/merge-toml.js
-      - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/delegated-sending-watcher/start-job-runner.sh
-    ports:
-      - "9008"
-    healthcheck:
-      test: ["CMD", "nc", "-vz", "localhost", "9008"]
-      interval: 20s
-      timeout: 5s
-      retries: 15
-      start_period: 5s
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
   # Starts the delegated-sending-watcher server
   delegated-sending-watcher-server:
     image: cerc/watcher-azimuth:local
@@ -306,8 +149,8 @@ services:
     depends_on:
       watcher-db:
         condition: service_healthy
-      delegated-sending-watcher-job-runner:
-        condition: service_healthy
+    env_file:
+      - ../config/watcher-azimuth/watcher-params.env
     environment:
       CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
       CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
@@ -329,37 +172,6 @@ services:
     extra_hosts:
       - "host.docker.internal:host-gateway"
-  # Starts the ecliptic-watcher job runner
-  ecliptic-watcher-job-runner:
-    image: cerc/watcher-azimuth:local
-    restart: unless-stopped
-    depends_on:
-      watcher-db:
-        condition: service_healthy
-    environment:
-      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-      CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
-      CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
-      CONTRACT_ADDRESS: 0x33EeCbf908478C10614626A9D304bfe18B78DD73
-      CONTRACT_NAME: Ecliptic
-      STARTING_BLOCK: 13692129
-    working_dir: /app/packages/ecliptic-watcher
-    command: "./start-job-runner.sh"
-    volumes:
-      - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/ecliptic-watcher/environments/watcher-config-template.toml
-      - ../config/watcher-azimuth/merge-toml.js:/app/packages/ecliptic-watcher/merge-toml.js
-      - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/ecliptic-watcher/start-job-runner.sh
-    ports:
-      - "9010"
-    healthcheck:
-      test: ["CMD", "nc", "-vz", "localhost", "9010"]
-      interval: 20s
-      timeout: 5s
-      retries: 15
-      start_period: 5s
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
   # Starts the ecliptic-watcher server
   ecliptic-watcher-server:
     image: cerc/watcher-azimuth:local
@@ -367,8 +179,8 @@ services:
     depends_on:
       watcher-db:
         condition: service_healthy
-      ecliptic-watcher-job-runner:
-        condition: service_healthy
+    env_file:
+      - ../config/watcher-azimuth/watcher-params.env
     environment:
       CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
       CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
@@ -390,37 +202,6 @@ services:
     extra_hosts:
       - "host.docker.internal:host-gateway"
-  # Starts the linear-star-release-watcher job runner
-  linear-star-release-watcher-job-runner:
-    image: cerc/watcher-azimuth:local
-    restart: unless-stopped
-    depends_on:
-      watcher-db:
-        condition: service_healthy
-    environment:
-      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-      CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
-      CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
-      CONTRACT_ADDRESS: 0x86cd9cd0992F04231751E3761De45cEceA5d1801
-      CONTRACT_NAME: LinearStarRelease
-      STARTING_BLOCK: 6784943
-    working_dir: /app/packages/linear-star-release-watcher
-    command: "./start-job-runner.sh"
-    volumes:
-      - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/linear-star-release-watcher/environments/watcher-config-template.toml
-      - ../config/watcher-azimuth/merge-toml.js:/app/packages/linear-star-release-watcher/merge-toml.js
-      - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/linear-star-release-watcher/start-job-runner.sh
-    ports:
-      - "9012"
-    healthcheck:
-      test: ["CMD", "nc", "-vz", "localhost", "9012"]
-      interval: 20s
-      timeout: 5s
-      retries: 15
-      start_period: 5s
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
   # Starts the linear-star-release-watcher server
   linear-star-release-watcher-server:
     image: cerc/watcher-azimuth:local
@@ -428,8 +209,8 @@ services:
     depends_on:
       watcher-db:
         condition: service_healthy
-      linear-star-release-watcher-job-runner:
-        condition: service_healthy
+    env_file:
+      - ../config/watcher-azimuth/watcher-params.env
     environment:
       CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
       CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
@@ -451,37 +232,6 @@ services:
     extra_hosts:
       - "host.docker.internal:host-gateway"
-  # Starts the polls-watcher job runner
-  polls-watcher-job-runner:
-    image: cerc/watcher-azimuth:local
-    restart: unless-stopped
-    depends_on:
-      watcher-db:
-        condition: service_healthy
-    environment:
-      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-      CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
-      CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
-      CONTRACT_ADDRESS: 0x7fEcaB617c868Bb5996d99D95200D2Fa708218e4
-      CONTRACT_NAME: Polls
-      STARTING_BLOCK: 6784912
-    working_dir: /app/packages/polls-watcher
-    command: "./start-job-runner.sh"
-    volumes:
-      - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/polls-watcher/environments/watcher-config-template.toml
-      - ../config/watcher-azimuth/merge-toml.js:/app/packages/polls-watcher/merge-toml.js
-      - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/polls-watcher/start-job-runner.sh
-    ports:
-      - "9014"
-    healthcheck:
-      test: ["CMD", "nc", "-vz", "localhost", "9014"]
-      interval: 20s
-      timeout: 5s
-      retries: 15
-      start_period: 5s
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
   # Starts the polls-watcher server
   polls-watcher-server:
     image: cerc/watcher-azimuth:local
@@ -489,8 +239,8 @@ services:
     depends_on:
       watcher-db:
         condition: service_healthy
-      polls-watcher-job-runner:
-        condition: service_healthy
+    env_file:
+      - ../config/watcher-azimuth/watcher-params.env
     environment:
       CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
       CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}

View File

@ -0,0 +1,118 @@
#!/bin/bash
# TODO: this file is now an unmodified copy of cerc-io/laconicd/init.sh
# so we should have a mechanism to bundle it inside the container rather than link from here
# at deploy time.
KEY="mykey"
CHAINID="laconic_9000-1"
MONIKER="localtestnet"
KEYRING="test"
KEYALGO="eth_secp256k1"
LOGLEVEL="info"
# trace evm
TRACE="--trace"
# TRACE=""
# validate dependencies are installed
command -v jq > /dev/null 2>&1 || { echo >&2 "jq not installed. More info: https://stedolan.github.io/jq/download/"; exit 1; }
# remove existing daemon and client
rm -rf ~/.laconic*
make install
laconicd config keyring-backend $KEYRING
laconicd config chain-id $CHAINID
# if $KEY exists it should be deleted
laconicd keys add $KEY --keyring-backend $KEYRING --algo $KEYALGO
# Set moniker and chain-id for Ethermint (Moniker can be anything, chain-id must be an integer)
laconicd init $MONIKER --chain-id $CHAINID
# Change parameter token denominations to aphoton
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["staking"]["params"]["bond_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["crisis"]["constant_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["gov"]["deposit_params"]["min_deposit"][0]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["mint"]["params"]["mint_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
# Custom modules
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["record_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_commit_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_reveal_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_minimum_bid"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
if [[ "$TEST_REGISTRY_EXPIRY" == "true" ]]; then
echo "Setting timers for expiry tests."
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["record_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_grace_period"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
fi
if [[ "$TEST_AUCTION_ENABLED" == "true" ]]; then
echo "Enabling auction and setting timers."
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_enabled"]=true' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_grace_period"]="300s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_commits_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_reveals_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
fi
# increase block time (?)
cat $HOME/.laconicd/config/genesis.json | jq '.consensus_params["block"]["time_iota_ms"]="1000"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
# Set gas limit in genesis
cat $HOME/.laconicd/config/genesis.json | jq '.consensus_params["block"]["max_gas"]="10000000"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
# disable produce empty block
if [[ "$OSTYPE" == "darwin"* ]]; then
sed -i '' 's/create_empty_blocks = true/create_empty_blocks = false/g' $HOME/.laconicd/config/config.toml
else
sed -i 's/create_empty_blocks = true/create_empty_blocks = false/g' $HOME/.laconicd/config/config.toml
fi
if [[ $1 == "pending" ]]; then
if [[ "$OSTYPE" == "darwin"* ]]; then
sed -i '' 's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_propose = "3s"/timeout_propose = "30s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' $HOME/.laconicd/config/config.toml
else
sed -i 's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_propose = "3s"/timeout_propose = "30s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' $HOME/.laconicd/config/config.toml
fi
fi
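# The darwin/linux branches above differ only in sed's in-place flag; a portable
# helper could remove the duplication. A sketch, assuming GNU sed on Linux and
# BSD sed on macOS (the sedi function name is ours, not part of this repo):
#   sedi() {
#     if [[ "$OSTYPE" == "darwin"* ]]; then sed -i '' "$@"; else sed -i "$@"; fi
#   }
#   sedi 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml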
# Allocate genesis accounts (cosmos formatted addresses)
laconicd add-genesis-account $KEY 100000000000000000000000000aphoton --keyring-backend $KEYRING
# Sign genesis transaction
laconicd gentx $KEY 1000000000000000000000aphoton --keyring-backend $KEYRING --chain-id $CHAINID
# Collect genesis tx
laconicd collect-gentxs
# Run this to ensure everything worked and that the genesis file is set up correctly
laconicd validate-genesis
if [[ $1 == "pending" ]]; then
echo "pending mode is on, please wait for the first block committed."
fi
# Start the node (remove the --pruning=nothing flag if historical queries are not needed)
laconicd start --pruning=nothing --evm.tracer=json $TRACE --log_level $LOGLEVEL --minimum-gas-prices=0.0001aphoton --json-rpc.api eth,txpool,personal,net,debug,web3,miner --api.enable --gql-server --gql-playground
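# Once the node is up, liveness can be checked over the Ethermint JSON-RPC
# endpoint; a sketch, assuming the default JSON-RPC port 8545:
#   curl -s -X POST -H "Content-Type: application/json" \
#     --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' \
#     http://localhost:8545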


@ -0,0 +1,37 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}"
# Check the existing config if one exists
if [ -f /app/jwt.txt ] && [ -f /app/rollup.json ]; then
echo "Found existing L2 config, cross-checking with L1 deployment config"
SOURCE_L1_CONF=$(cat /contracts-bedrock/deploy-config/getting-started.json)
EXP_L1_BLOCKHASH=$(echo "$SOURCE_L1_CONF" | jq -r '.l1StartingBlockTag')
EXP_BATCHER=$(echo "$SOURCE_L1_CONF" | jq -r '.batchSenderAddress')
GEN_L2_CONF=$(cat /app/rollup.json)
GEN_L1_BLOCKHASH=$(echo "$GEN_L2_CONF" | jq -r '.genesis.l1.hash')
GEN_BATCHER=$(echo "$GEN_L2_CONF" | jq -r '.genesis.system_config.batcherAddr')
if [ "$EXP_L1_BLOCKHASH" = "$GEN_L1_BLOCKHASH" ] && [ "$EXP_BATCHER" = "$GEN_BATCHER" ]; then
echo "Config cross-checked, exiting"
exit 0
fi
echo "Existing L2 config doesn't match the L1 deployment config, please clear L2 config volume before starting"
exit 1
fi
op-node genesis l2 \
--deploy-config /contracts-bedrock/deploy-config/getting-started.json \
--deployment-dir /contracts-bedrock/deployments/getting-started/ \
--outfile.l2 /app/genesis.json \
--outfile.rollup /app/rollup.json \
--l1-rpc $CERC_L1_RPC
openssl rand -hex 32 > /app/jwt.txt
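# The generated artifacts can be spot-checked by hand; a sketch (illustrative
# only, mirroring the cross-check performed above on restart):
#   jq -r '.genesis.l1.hash, .genesis.system_config.batcherAddr' /app/rollup.json
#   wc -c /app/jwt.txt   # expect 65 bytes: 64 hex chars plus a newline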


@ -0,0 +1,131 @@
#!/bin/bash
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
CERC_L1_CHAIN_ID="${CERC_L1_CHAIN_ID:-${DEFAULT_CERC_L1_CHAIN_ID}}"
CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}"
CERC_L1_ACCOUNTS_CSV_URL="${CERC_L1_ACCOUNTS_CSV_URL:-${DEFAULT_CERC_L1_ACCOUNTS_CSV_URL}}"
echo "Using L1 RPC endpoint ${CERC_L1_RPC}"
IMPORT_1="import './verify-contract-deployment'"
IMPORT_2="import './rekey-json'"
IMPORT_3="import './send-balance'"
# Append mounted tasks to tasks/index.ts file if not present
if ! grep -Fxq "$IMPORT_1" tasks/index.ts; then
echo "$IMPORT_1" >> tasks/index.ts
echo "$IMPORT_2" >> tasks/index.ts
echo "$IMPORT_3" >> tasks/index.ts
fi
# Update the chainId in the hardhat config
sed -i "/getting-started/ {n; s/.*chainId.*/ chainId: $CERC_L1_CHAIN_ID,/}" hardhat.config.ts
# Exit if a deployment already exists (on restarts)
# Note: fixturenet-eth-geth currently starts fresh on a restart
if [ -d "deployments/getting-started" ]; then
echo "Deployment directory deployments/getting-started found, checking SystemDictator deployment"
# Read JSON file into variable
SYSTEM_DICTATOR_DETAILS=$(cat deployments/getting-started/SystemDictator.json)
# Parse JSON into variables
SYSTEM_DICTATOR_ADDRESS=$(echo "$SYSTEM_DICTATOR_DETAILS" | jq -r '.address')
SYSTEM_DICTATOR_TXHASH=$(echo "$SYSTEM_DICTATOR_DETAILS" | jq -r '.transactionHash')
if yarn hardhat verify-contract-deployment --contract "${SYSTEM_DICTATOR_ADDRESS}" --transaction-hash "${SYSTEM_DICTATOR_TXHASH}"; then
echo "Deployment verfication successful, exiting"
exit 0
else
echo "Deployment verfication failed, please clear L1 deployment volume before starting"
exit 1
fi
fi
# Generate the L2 account addresses
yarn hardhat rekey-json --output /l2-accounts/keys.json
# Read JSON file into variable
KEYS_JSON=$(cat /l2-accounts/keys.json)
# Parse JSON into variables
ADMIN_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Admin.address')
ADMIN_PRIV_KEY=$(echo "$KEYS_JSON" | jq -r '.Admin.privateKey')
PROPOSER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Proposer.address')
BATCHER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Batcher.address')
SEQUENCER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Sequencer.address')
# Get the private keys of L1 accounts
if [ -n "$CERC_L1_ACCOUNTS_CSV_URL" ] && \
l1_accounts_response=$(curl -L --write-out '%{http_code}' --silent --output /dev/null "$CERC_L1_ACCOUNTS_CSV_URL") && \
[ "$l1_accounts_response" -eq 200 ];
then
echo "Fetching L1 account credentials using provided URL"
mkdir -p /geth-accounts
wget -O /geth-accounts/accounts.csv "$CERC_L1_ACCOUNTS_CSV_URL"
CERC_L1_ADDRESS=$(head -n 1 /geth-accounts/accounts.csv | cut -d ',' -f 2)
CERC_L1_PRIV_KEY=$(head -n 1 /geth-accounts/accounts.csv | cut -d ',' -f 3)
CERC_L1_ADDRESS_2=$(awk -F, 'NR==2{print $(NF-1)}' /geth-accounts/accounts.csv)
CERC_L1_PRIV_KEY_2=$(awk -F, 'NR==2{print $NF}' /geth-accounts/accounts.csv)
else
echo "Couldn't fetch L1 account credentials, using them from env"
fi
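# The accounts CSV is assumed to hold one account per line as
# "<name>,<address>,<private-key>"; an equivalent single read of the first
# account (illustrative only):
#   read -r CERC_L1_ADDRESS CERC_L1_PRIV_KEY < <(awk -F, 'NR==1 {print $2, $3}' /geth-accounts/accounts.csv)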
# Send balances to the above L2 addresses
yarn hardhat send-balance --to "${ADMIN_ADDRESS}" --amount 2 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started
yarn hardhat send-balance --to "${PROPOSER_ADDRESS}" --amount 5 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started
yarn hardhat send-balance --to "${BATCHER_ADDRESS}" --amount 1000 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started
echo "Balances sent to L2 accounts"
# Select a finalized L1 block as the starting point for rollups
until FINALIZED_BLOCK=$(cast block finalized --rpc-url "$CERC_L1_RPC"); do
echo "Waiting for a finalized L1 block to exist, retrying after 10s"
sleep 10
done
L1_BLOCKNUMBER=$(echo "$FINALIZED_BLOCK" | awk '/number/{print $2}')
L1_BLOCKHASH=$(echo "$FINALIZED_BLOCK" | awk '/hash/{print $2}')
L1_BLOCKTIMESTAMP=$(echo "$FINALIZED_BLOCK" | awk '/timestamp/{print $2}')
echo "Selected L1 block ${L1_BLOCKNUMBER} as the starting block for roll ups"
# Update the deployment config
sed -i 's/"l2OutputOracleStartingTimestamp": TIMESTAMP/"l2OutputOracleStartingTimestamp": '"$L1_BLOCKTIMESTAMP"'/g' deploy-config/getting-started.json
jq --arg chainid "$CERC_L1_CHAIN_ID" '.l1ChainID = ($chainid | tonumber)' deploy-config/getting-started.json > tmp.json && mv tmp.json deploy-config/getting-started.json
node update-config.js deploy-config/getting-started.json "$ADMIN_ADDRESS" "$PROPOSER_ADDRESS" "$BATCHER_ADDRESS" "$SEQUENCER_ADDRESS" "$L1_BLOCKHASH"
echo "Updated the deployment config"
# Create a .env file
echo "L1_RPC=$CERC_L1_RPC" > .env
echo "PRIVATE_KEY_DEPLOYER=$ADMIN_PRIV_KEY" >> .env
echo "Deploying the L1 smart contracts, this will take a while..."
# Deploy the L1 smart contracts
yarn hardhat deploy --network getting-started --tags l1
echo "Deployed the L1 smart contracts"
# Read Proxy contract's JSON and get the address
PROXY_JSON=$(cat deployments/getting-started/Proxy__OVM_L1StandardBridge.json)
PROXY_ADDRESS=$(echo "$PROXY_JSON" | jq -r '.address')
# Send balances to the above Proxy contract on L1 so they are reflected on L2
# First account
yarn hardhat send-balance --to "${PROXY_ADDRESS}" --amount 1 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started
# Second account
yarn hardhat send-balance --to "${PROXY_ADDRESS}" --amount 1 --private-key "${CERC_L1_PRIV_KEY_2}" --network getting-started
echo "Balance sent to Proxy L2 contract"
echo "Use following accounts for transactions in L2:"
echo "${CERC_L1_ADDRESS}"
echo "${CERC_L1_ADDRESS_2}"
echo "Done"

Some files were not shown because too many files have changed in this diff.