forked from cerc-io/stack-orchestrator
Compare commits
7 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 88db2ff766 |  |
|  | 5f556e127a |  |
|  | 926997b21c |  |
|  | 0d91a62f84 |  |
|  | 7d27eaef0f |  |
|  | 8ad2b692ec |  |
|  | 54cc993fa4 |  |
@@ -6,8 +6,6 @@ on:
     paths:
       - '!**'
      - '.gitea/workflows/triggers/fixturenet-eth-plugeth-test'
-  schedule: # Note: coordinate with other tests to not overload runners at the same time of day
-    - cron: '2 14 * * *'
 
 # Needed until we can incorporate docker startup into the executor container
 env:
@@ -6,8 +6,11 @@ on:
     paths:
       - '!**'
       - '.gitea/workflows/triggers/fixturenet-laconicd-test'
-  schedule:
-    - cron: '1 13 * * *'
+
+# Needed until we can incorporate docker startup into the executor container
+env:
+  DOCKER_HOST: unix:///var/run/dind.sock
+
 
 jobs:
   test:
@@ -44,5 +47,9 @@ jobs:
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
         run: ./scripts/build_shiv_package.sh
+      - name: Start dockerd # Also needed until we can incorporate into the executor
+        run: |
+          dockerd -H $DOCKER_HOST --userland-proxy=false &
+          sleep 5
       - name: "Run fixturenet-laconicd tests"
         run: ./tests/fixturenet-laconicd/run-test.sh
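Both workflow changes above follow the same pattern: point `DOCKER_HOST` at a docker-in-docker socket, start `dockerd` in the background, and wait a fixed five seconds before using it. A minimal sketch of an explicit readiness poll that could replace the fixed `sleep 5` (a hypothetical helper, not part of these commits):

```python
# wait_for_dockerd.py - poll the dockerd socket named by DOCKER_HOST until it
# accepts connections, instead of sleeping a fixed five seconds.
import os
import socket
import time

SOCK_PATH = os.environ.get("DOCKER_HOST", "unix:///var/run/dind.sock").removeprefix("unix://")

def wait_for_dockerd(timeout: float = 30.0) -> None:
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
                s.connect(SOCK_PATH)  # connection accepted: dockerd is up
                return
        except OSError:
            time.sleep(0.5)  # socket not ready yet; retry
    raise TimeoutError(f"dockerd did not come up at {SOCK_PATH}")

if __name__ == "__main__":
    wait_for_dockerd()
```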
@@ -1,21 +0,0 @@
-name: Lint Checks
-
-on:
-  pull_request:
-    branches: '*'
-  push:
-    branches: '*'
-
-jobs:
-  test:
-    name: "Run linter"
-    runs-on: ubuntu-latest
-    steps:
-      - name: "Clone project repository"
-        uses: actions/checkout@v3
-      - name: "Install Python"
-        uses: actions/setup-python@v4
-        with:
-          python-version: '3.8'
-      - name: "Run flake8"
-        uses: py-actions/flake8@v2
@@ -1,54 +0,0 @@
-name: Container Registry Test
-
-on:
-  push:
-    branches: '*'
-    paths:
-      - '!**'
-      - '.gitea/workflows/triggers/test-container-registry'
-      - '.gitea/workflows/test-container-registry.yml'
-      - 'tests/container-registry/run-test.sh'
-  schedule: # Note: coordinate with other tests to not overload runners at the same time of day
-    - cron: '6 19 * * *'
-
-jobs:
-  test:
-    name: "Run container registry hosting test on kind/k8s"
-    runs-on: ubuntu-22.04
-    steps:
-      - name: "Clone project repository"
-        uses: actions/checkout@v3
-      # At present the stock setup-python action fails on Linux/aarch64
-      # Conditional steps below workaround this by using deadsnakes for that case only
-      - name: "Install Python for ARM on Linux"
-        if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
-        uses: deadsnakes/action@v3.0.1
-        with:
-          python-version: '3.8'
-      - name: "Install Python cases other than ARM on Linux"
-        if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
-        uses: actions/setup-python@v4
-        with:
-          python-version: '3.8'
-      - name: "Print Python version"
-        run: python3 --version
-      - name: "Install shiv"
-        run: pip install shiv
-      - name: "Generate build version file"
-        run: ./scripts/create_build_tag_file.sh
-      - name: "Build local shiv package"
-        run: ./scripts/build_shiv_package.sh
-      - name: "Check cgroups version"
-        run: mount | grep cgroup
-      - name: "Install kind"
-        run: ./tests/scripts/install-kind.sh
-      - name: "Install Kubectl"
-        run: ./tests/scripts/install-kubectl.sh
-      - name: "Install ed" # Only needed until we remove the need to edit the spec file
-        run: apt update && apt install -y ed
-      - name: "Run container registry deployment test"
-        run: |
-          source /opt/bash-utils/cgroup-helper.sh
-          join_cgroup
-          ./tests/container-registry/run-test.sh
-
@@ -1,52 +0,0 @@
-name: Database Test
-
-on:
-  push:
-    branches: '*'
-    paths:
-      - '!**'
-      - '.gitea/workflows/triggers/test-database'
-      - '.gitea/workflows/test-database.yml'
-      - 'tests/database/run-test.sh'
-  schedule: # Note: coordinate with other tests to not overload runners at the same time of day
-    - cron: '5 18 * * *'
-
-jobs:
-  test:
-    name: "Run database hosting test on kind/k8s"
-    runs-on: ubuntu-22.04
-    steps:
-      - name: "Clone project repository"
-        uses: actions/checkout@v3
-      # At present the stock setup-python action fails on Linux/aarch64
-      # Conditional steps below workaround this by using deadsnakes for that case only
-      - name: "Install Python for ARM on Linux"
-        if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
-        uses: deadsnakes/action@v3.0.1
-        with:
-          python-version: '3.8'
-      - name: "Install Python cases other than ARM on Linux"
-        if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
-        uses: actions/setup-python@v4
-        with:
-          python-version: '3.8'
-      - name: "Print Python version"
-        run: python3 --version
-      - name: "Install shiv"
-        run: pip install shiv
-      - name: "Generate build version file"
-        run: ./scripts/create_build_tag_file.sh
-      - name: "Build local shiv package"
-        run: ./scripts/build_shiv_package.sh
-      - name: "Check cgroups version"
-        run: mount | grep cgroup
-      - name: "Install kind"
-        run: ./tests/scripts/install-kind.sh
-      - name: "Install Kubectl"
-        run: ./tests/scripts/install-kubectl.sh
-      - name: "Run database deployment test"
-        run: |
-          source /opt/bash-utils/cgroup-helper.sh
-          join_cgroup
-          ./tests/database/run-test.sh
-
@@ -4,19 +4,20 @@ on:
   pull_request:
     branches: '*'
   push:
-    branches: '*'
-    paths:
-      - '!**'
-      - '.gitea/workflows/triggers/test-k8s-deploy'
-      - '.gitea/workflows/test-k8s-deploy.yml'
-      - 'tests/k8s-deploy/run-deploy-test.sh'
-  schedule: # Note: coordinate with other tests to not overload runners at the same time of day
-    - cron: '3 15 * * *'
+    branches:
+      - main
+      - ci-test
+    paths-ignore:
+      - '.gitea/workflows/triggers/*'
+
+# Needed until we can incorporate docker startup into the executor container
+env:
+  DOCKER_HOST: unix:///var/run/dind.sock
 
 jobs:
   test:
-    name: "Run deploy test suite on kind/k8s"
-    runs-on: ubuntu-22.04
+    name: "Run deploy test suite"
+    runs-on: ubuntu-latest
     steps:
       - name: "Clone project repository"
         uses: actions/checkout@v3
@@ -40,15 +41,15 @@ jobs:
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
         run: ./scripts/build_shiv_package.sh
-      - name: "Check cgroups version"
-        run: mount | grep cgroup
-      - name: "Install kind"
-        run: ./tests/scripts/install-kind.sh
-      - name: "Install Kubectl"
-        run: ./tests/scripts/install-kubectl.sh
-      - name: "Run k8s deployment test"
+      - name: Start dockerd # Also needed until we can incorporate into the executor
         run: |
-          source /opt/bash-utils/cgroup-helper.sh
-          join_cgroup
-          ./tests/k8s-deploy/run-deploy-test.sh
+          dockerd -H $DOCKER_HOST --userland-proxy=false &
+          sleep 5
+      - name: "Install Go"
+        uses: actions/setup-go@v4
+        with:
+          go-version: '1.21'
+      - name: "Install Kind"
+        run: go install sigs.k8s.io/kind@v0.20.0
+      - name: "Debug Kind"
+        run: kind create cluster --retain && docker logs kind-control-plane
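The "Debug Kind" step relies on `--retain`, which keeps the cluster's node containers around even when `kind create cluster` fails, so the control-plane logs stay available. A sketch of the same flow in Python (hypothetical; assumes `kind` and `docker` are on `PATH`, and that the default cluster name `kind` yields a control-plane container named `kind-control-plane`):

```python
# debug_kind.py - create a kind cluster; on failure, dump the retained
# control-plane container's logs before exiting with the same status.
import subprocess
import sys

def create_cluster_with_logs(name: str = "kind") -> None:
    result = subprocess.run(["kind", "create", "cluster", "--name", name, "--retain"])
    if result.returncode != 0:
        # --retain keeps the node containers; the control plane is <name>-control-plane
        subprocess.run(["docker", "logs", f"{name}-control-plane"])
        sys.exit(result.returncode)

if __name__ == "__main__":
    create_cluster_with_logs()
```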
@@ -41,8 +41,6 @@ jobs:
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
         run: ./scripts/build_shiv_package.sh
-      - name: "Install wget" # 20240109 - Only needed until the executors are updated.
-        run: apt update && apt install -y wget
       - name: Start dockerd # Also needed until we can incorporate into the executor
         run: |
           dockerd -H $DOCKER_HOST --userland-proxy=false &
@@ -1,3 +1,2 @@
 Change this file to trigger running the fixturenet-eth-plugeth-test CI job
 trigger
-trigger
@@ -1,3 +1,2 @@
 Change this file to trigger running the fixturenet-laconicd-test CI job
 Trigger
-Trigger
@@ -1 +0,0 @@
-Change this file to trigger running the test-container-registry CI job
@@ -1,2 +0,0 @@
-Change this file to trigger running the test-database CI job
-Trigger test run
@@ -1,2 +0,0 @@
-Change this file to trigger running the test-k8s-deploy CI job
-Trigger test on PR branch
@@ -29,10 +29,10 @@ chmod +x ~/.docker/cli-plugins/docker-compose
 Next decide on a directory where you would like to put the stack-orchestrator program. Typically this would be
 a "user" binary directory such as `~/bin` or perhaps `/usr/local/laconic` or possibly just the current working directory.
 
-Now, having selected that directory, download the latest release from [this page](https://git.vdb.to/cerc-io/stack-orchestrator/tags) into it (we're using `~/bin` below for concreteness but edit to suit if you selected a different directory). Also be sure that the destination directory exists and is writable:
+Now, having selected that directory, download the latest release from [this page](https://github.com/cerc-io/stack-orchestrator/tags) into it (we're using `~/bin` below for concreteness but edit to suit if you selected a different directory). Also be sure that the destination directory exists and is writable:
 
 ```bash
-curl -L -o ~/bin/laconic-so https://git.vdb.to/cerc-io/stack-orchestrator/releases/download/latest/laconic-so
+curl -L -o ~/bin/laconic-so https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so
 ```
 
 Give it execute permissions:
@@ -52,7 +52,7 @@ Version: 1.1.0-7a607c2-202304260513
 Save the distribution url to `~/.laconic-so/config.yml`:
 ```bash
 mkdir ~/.laconic-so
-echo "distribution-url: https://git.vdb.to/cerc-io/stack-orchestrator/releases/download/latest/laconic-so" > ~/.laconic-so/config.yml
+echo "distribution-url: https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so" > ~/.laconic-so/config.yml
 ```
 
 ### Update
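The `distribution-url` key written above is what the self-update mechanism consults when fetching a new `laconic-so` binary. A minimal sketch of reading it back (illustrative only, not the tool's actual config loader; assumes PyYAML is installed):

```python
# read_config.py - load the distribution-url saved to ~/.laconic-so/config.yml
from pathlib import Path

import yaml  # assumption: PyYAML is available

config_path = Path.home() / ".laconic-so" / "config.yml"
config = yaml.safe_load(config_path.read_text())
print(config["distribution-url"])
```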
@@ -26,7 +26,7 @@ In addition to the pre-requisites listed in the [README](/README.md), the follow
 
 1. Clone this repository:
    ```
-   $ git clone https://git.vdb.to/cerc-io/stack-orchestrator.git
+   $ git clone https://github.com/cerc-io/stack-orchestrator.git
    ```
 
 2. Enter the project directory:
@@ -1,10 +1,10 @@
 # Adding a new stack
 
-See [this PR](https://git.vdb.to/cerc-io/stack-orchestrator/pull/434) for an example of how to currently add a minimal stack to stack orchestrator. The [reth stack](https://git.vdb.to/cerc-io/stack-orchestrator/pull/435) is another good example.
+See [this PR](https://github.com/cerc-io/stack-orchestrator/pull/434) for an example of how to currently add a minimal stack to stack orchestrator. The [reth stack](https://github.com/cerc-io/stack-orchestrator/pull/435) is another good example.
 
 For external developers, we recommend forking this repo and adding your stack directly to your fork. This initially requires running in "developer mode" as described [here](/docs/CONTRIBUTING.md). Check out the [Namada stack](https://github.com/vknowable/stack-orchestrator/blob/main/app/data/stacks/public-namada/digitalocean_quickstart.md) from Knowable to see how that is done.
 
-Core to the feature completeness of stack orchestrator is to [decouple the tool functionality from payload](https://git.vdb.to/cerc-io/stack-orchestrator/issues/315) which will no longer require forking to add a stack.
+Core to the feature completeness of stack orchestrator is to [decouple the tool functionality from payload](https://github.com/cerc-io/stack-orchestrator/issues/315) which will no longer require forking to add a stack.
 
 ## Example
@@ -1,6 +1,6 @@
 # Specification
 
-Note: this page is out of date (but still useful) - it will no longer be useful once stacks are [decoupled from the tool functionality](https://git.vdb.to/cerc-io/stack-orchestrator/issues/315).
+Note: this page is out of date (but still useful) - it will no longer be useful once stacks are [decoupled from the tool functionality](https://github.com/cerc-io/stack-orchestrator/issues/315).
 
 ## Implementation
@@ -1,64 +0,0 @@
-### Building and Running Webapps
-
-It is possible to build and run Next.js webapps using the `build-webapp` and `run-webapp` subcommands.
-
-To make it easier to build once and deploy into different environments and with different configuration,
-compilation and static page generation are separated in the `build-webapp` and `run-webapp` steps.
-
-This offers much more flexibility than standard Next.js build methods, since any environment variables accessed
-via `process.env`, whether for pages or for API, will have values drawn from their runtime deployment environment,
-not their build environment.
-
-## Building
-
-Building usually requires no additional configuration. By default, the Next.js version specified in `package.json`
-is used, and either `yarn` or `npm` will be used automatically depending on which lock files are present. These
-can be overridden with the build arguments `CERC_NEXT_VERSION` and `CERC_BUILD_TOOL` respectively. For example: `--extra-build-args "--build-arg CERC_NEXT_VERSION=13.4.12"`
-
-**Example**:
-```
-$ cd ~/cerc
-$ git clone git@git.vdb.to:cerc-io/test-progressive-web-app.git
-$ laconic-so build-webapp --source-repo ~/cerc/test-progressive-web-app
-...
-
-Built host container for ~/cerc/test-progressive-web-app with tag:
-
-    cerc/test-progressive-web-app:local
-
-To test locally run:
-
-    laconic-so run-webapp --image cerc/test-progressive-web-app:local --env-file /path/to/environment.env
-
-```
-
-## Running
-
-With `run-webapp` a new container will be launched on the local machine, with runtime configuration provided by `--env-file` (if specified) and published on an available port. Multiple instances can be launched with different configuration.
-
-**Example**:
-```
-# Production env
-$ laconic-so run-webapp --image cerc/test-progressive-web-app:local --env-file /path/to/environment/production.env
-
-Image: cerc/test-progressive-web-app:local
-ID: 4c6e893bf436b3e91a2b92ce37e30e499685131705700bd92a90d2eb14eefd05
-URL: http://localhost:32768
-
-# Dev env
-$ laconic-so run-webapp --image cerc/test-progressive-web-app:local --env-file /path/to/environment/dev.env
-
-Image: cerc/test-progressive-web-app:local
-ID: 9ab96494f563aafb6c057d88df58f9eca81b90f8721a4e068493a289a976051c
-URL: http://localhost:32769
-```
-
-## Deploying
-
-Use the subcommand `deploy-webapp create` to make a deployment directory that can be subsequently deployed to a Kubernetes cluster.
-Example commands are shown below, assuming that the webapp container image `cerc/test-progressive-web-app:local` has already been built:
-```
-$ laconic-so deploy-webapp create --kube-config ~/kubectl/k8s-kubeconfig.yaml --image-registry registry.digitalocean.com/laconic-registry --deployment-dir webapp-k8s-deployment --image cerc/test-progressive-web-app:local --url https://test-pwa-app.hosting.laconic.com/ --env-file test-webapp.env
-$ laconic-so deployment --dir webapp-k8s-deployment push-images
-$ laconic-so deployment --dir webapp-k8s-deployment start
-```
@@ -10,4 +10,3 @@ pydantic==1.10.9
 tomli==2.0.1
 validators==0.22.0
 kubernetes>=28.1.0
-humanfriendly>=10.0
@@ -41,4 +41,4 @@ runcmd:
   - apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
   - systemctl enable docker
   - systemctl start docker
-  - git clone https://git.vdb.to/cerc-io/stack-orchestrator.git /home/ubuntu/stack-orchestrator
+  - git clone https://github.com/cerc-io/stack-orchestrator.git /home/ubuntu/stack-orchestrator
@@ -31,5 +31,5 @@ runcmd:
   - apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
   - systemctl enable docker
   - systemctl start docker
-  - curl -L -o /usr/local/bin/laconic-so https://git.vdb.to/cerc-io/stack-orchestrator/releases/download/latest/laconic-so
+  - curl -L -o /usr/local/bin/laconic-so https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so
   - chmod +x /usr/local/bin/laconic-so
@@ -1,19 +0,0 @@
-#!/usr/bin/env bash
-# Beginnings of a script to quickly spin up and test a deployment
-if [[ -n "$CERC_SCRIPT_DEBUG" ]]; then
-    set -x
-fi
-if [[ -n "$1" ]]; then
-    stack_name=$1
-else
-    stack_name="test"
-fi
-spec_file_name="${stack_name}-spec.yml"
-deployment_dir_name="${stack_name}-deployment"
-rm -f ${spec_file_name}
-rm -rf ${deployment_dir_name}
-laconic-so --stack ${stack_name} deploy --deploy-to compose init --output ${spec_file_name}
-laconic-so --stack ${stack_name} deploy --deploy-to compose create --deployment-dir ${deployment_dir_name} --spec-file ${spec_file_name}
-#laconic-so deployment --dir ${deployment_dir_name} start
-#laconic-so deployment --dir ${deployment_dir_name} ps
-#laconic-so deployment --dir ${deployment_dir_name} stop
@@ -137,7 +137,7 @@ fi
 echo "**************************************************************************************"
 echo "Installing laconic-so"
 # install latest `laconic-so`
-distribution_url=https://git.vdb.to/cerc-io/stack-orchestrator/releases/download/latest/laconic-so
+distribution_url=https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so
 install_filename=${install_dir}/laconic-so
 mkdir -p ${install_dir}
 curl -L -o ${install_filename} ${distribution_url}
setup.py
@@ -13,7 +13,7 @@ setup(
     description='Orchestrates deployment of the Laconic stack',
     long_description=long_description,
     long_description_content_type="text/markdown",
-    url='https://git.vdb.to/cerc-io/stack-orchestrator',
+    url='https://github.com/cerc-io/stack-orchestrator',
     py_modules=['stack_orchestrator'],
     packages=find_packages(),
     install_requires=[requirements],
@@ -27,13 +27,12 @@ import subprocess
 import click
 import importlib.resources
 from pathlib import Path
-from stack_orchestrator.util import include_exclude_check, get_parsed_stack_config, stack_is_external, warn_exit
+from stack_orchestrator.util import include_exclude_check, get_parsed_stack_config
 from stack_orchestrator.base import get_npm_registry_url
 
 # TODO: find a place for this
 # epilog="Config provided either in .env or settings.ini or env vars: CERC_REPO_BASE_DIR (defaults to ~/cerc)"
 
 
 def make_container_build_env(dev_root_path: str,
                              container_build_dir: str,
                              debug: bool,
@@ -59,8 +58,7 @@ def make_container_build_env(dev_root_path: str,
     return container_build_env
 
 
-def process_container(stack: str,
-                      container,
+def process_container(container,
                       container_build_dir: str,
                       container_build_env: dict,
                       dev_root_path: str,
@@ -71,29 +69,12 @@ def process_container(stack: str,
                       ):
     if not quiet:
         print(f"Building: {container}")
-
-    default_container_tag = f"{container}:local"
-    container_build_env.update({"CERC_DEFAULT_CONTAINER_IMAGE_TAG": default_container_tag})
-
-    # Check if this is in an external stack
-    if stack_is_external(stack):
-        container_parent_dir = Path(stack).joinpath("container-build")
-        temp_build_dir = container_parent_dir.joinpath(container.replace("/", "-"))
-        temp_build_script_filename = temp_build_dir.joinpath("build.sh")
-        # Now check if the container exists in the external stack.
-        if not temp_build_script_filename.exists():
-            # If not, revert to building an internal container
-            container_parent_dir = container_build_dir
-    else:
-        container_parent_dir = container_build_dir
-
-    build_dir = container_parent_dir.joinpath(container.replace("/", "-"))
-    build_script_filename = build_dir.joinpath("build.sh")
-
+    build_dir = os.path.join(container_build_dir, container.replace("/", "-"))
+    build_script_filename = os.path.join(build_dir, "build.sh")
     if verbose:
         print(f"Build script filename: {build_script_filename}")
     if os.path.exists(build_script_filename):
-        build_command = build_script_filename.as_posix()
+        build_command = build_script_filename
     else:
         if verbose:
             print(f"No script file found: {build_script_filename}, using default build script")
@@ -103,11 +84,8 @@ def process_container(stack: str,
     repo_full_path = os.path.join(dev_root_path, repo_dir)
     repo_dir_or_build_dir = repo_full_path if os.path.exists(repo_full_path) else build_dir
     build_command = os.path.join(container_build_dir,
-                                 "default-build.sh") + f" {default_container_tag} {repo_dir_or_build_dir}"
+                                 "default-build.sh") + f" {container}:local {repo_dir_or_build_dir}"
     if not dry_run:
-        # No PATH at all causes failures with podman.
-        if "PATH" not in container_build_env:
-            container_build_env["PATH"] = os.environ["PATH"]
         if verbose:
             print(f"Executing: {build_command} with environment: {container_build_env}")
         build_result = subprocess.run(build_command, shell=True, env=container_build_env)
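The PATH guard being removed here existed because `subprocess.run(..., env=...)` replaces the child's entire environment: if the supplied mapping has no `PATH`, the shell cannot resolve bare command names (the original comment calls out podman specifically). A minimal sketch of the failure mode and the guard:

```python
# path_env.py - why a custom subprocess env needs PATH copied in.
import os
import subprocess

env = {"CERC_SCRIPT_DEBUG": "1"}  # a custom env with no PATH
# Without the next line, `docker` (or `podman`) cannot be found by name.
env.setdefault("PATH", os.environ["PATH"])
subprocess.run("docker --version", shell=True, env=env, check=False)
```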
@@ -123,7 +101,6 @@ def process_container(stack: str,
     else:
         print("Skipped")
 
-
 @click.command()
 @click.option('--include', help="only build these containers")
 @click.option('--exclude', help="don\'t build these containers")
@@ -164,8 +141,6 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
     containers_in_scope = []
     if stack:
         stack_config = get_parsed_stack_config(stack)
-        if "containers" not in stack_config or stack_config["containers"] is None:
-            warn_exit(f"stack {stack} does not define any containers")
         containers_in_scope = stack_config['containers']
     else:
         containers_in_scope = all_containers
@@ -183,7 +158,7 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
 
     for container in containers_in_scope:
         if include_exclude_check(container, include, exclude):
-            process_container(stack, container, container_build_dir, container_build_env,
+            process_container(container, container_build_dir, container_build_env,
                               dev_root_path, quiet, verbose, dry_run, continue_on_error)
         else:
             if verbose:
|
@ -25,17 +25,15 @@ from decouple import config
|
|||||||
import click
|
import click
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from stack_orchestrator.build import build_containers
|
from stack_orchestrator.build import build_containers
|
||||||
from stack_orchestrator.deploy.webapp.util import determine_base_container
|
|
||||||
|
|
||||||
|
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.option('--base-container')
|
@click.option('--base-container', default="cerc/nextjs-base")
|
||||||
@click.option('--source-repo', help="directory containing the webapp to build", required=True)
|
@click.option('--source-repo', help="directory containing the webapp to build", required=True)
|
||||||
@click.option("--force-rebuild", is_flag=True, default=False, help="Override dependency checking -- always rebuild")
|
@click.option("--force-rebuild", is_flag=True, default=False, help="Override dependency checking -- always rebuild")
|
||||||
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
||||||
@click.option("--tag", help="Container tag (default: cerc/<app_name>:local)")
|
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, tag):
|
def command(ctx, base_container, source_repo, force_rebuild, extra_build_args):
|
||||||
'''build the specified webapp container'''
|
'''build the specified webapp container'''
|
||||||
|
|
||||||
quiet = ctx.obj.quiet
|
quiet = ctx.obj.quiet
|
||||||
@@ -58,27 +56,22 @@ def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, t
     if not quiet:
         print(f'Dev Root is: {dev_root_path}')
 
-    if not base_container:
-        base_container = determine_base_container(source_repo)
-
     # First build the base container.
     container_build_env = build_containers.make_container_build_env(dev_root_path, container_build_dir, debug,
                                                                     force_rebuild, extra_build_args)
 
-    build_containers.process_container(None, base_container, container_build_dir, container_build_env, dev_root_path, quiet,
+    build_containers.process_container(base_container, container_build_dir, container_build_env, dev_root_path, quiet,
                                        verbose, dry_run, continue_on_error)
 
 
     # Now build the target webapp. We use the same build script, but with a different Dockerfile and work dir.
     container_build_env["CERC_WEBAPP_BUILD_RUNNING"] = "true"
     container_build_env["CERC_CONTAINER_BUILD_WORK_DIR"] = os.path.abspath(source_repo)
     container_build_env["CERC_CONTAINER_BUILD_DOCKERFILE"] = os.path.join(container_build_dir,
                                                                           base_container.replace("/", "-"),
                                                                           "Dockerfile.webapp")
-    if not tag:
-        webapp_name = os.path.abspath(source_repo).split(os.path.sep)[-1]
-        container_build_env["CERC_CONTAINER_BUILD_TAG"] = f"cerc/{webapp_name}:local"
-    else:
-        container_build_env["CERC_CONTAINER_BUILD_TAG"] = tag
+    webapp_name = os.path.abspath(source_repo).split(os.path.sep)[-1]
+    container_build_env["CERC_CONTAINER_BUILD_TAG"] = f"cerc/{webapp_name}:local"
 
-    build_containers.process_container(None, base_container, container_build_dir, container_build_env, dev_root_path, quiet,
+    build_containers.process_container(base_container, container_build_dir, container_build_env, dev_root_path, quiet,
                                        verbose, dry_run, continue_on_error)
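With the `--tag` option removed, the webapp image tag is always derived from the basename of `--source-repo`. A minimal sketch of that derivation (the `expanduser` call is added here so the `~` example resolves; the code above uses the path as given):

```python
# default_tag.py - how the image tag is derived from the source repo path.
import os

source_repo = "~/cerc/test-progressive-web-app"  # example path from the docs
webapp_name = os.path.abspath(os.path.expanduser(source_repo)).split(os.path.sep)[-1]
print(f"cerc/{webapp_name}:local")  # -> cerc/test-progressive-web-app:local
```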
@@ -1,38 +0,0 @@
-# Copyright © 2023 Vulcanize
-
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <http:#www.gnu.org/licenses/>.
-
-cluster_name_prefix = "laconic-"
-stack_file_name = "stack.yml"
-spec_file_name = "spec.yml"
-config_file_name = "config.env"
-deployment_file_name = "deployment.yml"
-compose_dir_name = "compose"
-compose_deploy_type = "compose"
-k8s_kind_deploy_type = "k8s-kind"
-k8s_deploy_type = "k8s"
-cluster_id_key = "cluster-id"
-kube_config_key = "kube-config"
-deploy_to_key = "deploy-to"
-network_key = "network"
-http_proxy_key = "http-proxy"
-image_registry_key = "image-registry"
-configmaps_key = "configmaps"
-resources_key = "resources"
-volumes_key = "volumes"
-security_key = "security"
-annotations_key = "annotations"
-labels_key = "labels"
-kind_config_filename = "kind-config.yml"
-kube_config_filename = "kubeconfig.yml"
@@ -1,13 +0,0 @@
-services:
-  registry:
-    image: registry:2.8
-    restart: always
-    environment:
-      REGISTRY_LOG_LEVEL: ${REGISTRY_LOG_LEVEL}
-    volumes:
-      - registry-data:/var/lib/registry
-    ports:
-      - "5000"
-
-volumes:
-  registry-data:
@@ -4,6 +4,6 @@ services:
     image: cerc/laconic-console-host:local
     environment:
       - CERC_WEBAPP_FILES_DIR=${CERC_WEBAPP_FILES_DIR:-/usr/local/share/.config/yarn/global/node_modules/@cerc-io/console-app/dist/production}
-      - LACONIC_HOSTED_ENDPOINT=${LACONIC_HOSTED_ENDPOINT:-http://localhost:9473}
+      - LACONIC_HOSTED_ENDPOINT=${LACONIC_HOSTED_ENDPOINT:-http://localhost}
     ports:
       - "80"
|
@ -5,7 +5,7 @@ services:
|
|||||||
command: ["sh", "/docker-entrypoint-scripts.d/create-fixturenet.sh"]
|
command: ["sh", "/docker-entrypoint-scripts.d/create-fixturenet.sh"]
|
||||||
volumes:
|
volumes:
|
||||||
# The cosmos-sdk node's database directory:
|
# The cosmos-sdk node's database directory:
|
||||||
- laconicd-data:/root/.laconicd
|
- laconicd-data:/root/.laconicd/data
|
||||||
# TODO: look at folding these scripts into the container
|
# TODO: look at folding these scripts into the container
|
||||||
- ../config/fixturenet-laconicd/create-fixturenet.sh:/docker-entrypoint-scripts.d/create-fixturenet.sh
|
- ../config/fixturenet-laconicd/create-fixturenet.sh:/docker-entrypoint-scripts.d/create-fixturenet.sh
|
||||||
- ../config/fixturenet-laconicd/export-mykey.sh:/docker-entrypoint-scripts.d/export-mykey.sh
|
- ../config/fixturenet-laconicd/export-mykey.sh:/docker-entrypoint-scripts.d/export-mykey.sh
|
||||||
|
@@ -6,8 +6,8 @@ services:
   # Deploys the L1 smart contracts (outputs to volume l1_deployment)
   fixturenet-optimism-contracts:
     restart: on-failure
-    image: cerc/optimism-contracts:local
     hostname: fixturenet-optimism-contracts
+    image: cerc/optimism-contracts:local
     env_file:
       - ../config/fixturenet-optimism/l1-params.env
     environment:
@@ -17,49 +17,27 @@ services:
       CERC_L1_ACCOUNTS_CSV_URL: ${CERC_L1_ACCOUNTS_CSV_URL}
       CERC_L1_ADDRESS: ${CERC_L1_ADDRESS}
       CERC_L1_PRIV_KEY: ${CERC_L1_PRIV_KEY}
+      CERC_L1_ADDRESS_2: ${CERC_L1_ADDRESS_2}
+      CERC_L1_PRIV_KEY_2: ${CERC_L1_PRIV_KEY_2}
+    # Waits for L1 endpoint to be up before running the script
+    command: |
+      "./wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- ./run.sh"
     volumes:
       - ../config/network/wait-for-it.sh:/app/packages/contracts-bedrock/wait-for-it.sh
-      - ../config/fixturenet-optimism/optimism-contracts/deploy-contracts.sh:/app/packages/contracts-bedrock/deploy-contracts.sh
+      - ../config/optimism-contracts/hardhat-tasks/verify-contract-deployment.ts:/app/packages/contracts-bedrock/tasks/verify-contract-deployment.ts
+      - ../config/optimism-contracts/hardhat-tasks/rekey-json.ts:/app/packages/contracts-bedrock/tasks/rekey-json.ts
+      - ../config/optimism-contracts/hardhat-tasks/send-balance.ts:/app/packages/contracts-bedrock/tasks/send-balance.ts
+      - ../config/fixturenet-optimism/optimism-contracts/update-config.js:/app/packages/contracts-bedrock/update-config.js
+      - ../config/fixturenet-optimism/optimism-contracts/run.sh:/app/packages/contracts-bedrock/run.sh
       - l2_accounts:/l2-accounts
-      - l1_deployment:/l1-deployment
-      - l2_config:/l2-config
-    # Waits for L1 endpoint to be up before running the contract deploy script
-    command: |
-      "./wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- ./deploy-contracts.sh"
-
-  # Initializes and runs the L2 execution client (outputs to volume l2_geth_data)
-  op-geth:
-    restart: always
-    image: cerc/optimism-l2geth:local
-    hostname: op-geth
-    depends_on:
-      op-node:
-        condition: service_started
-    volumes:
-      - ../config/fixturenet-optimism/run-op-geth.sh:/run-op-geth.sh
-      - l2_config:/l2-config:ro
-      - l2_accounts:/l2-accounts:ro
-      - l2_geth_data:/datadir
-    entrypoint: "sh"
-    command: "/run-op-geth.sh"
-    ports:
-      - "8545"
-      - "8546"
-    healthcheck:
-      test: ["CMD", "nc", "-vz", "localhost:8545"]
-      interval: 30s
-      timeout: 10s
-      retries: 100
-      start_period: 10s
+      - l1_deployment:/app/packages/contracts-bedrock
     extra_hosts:
       - "host.docker.internal:host-gateway"
 
-  # Runs the L2 consensus client (Sequencer node)
-  # Generates the L2 config files if not already present (outputs to volume l2_config)
-  op-node:
-    restart: always
+  # Generates the config files required for L2 (outputs to volume l2_config)
+  op-node-l2-config-gen:
+    restart: on-failure
     image: cerc/optimism-op-node:local
-    hostname: op-node
     depends_on:
       fixturenet-optimism-contracts:
         condition: service_completed_successfully
@@ -69,19 +47,61 @@ services:
       CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
       CERC_L1_RPC: ${CERC_L1_RPC}
     volumes:
-      - ../config/fixturenet-optimism/run-op-node.sh:/run-op-node.sh
-      - l1_deployment:/l1-deployment:ro
-      - l2_config:/l2-config
+      - ../config/fixturenet-optimism/generate-l2-config.sh:/app/generate-l2-config.sh
+      - l1_deployment:/contracts-bedrock:ro
+      - l2_config:/app
+    command: ["sh", "/app/generate-l2-config.sh"]
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
+
+  # Initializes and runs the L2 execution client (outputs to volume l2_geth_data)
+  op-geth:
+    restart: always
+    image: cerc/optimism-l2geth:local
+    depends_on:
+      op-node-l2-config-gen:
+        condition: service_started
+    volumes:
+      - ../config/fixturenet-optimism/run-op-geth.sh:/run-op-geth.sh
+      - l2_config:/op-node:ro
       - l2_accounts:/l2-accounts:ro
+      - l2_geth_data:/datadir
     entrypoint: "sh"
-    command: "/run-op-node.sh"
+    command: "/run-op-geth.sh"
     ports:
-      - "8547"
+      - "0.0.0.0:8545:8545"
+      - "0.0.0.0:8546:8546"
+    healthcheck:
+      test: ["CMD", "nc", "-vz", "localhost:8545"]
+      interval: 30s
+      timeout: 10s
+      retries: 10
+      start_period: 10s
+
+  # Runs the L2 consensus client (Sequencer node)
+  op-node:
+    restart: always
+    image: cerc/optimism-op-node:local
+    depends_on:
+      op-geth:
+        condition: service_healthy
+    env_file:
+      - ../config/fixturenet-optimism/l1-params.env
+    environment:
+      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
+      CERC_L1_RPC: ${CERC_L1_RPC}
+    volumes:
+      - ../config/fixturenet-optimism/run-op-node.sh:/app/run-op-node.sh
+      - l2_config:/op-node-data:ro
+      - l2_accounts:/l2-accounts:ro
+    command: ["sh", "/app/run-op-node.sh"]
+    ports:
+      - "0.0.0.0:8547:8547"
     healthcheck:
       test: ["CMD", "nc", "-vz", "localhost:8547"]
       interval: 30s
       timeout: 10s
-      retries: 100
+      retries: 10
       start_period: 10s
     extra_hosts:
       - "host.docker.internal:host-gateway"
@@ -90,7 +110,6 @@ services:
   op-batcher:
     restart: always
     image: cerc/optimism-op-batcher:local
-    hostname: op-batcher
    depends_on:
      op-node:
        condition: service_healthy
@@ -110,7 +129,7 @@ services:
     command: |
       "/wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- /run-op-batcher.sh"
     ports:
-      - "8548"
+      - "127.0.0.1:8548:8548"
     extra_hosts:
       - "host.docker.internal:host-gateway"
@@ -118,29 +137,25 @@ services:
   op-proposer:
     restart: always
     image: cerc/optimism-op-proposer:local
-    hostname: op-proposer
     depends_on:
       op-node:
         condition: service_healthy
-      op-geth:
-        condition: service_healthy
     env_file:
       - ../config/fixturenet-optimism/l1-params.env
     environment:
       CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
       CERC_L1_RPC: ${CERC_L1_RPC}
-      CERC_L1_CHAIN_ID: ${CERC_L1_CHAIN_ID}
     volumes:
       - ../config/network/wait-for-it.sh:/wait-for-it.sh
       - ../config/fixturenet-optimism/run-op-proposer.sh:/run-op-proposer.sh
-      - l1_deployment:/l1-deployment:ro
+      - l1_deployment:/contracts-bedrock:ro
       - l2_accounts:/l2-accounts:ro
     entrypoint: ["sh", "-c"]
     # Waits for L1 endpoint to be up before running the proposer
     command: |
       "/wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- /run-op-proposer.sh"
     ports:
-      - "8560"
+      - "127.0.0.1:8560:8560"
     extra_hosts:
       - "host.docker.internal:host-gateway"
@@ -1,36 +0,0 @@
-version: '3.7'
-
-services:
-  # Runs an Urbit fake ship and attempts an app installation using given data
-  # Uploads the app glob to given IPFS endpoint
-  # From urbit_app_builds volume:
-  # - takes app build from ${CERC_URBIT_APP}/build (waits for it to appear)
-  # - takes additional mark files from ${CERC_URBIT_APP}/mar
-  # - takes the docket file from ${CERC_URBIT_APP}/desk.docket-0
-  urbit-fake-ship:
-    restart: unless-stopped
-    image: tloncorp/vere
-    environment:
-      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-      CERC_URBIT_APP: ${CERC_URBIT_APP}
-      CERC_ENABLE_APP_INSTALL: ${CERC_ENABLE_APP_INSTALL:-true}
-      CERC_IPFS_GLOB_HOST_ENDPOINT: ${CERC_IPFS_GLOB_HOST_ENDPOINT:-http://ipfs:5001}
-      CERC_IPFS_SERVER_ENDPOINT: ${CERC_IPFS_SERVER_ENDPOINT:-http://ipfs:8080}
-    entrypoint: ["bash", "-c", "./run-urbit-ship.sh && ./deploy-app.sh && tail -f /dev/null"]
-    volumes:
-      - urbit_data:/urbit
-      - urbit_app_builds:/app-builds
-      - ../config/urbit/run-urbit-ship.sh:/urbit/run-urbit-ship.sh
-      - ../config/urbit/deploy-app.sh:/urbit/deploy-app.sh
-    ports:
-      - "80"
-    healthcheck:
-      test: ["CMD", "nc", "-v", "localhost", "80"]
-      interval: 20s
-      timeout: 5s
-      retries: 15
-      start_period: 10s
-
-volumes:
-  urbit_data:
-  urbit_app_builds:
@@ -1,23 +0,0 @@
-version: "3.7"
-
-services:
-  grafana:
-    image: grafana/grafana:10.2.2
-    restart: always
-    environment:
-      GF_SERVER_ROOT_URL: ${GF_SERVER_ROOT_URL}
-    volumes:
-      - ../config/monitoring/grafana/provisioning:/etc/grafana/provisioning
-      - ../config/monitoring/grafana/dashboards:/etc/grafana/dashboards
-      - grafana_storage:/var/lib/grafana
-    ports:
-      - "3000"
-    healthcheck:
-      test: ["CMD", "nc", "-vz", "localhost", "3000"]
-      interval: 30s
-      timeout: 5s
-      retries: 10
-      start_period: 3s
-
-volumes:
-  grafana_storage:
@@ -1,24 +1,13 @@
 version: "3.2"
 
 # See: https://docs.ipfs.tech/install/run-ipfs-inside-docker/#set-up
 services:
   ipfs:
     image: ipfs/kubo:v0.24.0
     restart: always
     volumes:
-      - ipfs-import:/import
-      - ipfs-data:/data/ipfs
+      - ./ipfs/import:/import
+      - ./ipfs/data:/data/ipfs
     ports:
-      - "4001"
-      - "8080"
+      - "0.0.0.0:8080:8080"
+      - "0.0.0.0:4001:4001"
       - "0.0.0.0:5001:5001"
-    healthcheck:
-      test: ["CMD", "nc", "-v", "localhost", "5001"]
-      interval: 20s
-      timeout: 5s
-      retries: 15
-      start_period: 10s
-
-volumes:
-  ipfs-import:
-  ipfs-data:
@@ -1,12 +0,0 @@
-version: "3.2"
-
-services:
-  mars:
-    image: cerc/mars-v2:local
-    restart: always
-    ports:
-      - "3000:3000"
-    environment:
-      - URL_OSMOSIS_REST=https://lcd-osmosis.blockapsis.com
-      - URL_OSMOSIS_RPC=https://rpc-osmosis.blockapsis.com
-      - WALLET_CONNECT_ID=0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x
@@ -1,20 +0,0 @@
-version: "3.2"
-
-services:
-  mars:
-    image: cerc/mars:local
-    restart: always
-    ports:
-      - "3000:3000"
-    environment:
-      - URL_OSMOSIS_GQL=https://osmosis-node.marsprotocol.io/GGSFGSFGFG34/osmosis-hive-front/graphql
-      - URL_OSMOSIS_REST=https://lcd-osmosis.blockapsis.com
-      - URL_OSMOSIS_RPC=https://rpc-osmosis.blockapsis.com
-      - URL_NEUTRON_GQL=https://neutron.rpc.p2p.world/qgrnU6PsQZA8F9S5Fb8Fn3tV3kXmMBl2M9bcc9jWLjQy8p/hive/graphql
-      - URL_NEUTRON_REST=https://rest-kralum.neutron-1.neutron.org
-      - URL_NEUTRON_RPC=https://rpc-kralum.neutron-1.neutron.org
-      - URL_NEUTRON_TEST_GQL=https://testnet-neutron-gql.marsprotocol.io/graphql
-      - URL_NEUTRON_TEST_REST=https://rest-palvus.pion-1.ntrn.tech
-      - URL_NEUTRON_TEST_RPC=https://rpc-palvus.pion-1.ntrn.tech
-      - WALLET_CONNECT_ID=0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x
-
@@ -1,15 +0,0 @@
-version: '3.8'
-
-services:
-  node-exporter:
-    image: prom/node-exporter:latest
-    restart: unless-stopped
-    command:
-      - '--web.listen-address=:9100'
-      - '--path.rootfs=/host'
-      - '--collector.systemd'
-      - '--collector.processes'
-    network_mode: host
-    pid: host
-    volumes:
-      - '/:/host:ro,rslave'
@@ -1,22 +0,0 @@
-version: "3.2"
-
-services:
-  osmosis-front-end:
-    image: cerc/osmosis-front-end-urbit:local
-    restart: on-failure
-    environment:
-      - NEXT_PUBLIC_WEB_API_BASE_URL=${CERC_WEB_API_BASE_URL}
-      - ASSET_LIST_COMMIT_HASH=a326bcefc51372b4912be5a2a2fa84a5d142a438
-      - NEXT_PUBLIC_BASEPATH=/apps/osmosis
-      - NEXT_PUBLIC_URBIT_DEPLOYMENT=true
-    working_dir: /app/packages/web
-    command: ["./build-app-for-urbit.sh"]
-    volumes:
-      - ../config/osmosis/build-app-for-urbit.sh:/app/packages/web/build-app-for-urbit.sh
-      - ../config/osmosis/.env.production:/app/packages/web/.env.production
-      - urbit_app_builds:/app-builds
-      - ../config/osmosis/urbit-files/mar:/app/packages/web/mar
-      - ../config/osmosis/urbit-files/desk.docket-0:/app/packages/web/desk.docket-0
-
-volumes:
-  urbit_app_builds:
@@ -3,34 +3,6 @@ version: "3.2"
 services:
   osmosis-front-end:
     image: cerc/osmosis-front-end:local
-    restart: on-failure
-    environment:
-      - NEXT_PUBLIC_WEB_API_BASE_URL=${CERC_WEB_API_BASE_URL}
-      - ASSET_LIST_COMMIT_HASH=a326bcefc51372b4912be5a2a2fa84a5d142a438
-    working_dir: /app/packages/web
-    command: ["./build-app.sh"]
-    volumes:
-      - ../config/osmosis/build-app.sh:/app/packages/web/build-app.sh
-      - ../config/osmosis/.env.production:/app/packages/web/.env.production
-      - app_builds:/app-builds
-
-  nginx:
-    image: nginx:1.23-alpine
     restart: always
-    depends_on:
-      osmosis-front-end:
-        condition: service_completed_successfully
-    volumes:
-      - ../config/osmosis/nginx:/etc/nginx/conf.d
-      - app_builds:/usr/share/nginx
     ports:
-      - "80"
-    healthcheck:
-      test: ["CMD", "nc", "-vz", "localhost", "80"]
-      interval: 20s
-      timeout: 5s
-      retries: 15
-      start_period: 5s
-
-volumes:
-  app_builds:
+      - "3002:3002" #TODO make `3000` when using the deployment feature
@@ -1,8 +0,0 @@
-version: "3.2"
-
-services:
-  ping-pub:
-    image: cerc/ping-pub:local
-    restart: always
-    ports:
-      - "5173:5173"
@@ -1,57 +0,0 @@
-version: "3.7"
-
-services:
-  prometheus:
-    image: prom/prometheus:v2.49.1
-    restart: always
-    volumes:
-      - ../config/monitoring/prometheus:/etc/prometheus
-      - prometheus_data:/prometheus
-    ports:
-      - "9090"
-    healthcheck:
-      test: ["CMD", "nc", "-vz", "localhost", "9090"]
-      interval: 30s
-      timeout: 5s
-      retries: 10
-      start_period: 3s
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
-
-  blackbox:
-    image: prom/blackbox-exporter:latest
-    restart: always
-    volumes:
-      - ../config/monitoring/blackbox.yml:/etc/blackbox_exporter/config.yml
-    ports:
-      - '9115'
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
-
-  chain-head-exporter:
-    image: cerc/watcher-ts:local
-    restart: always
-    working_dir: /app/packages/cli
-    environment:
-      ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
-      FIL_RPC_ENDPOINT: ${CERC_FIL_RPC_ENDPOINT}
-      ETH_RPC_API_KEY: ${CERC_INFURA_KEY}
-      PORT: ${CERC_METRICS_PORT}
-    command: ["sh", "-c", "yarn export-metrics:chain-heads"]
-    ports:
-      - '5000'
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
-
-  postgres-exporter:
-    image: quay.io/prometheuscommunity/postgres-exporter
-    restart: always
-    volumes:
-      - ../config/monitoring/postgres-exporter.yml:/postgres_exporter.yml
-    ports:
-      - '9187'
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
-
-volumes:
-  prometheus_data:
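For reference, a quick smoke test against the (now-removed) chain-head-exporter would have looked like the sketch below. This is an assumption for illustration, not part of the stack: the container name is a placeholder, and since the compose file publishes '5000' without a host mapping, Docker assigns the host port.

    # Hypothetical check that the metrics endpoint responds; <container> is a placeholder.
    PORT=$(docker port <container> 5000 | head -1 | cut -d: -f2)
    curl -s "http://localhost:${PORT}/metrics" | head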
@@ -1,22 +0,0 @@
-version: "3.2"
-
-services:
-  proxy-server:
-    image: cerc/watcher-ts:local
-    restart: on-failure
-    working_dir: /app/packages/cli
-    environment:
-      ENABLE_PROXY: ${CERC_ENABLE_PROXY:-true}
-      PROXY_UPSTREAM: ${CERC_PROXY_UPSTREAM}
-      PROXY_ORIGIN_HEADER: ${CERC_PROXY_ORIGIN_HEADER}
-    command: ["sh", "-c", "./run.sh"]
-    volumes:
-      - ../config/proxy-server/run.sh:/app/packages/cli/run.sh
-    ports:
-      - "4000"
-    healthcheck:
-      test: ["CMD", "nc", "-v", "localhost", "4000"]
-      interval: 20s
-      timeout: 5s
-      retries: 15
-      start_period: 10s
@@ -1,20 +0,0 @@
-services:
-
-  database:
-    image: cerc/test-database-container:local
-    restart: always
-    volumes:
-      - db-data:/var/lib/postgresql/data
-    environment:
-      POSTGRES_USER: "test-user"
-      POSTGRES_DB: "test-db"
-      POSTGRES_PASSWORD: "password"
-      POSTGRES_INITDB_ARGS: "-E UTF8 --locale=C"
-    ports:
-      - "5432"
-
-  test-client:
-    image: cerc/test-database-client:local
-
-volumes:
-  db-data:
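As a side note, the deleted database service published 5432 without a fixed host mapping, so Docker picks a random host port. A minimal connectivity check, assuming the credentials above and a psql client on the host (the container name is a placeholder):

    # Find the host port Docker mapped to the container's 5432, then connect.
    HOST_PORT=$(docker port <database-container> 5432 | head -1 | cut -d: -f2)
    PGPASSWORD=password psql -h localhost -p "$HOST_PORT" -U test-user -d test-db -c 'select 1;'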
@@ -5,15 +5,10 @@ services:
     environment:
       CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
       CERC_TEST_PARAM_1: ${CERC_TEST_PARAM_1:-FAILED}
-      CERC_TEST_PARAM_2: "CERC_TEST_PARAM_2_VALUE"
     volumes:
-      - test-data-bind:/data
+      - test-data:/data
-      - test-data-auto:/data2
-      - test-config:/config:ro
     ports:
       - "80"
 
 volumes:
-  test-data-bind:
+  test-data:
-  test-data-auto:
-  test-config:
@@ -1,18 +0,0 @@
-version: "3.2"
-
-services:
-  uniswap-interface:
-    image: cerc/uniswap-interface:local
-    restart: on-failure
-    environment:
-      - REACT_APP_INFURA_KEY=${CERC_INFURA_KEY}
-      - REACT_APP_AWS_API_ENDPOINT=${CERC_UNISWAP_GQL}
-    command: ["./build-app.sh"]
-    volumes:
-      - ../config/uniswap-interface/build-app.sh:/app/build-app.sh
-      - urbit_app_builds:/app-builds
-      - ../config/uniswap-interface/urbit-files/mar:/app/mar
-      - ../config/uniswap-interface/urbit-files/desk.docket-0:/app/desk.docket-0
-
-volumes:
-  urbit_app_builds:
@@ -10,7 +10,6 @@ services:
       - POSTGRES_MULTIPLE_DATABASES=azimuth-watcher,azimuth-watcher-job-queue,censures-watcher,censures-watcher-job-queue,claims-watcher,claims-watcher-job-queue,conditional-star-release-watcher,conditional-star-release-watcher-job-queue,delegated-sending-watcher,delegated-sending-watcher-job-queue,ecliptic-watcher,ecliptic-watcher-job-queue,linear-star-release-watcher,linear-star-release-watcher-job-queue,polls-watcher,polls-watcher-job-queue
       - POSTGRES_EXTENSION=azimuth-watcher-job-queue:pgcrypto,censures-watcher-job-queue:pgcrypto,claims-watcher-job-queue:pgcrypto,conditional-star-release-watcher-job-queue:pgcrypto,delegated-sending-watcher-job-queue:pgcrypto,ecliptic-watcher-job-queue:pgcrypto,linear-star-release-watcher-job-queue:pgcrypto,polls-watcher-job-queue:pgcrypto,
       - POSTGRES_PASSWORD=password
-    command: ["postgres", "-c", "max_connections=200"]
     volumes:
       - ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh
       - watcher_db_data:/var/lib/postgresql/data

@@ -23,38 +22,6 @@ services:
       retries: 15
       start_period: 10s
 
-  # Starts the azimuth-watcher job runner
-  azimuth-watcher-job-runner:
-    image: cerc/watcher-azimuth:local
-    restart: unless-stopped
-    depends_on:
-      watcher-db:
-        condition: service_healthy
-    environment:
-      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-      CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
-      CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
-      CERC_HISTORICAL_BLOCK_RANGE: 500
-      CONTRACT_ADDRESS: 0x223c067F8CF28ae173EE5CafEa60cA44C335fecB
-      CONTRACT_NAME: Azimuth
-      STARTING_BLOCK: 6784880
-    working_dir: /app/packages/azimuth-watcher
-    command: "./start-job-runner.sh"
-    volumes:
-      - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/azimuth-watcher/environments/watcher-config-template.toml
-      - ../config/watcher-azimuth/merge-toml.js:/app/packages/azimuth-watcher/merge-toml.js
-      - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/azimuth-watcher/start-job-runner.sh
-    ports:
-      - "9000"
-    healthcheck:
-      test: ["CMD", "nc", "-vz", "localhost", "9000"]
-      interval: 20s
-      timeout: 5s
-      retries: 15
-      start_period: 5s
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
-
   # Starts the azimuth-watcher server
   azimuth-watcher-server:
     image: cerc/watcher-azimuth:local

@@ -62,8 +29,8 @@ services:
     depends_on:
       watcher-db:
         condition: service_healthy
-      azimuth-watcher-job-runner:
-        condition: service_healthy
+    env_file:
+      - ../config/watcher-azimuth/watcher-params.env
     environment:
       CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
       CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}

@@ -85,37 +52,6 @@ services:
     extra_hosts:
       - "host.docker.internal:host-gateway"
 
-  # Starts the censures-watcher job runner
-  censures-watcher-job-runner:
-    image: cerc/watcher-azimuth:local
-    restart: unless-stopped
-    depends_on:
-      watcher-db:
-        condition: service_healthy
-    environment:
-      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-      CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
-      CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
-      CONTRACT_ADDRESS: 0x325f68d32BdEe6Ed86E7235ff2480e2A433D6189
-      CONTRACT_NAME: Censures
-      STARTING_BLOCK: 6784954
-    working_dir: /app/packages/censures-watcher
-    command: "./start-job-runner.sh"
-    volumes:
-      - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/censures-watcher/environments/watcher-config-template.toml
-      - ../config/watcher-azimuth/merge-toml.js:/app/packages/censures-watcher/merge-toml.js
-      - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/censures-watcher/start-job-runner.sh
-    ports:
-      - "9002"
-    healthcheck:
-      test: ["CMD", "nc", "-vz", "localhost", "9002"]
-      interval: 20s
-      timeout: 5s
-      retries: 15
-      start_period: 5s
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
-
   # Starts the censures-watcher server
   censures-watcher-server:
     image: cerc/watcher-azimuth:local

@@ -123,8 +59,8 @@ services:
     depends_on:
       watcher-db:
         condition: service_healthy
-      censures-watcher-job-runner:
-        condition: service_healthy
+    env_file:
+      - ../config/watcher-azimuth/watcher-params.env
     environment:
       CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
       CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}

@@ -146,37 +82,6 @@ services:
     extra_hosts:
       - "host.docker.internal:host-gateway"
 
-  # Starts the claims-watcher job runner
-  claims-watcher-job-runner:
-    image: cerc/watcher-azimuth:local
-    restart: unless-stopped
-    depends_on:
-      watcher-db:
-        condition: service_healthy
-    environment:
-      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-      CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
-      CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
-      CONTRACT_ADDRESS: 0xe7e7f69b34D7d9Bd8d61Fb22C33b22708947971A
-      CONTRACT_NAME: Claims
-      STARTING_BLOCK: 6784941
-    working_dir: /app/packages/claims-watcher
-    command: "./start-job-runner.sh"
-    volumes:
-      - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/claims-watcher/environments/watcher-config-template.toml
-      - ../config/watcher-azimuth/merge-toml.js:/app/packages/claims-watcher/merge-toml.js
-      - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/claims-watcher/start-job-runner.sh
-    ports:
-      - "9004"
-    healthcheck:
-      test: ["CMD", "nc", "-vz", "localhost", "9004"]
-      interval: 20s
-      timeout: 5s
-      retries: 15
-      start_period: 5s
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
-
   # Starts the claims-watcher server
   claims-watcher-server:
     image: cerc/watcher-azimuth:local

@@ -184,8 +89,8 @@ services:
     depends_on:
       watcher-db:
         condition: service_healthy
-      claims-watcher-job-runner:
-        condition: service_healthy
+    env_file:
+      - ../config/watcher-azimuth/watcher-params.env
     environment:
       CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
       CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}

@@ -207,37 +112,6 @@ services:
     extra_hosts:
       - "host.docker.internal:host-gateway"
 
-  # Starts the conditional-star-release-watcher job runner
-  conditional-star-release-watcher-job-runner:
-    image: cerc/watcher-azimuth:local
-    restart: unless-stopped
-    depends_on:
-      watcher-db:
-        condition: service_healthy
-    environment:
-      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-      CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
-      CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
-      CONTRACT_ADDRESS: 0x8C241098C3D3498Fe1261421633FD57986D74AeA
-      CONTRACT_NAME: ConditionalStarRelease
-      STARTING_BLOCK: 6828004
-    working_dir: /app/packages/conditional-star-release-watcher
-    command: "./start-job-runner.sh"
-    volumes:
-      - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/conditional-star-release-watcher/environments/watcher-config-template.toml
-      - ../config/watcher-azimuth/merge-toml.js:/app/packages/conditional-star-release-watcher/merge-toml.js
-      - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/conditional-star-release-watcher/start-job-runner.sh
-    ports:
-      - "9006"
-    healthcheck:
-      test: ["CMD", "nc", "-vz", "localhost", "9006"]
-      interval: 20s
-      timeout: 5s
-      retries: 15
-      start_period: 5s
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
-
   # Starts the conditional-star-release-watcher server
   conditional-star-release-watcher-server:
     image: cerc/watcher-azimuth:local

@@ -245,8 +119,8 @@ services:
     depends_on:
       watcher-db:
         condition: service_healthy
-      conditional-star-release-watcher-job-runner:
-        condition: service_healthy
+    env_file:
+      - ../config/watcher-azimuth/watcher-params.env
     environment:
       CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
       CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}

@@ -268,37 +142,6 @@ services:
     extra_hosts:
       - "host.docker.internal:host-gateway"
 
-  # Starts the delegated-sending-watcher job runner
-  delegated-sending-watcher-job-runner:
-    image: cerc/watcher-azimuth:local
-    restart: unless-stopped
-    depends_on:
-      watcher-db:
-        condition: service_healthy
-    environment:
-      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-      CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
-      CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
-      CONTRACT_ADDRESS: 0xf6b461fE1aD4bd2ce25B23Fe0aff2ac19B3dFA76
-      CONTRACT_NAME: DelegatedSending
-      STARTING_BLOCK: 6784956
-    working_dir: /app/packages/delegated-sending-watcher
-    command: "./start-job-runner.sh"
-    volumes:
-      - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/delegated-sending-watcher/environments/watcher-config-template.toml
-      - ../config/watcher-azimuth/merge-toml.js:/app/packages/delegated-sending-watcher/merge-toml.js
-      - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/delegated-sending-watcher/start-job-runner.sh
-    ports:
-      - "9008"
-    healthcheck:
-      test: ["CMD", "nc", "-vz", "localhost", "9008"]
-      interval: 20s
-      timeout: 5s
-      retries: 15
-      start_period: 5s
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
-
   # Starts the delegated-sending-watcher server
   delegated-sending-watcher-server:
     image: cerc/watcher-azimuth:local

@@ -306,8 +149,8 @@ services:
     depends_on:
       watcher-db:
         condition: service_healthy
-      delegated-sending-watcher-job-runner:
-        condition: service_healthy
+    env_file:
+      - ../config/watcher-azimuth/watcher-params.env
     environment:
       CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
       CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}

@@ -329,37 +172,6 @@ services:
     extra_hosts:
       - "host.docker.internal:host-gateway"
 
-  # Starts the ecliptic-watcher job runner
-  ecliptic-watcher-job-runner:
-    image: cerc/watcher-azimuth:local
-    restart: unless-stopped
-    depends_on:
-      watcher-db:
-        condition: service_healthy
-    environment:
-      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-      CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
-      CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
-      CONTRACT_ADDRESS: 0x33EeCbf908478C10614626A9D304bfe18B78DD73
-      CONTRACT_NAME: Ecliptic
-      STARTING_BLOCK: 13692129
-    working_dir: /app/packages/ecliptic-watcher
-    command: "./start-job-runner.sh"
-    volumes:
-      - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/ecliptic-watcher/environments/watcher-config-template.toml
-      - ../config/watcher-azimuth/merge-toml.js:/app/packages/ecliptic-watcher/merge-toml.js
-      - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/ecliptic-watcher/start-job-runner.sh
-    ports:
-      - "9010"
-    healthcheck:
-      test: ["CMD", "nc", "-vz", "localhost", "9010"]
-      interval: 20s
-      timeout: 5s
-      retries: 15
-      start_period: 5s
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
-
   # Starts the ecliptic-watcher server
   ecliptic-watcher-server:
     image: cerc/watcher-azimuth:local

@@ -367,8 +179,8 @@ services:
     depends_on:
       watcher-db:
         condition: service_healthy
-      ecliptic-watcher-job-runner:
-        condition: service_healthy
+    env_file:
+      - ../config/watcher-azimuth/watcher-params.env
     environment:
       CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
      CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}

@@ -390,37 +202,6 @@ services:
     extra_hosts:
       - "host.docker.internal:host-gateway"
 
-  # Starts the linear-star-release-watcher job runner
-  linear-star-release-watcher-job-runner:
-    image: cerc/watcher-azimuth:local
-    restart: unless-stopped
-    depends_on:
-      watcher-db:
-        condition: service_healthy
-    environment:
-      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-      CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
-      CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
-      CONTRACT_ADDRESS: 0x86cd9cd0992F04231751E3761De45cEceA5d1801
-      CONTRACT_NAME: LinearStarRelease
-      STARTING_BLOCK: 6784943
-    working_dir: /app/packages/linear-star-release-watcher
-    command: "./start-job-runner.sh"
-    volumes:
-      - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/linear-star-release-watcher/environments/watcher-config-template.toml
-      - ../config/watcher-azimuth/merge-toml.js:/app/packages/linear-star-release-watcher/merge-toml.js
-      - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/linear-star-release-watcher/start-job-runner.sh
-    ports:
-      - "9012"
-    healthcheck:
-      test: ["CMD", "nc", "-vz", "localhost", "9012"]
-      interval: 20s
-      timeout: 5s
-      retries: 15
-      start_period: 5s
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
-
   # Starts the linear-star-release-watcher server
   linear-star-release-watcher-server:
     image: cerc/watcher-azimuth:local

@@ -428,8 +209,8 @@ services:
     depends_on:
       watcher-db:
         condition: service_healthy
-      linear-star-release-watcher-job-runner:
-        condition: service_healthy
+    env_file:
+      - ../config/watcher-azimuth/watcher-params.env
     environment:
       CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
       CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}

@@ -451,37 +232,6 @@ services:
     extra_hosts:
       - "host.docker.internal:host-gateway"
 
-  # Starts the polls-watcher job runner
-  polls-watcher-job-runner:
-    image: cerc/watcher-azimuth:local
-    restart: unless-stopped
-    depends_on:
-      watcher-db:
-        condition: service_healthy
-    environment:
-      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-      CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
-      CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
-      CONTRACT_ADDRESS: 0x7fEcaB617c868Bb5996d99D95200D2Fa708218e4
-      CONTRACT_NAME: Polls
-      STARTING_BLOCK: 6784912
-    working_dir: /app/packages/polls-watcher
-    command: "./start-job-runner.sh"
-    volumes:
-      - ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/polls-watcher/environments/watcher-config-template.toml
-      - ../config/watcher-azimuth/merge-toml.js:/app/packages/polls-watcher/merge-toml.js
-      - ../config/watcher-azimuth/start-job-runner.sh:/app/packages/polls-watcher/start-job-runner.sh
-    ports:
-      - "9014"
-    healthcheck:
-      test: ["CMD", "nc", "-vz", "localhost", "9014"]
-      interval: 20s
-      timeout: 5s
-      retries: 15
-      start_period: 5s
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
-
   # Starts the polls-watcher server
   polls-watcher-server:
     image: cerc/watcher-azimuth:local

@@ -489,8 +239,8 @@ services:
     depends_on:
       watcher-db:
         condition: service_healthy
-      polls-watcher-job-runner:
-        condition: service_healthy
+    env_file:
+      - ../config/watcher-azimuth/watcher-params.env
     environment:
       CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
       CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
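The recurring change above drops each server's dependency on its job runner and points the servers at a shared env_file instead. The referenced ../config/watcher-azimuth/watcher-params.env is not shown in this diff; based on the variables the services read, it would plausibly carry values along these lines (a hypothetical sketch, not the actual file):

    # Hypothetical watcher-params.env contents, inferred from the
    # CERC_IPLD_ETH_* variables the watcher services consume; endpoints are placeholders.
    CERC_IPLD_ETH_RPC=http://ipld-eth-server:8081
    CERC_IPLD_ETH_GQL=http://ipld-eth-server:8082/graphql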
@@ -35,7 +35,7 @@ services:
       - ../config/watcher-merkl-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml
       - ../config/watcher-merkl-sushiswap-v3/start-job-runner.sh:/app/start-job-runner.sh
     ports:
-      - "9002:9000"
+      - "9000"
     healthcheck:
       test: ["CMD", "nc", "-v", "localhost", "9000"]
       interval: 20s

@@ -56,13 +56,14 @@ services:
     environment:
       CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
       CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
+      SUSHISWAP_START_BLOCK: ${SUSHISWAP_START_BLOCK:- 2867560}
     command: ["bash", "./start-server.sh"]
     volumes:
       - ../config/watcher-merkl-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml
       - ../config/watcher-merkl-sushiswap-v3/start-server.sh:/app/start-server.sh
     ports:
       - "127.0.0.1:3007:3008"
-      - "9003:9001"
+      - "9001"
     healthcheck:
       test: ["CMD", "nc", "-v", "localhost", "3008"]
       interval: 20s
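One detail worth noting in the added SUSHISWAP_START_BLOCK line: the space after ":-" becomes part of the default, so the variable expands with a leading space when unset. A quick shell demonstration (Compose's ${VAR:-default} interpolation treats the default word the same way):

    # With the space, the default value is " 2867560", not "2867560".
    unset SUSHISWAP_START_BLOCK
    echo "[${SUSHISWAP_START_BLOCK:- 2867560}]"   # prints [ 2867560]
    echo "[${SUSHISWAP_START_BLOCK:-2867560}]"    # prints [2867560]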
@@ -35,7 +35,7 @@ services:
       - ../config/watcher-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml
       - ../config/watcher-sushiswap-v3/start-job-runner.sh:/app/start-job-runner.sh
     ports:
-      - "9000:9000"
+      - "9000"
     healthcheck:
       test: ["CMD", "nc", "-v", "localhost", "9000"]
       interval: 20s

@@ -56,13 +56,14 @@ services:
     environment:
       CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
       CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT}
+      SUSHISWAP_START_BLOCK: ${SUSHISWAP_START_BLOCK:- 2867560}
     command: ["bash", "./start-server.sh"]
     volumes:
       - ../config/watcher-sushiswap-v3/watcher-config-template.toml:/app/environments/watcher-config-template.toml
       - ../config/watcher-sushiswap-v3/start-server.sh:/app/start-server.sh
     ports:
       - "127.0.0.1:3008:3008"
-      - "9001:9001"
+      - "9001"
     healthcheck:
       test: ["CMD", "nc", "-v", "localhost", "3008"]
       interval: 20s
@@ -1,8 +0,0 @@
-services:
-  webapp:
-    image: cerc/webapp-container:local
-    restart: always
-    environment:
-      CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
-    ports:
-      - "80"
@@ -14,17 +14,13 @@ LOGLEVEL="info"
 TRACE="--trace"
 # TRACE=""
 
-if [ "$1" == "clean" ] || [ ! -d "$HOME/.laconicd/data/blockstore.db" ]; then
 # validate dependencies are installed
 command -v jq > /dev/null 2>&1 || { echo >&2 "jq not installed. More info: https://stedolan.github.io/jq/download/"; exit 1; }
 
 # remove existing daemon and client
-rm -rf $HOME/.laconicd/*
+rm -rf ~/.laconic*
-rm -rf $HOME/.laconic/*
 
-if [ -n "`which make`" ]; then
 make install
-fi
 
 laconicd config keyring-backend $KEYRING
 laconicd config chain-id $CHAINID

@@ -117,9 +113,6 @@ if [ "$1" == "clean" ] || [ ! -d "$HOME/.laconicd/data/blockstore.db" ]; then
   if [[ $1 == "pending" ]]; then
     echo "pending mode is on, please wait for the first block committed."
   fi
-else
-  echo "Using existing database at $HOME/.laconicd. To replace, run '`basename $0` clean'"
-fi
 
 # Start the node (remove the --pruning=nothing flag if historical queries are not needed)
 laconicd start --pruning=nothing --evm.tracer=json $TRACE --log_level $LOGLEVEL --minimum-gas-prices=0.0001aphoton --json-rpc.api eth,txpool,personal,net,debug,web3,miner --api.enable --gql-server --gql-playground
stack_orchestrator/data/config/fixturenet-optimism/generate-l2-config.sh (new executable file, 37 lines)
@@ -0,0 +1,37 @@
+#!/bin/sh
+set -e
+if [ -n "$CERC_SCRIPT_DEBUG" ]; then
+  set -x
+fi
+
+CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}"
+
+# Check existing config if it exists
+if [ -f /app/jwt.txt ] && [ -f /app/rollup.json ]; then
+  echo "Found existing L2 config, cross-checking with L1 deployment config"
+
+  SOURCE_L1_CONF=$(cat /contracts-bedrock/deploy-config/getting-started.json)
+  EXP_L1_BLOCKHASH=$(echo "$SOURCE_L1_CONF" | jq -r '.l1StartingBlockTag')
+  EXP_BATCHER=$(echo "$SOURCE_L1_CONF" | jq -r '.batchSenderAddress')
+
+  GEN_L2_CONF=$(cat /app/rollup.json)
+  GEN_L1_BLOCKHASH=$(echo "$GEN_L2_CONF" | jq -r '.genesis.l1.hash')
+  GEN_BATCHER=$(echo "$GEN_L2_CONF" | jq -r '.genesis.system_config.batcherAddr')
+
+  if [ "$EXP_L1_BLOCKHASH" = "$GEN_L1_BLOCKHASH" ] && [ "$EXP_BATCHER" = "$GEN_BATCHER" ]; then
+    echo "Config cross-checked, exiting"
+    exit 0
+  fi
+
+  echo "Existing L2 config doesn't match the L1 deployment config, please clear L2 config volume before starting"
+  exit 1
+fi
+
+op-node genesis l2 \
+  --deploy-config /contracts-bedrock/deploy-config/getting-started.json \
+  --deployment-dir /contracts-bedrock/deployments/getting-started/ \
+  --outfile.l2 /app/genesis.json \
+  --outfile.rollup /app/rollup.json \
+  --l1-rpc $CERC_L1_RPC
+
+openssl rand -hex 32 > /app/jwt.txt
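When debugging a "config doesn't match" failure from the script above, the same cross-check can be reproduced by hand. A minimal sketch using the same jq paths as the script:

    # Compare the L1 deploy config against the generated rollup.json manually.
    jq -r '.l1StartingBlockTag' /contracts-bedrock/deploy-config/getting-started.json
    jq -r '.genesis.l1.hash' /app/rollup.json
    jq -r '.batchSenderAddress' /contracts-bedrock/deploy-config/getting-started.json
    jq -r '.genesis.system_config.batcherAddr' /app/rollup.json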
@@ -1,172 +0,0 @@
-#!/bin/bash
-set -e
-if [ -n "$CERC_SCRIPT_DEBUG" ]; then
-  set -x
-fi
-
-CERC_L1_CHAIN_ID="${CERC_L1_CHAIN_ID:-${DEFAULT_CERC_L1_CHAIN_ID}}"
-CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}"
-
-CERC_L1_ACCOUNTS_CSV_URL="${CERC_L1_ACCOUNTS_CSV_URL:-${DEFAULT_CERC_L1_ACCOUNTS_CSV_URL}}"
-
-export DEPLOYMENT_CONTEXT="$CERC_L1_CHAIN_ID"
-# Optional create2 salt for deterministic deployment of contract implementations
-export IMPL_SALT=$(openssl rand -hex 32)
-
-echo "Using L1 RPC endpoint ${CERC_L1_RPC}"
-
-# Exit if a deployment already exists (on restarts)
-if [ -d "/l1-deployment/$DEPLOYMENT_CONTEXT" ]; then
-  echo "Deployment directory /l1-deployment/$DEPLOYMENT_CONTEXT, checking OptimismPortal deployment"
-
-  OPTIMISM_PORTAL_ADDRESS=$(cat /l1-deployment/$DEPLOYMENT_CONTEXT/OptimismPortal.json | jq -r .address)
-  contract_code=$(cast code $OPTIMISM_PORTAL_ADDRESS --rpc-url $CERC_L1_RPC)
-
-  if [ -z "${contract_code#0x}" ]; then
-    echo "Error: A deployment directory was found in the volume, but no contract code was found on-chain at the associated address. Please clear L1 deployment volume before restarting."
-    exit 1
-  else
-    echo "Deployment found, exiting (successfully)."
-    exit 0
-  fi
-fi
-
-wait_for_block() {
-  local block="$1" # Block to wait for
-  local timeout="$2" # Max time to wait in seconds
-
-  echo "Waiting for block $block."
-  i=0
-  loops=$(($timeout/10))
-  while [ -z "$block_result" ] && [[ "$i" -lt "$loops" ]]; do
-    sleep 10
-    echo "Checking..."
-    block_result=$(cast block $block --rpc-url $CERC_L1_RPC | grep -E "(timestamp|hash|number)" || true)
-    i=$(($i + 1))
-  done
-}
-
-# We need four accounts and their private keys for the deployment: Admin, Proposer, Batcher, and Sequencer
-# If $CERC_L1_ADDRESS and $CERC_L1_PRIV_KEY have been set, we'll assign it to Admin and generate/fund the remaining three accounts from it
-# If not, we'll assume the L1 is the stack's own fixturenet-eth and use the pre-funded accounts/keys from $CERC_L1_ACCOUNTS_CSV_URL
-if [ -n "$CERC_L1_ADDRESS" ] && [ -n "$CERC_L1_PRIV_KEY" ]; then
-  wallet1=$(cast wallet new)
-  wallet2=$(cast wallet new)
-  wallet3=$(cast wallet new)
-  # Admin
-  ADMIN=$CERC_L1_ADDRESS
-  ADMIN_KEY=$CERC_L1_PRIV_KEY
-  # Proposer
-  PROPOSER=$(echo "$wallet1" | awk '/Address:/{print $2}')
-  PROPOSER_KEY=$(echo "$wallet1" | awk '/Private key:/{print $3}')
-  # Batcher
-  BATCHER=$(echo "$wallet2" | awk '/Address:/{print $2}')
-  BATCHER_KEY=$(echo "$wallet2" | awk '/Private key:/{print $3}')
-  # Sequencer
-  SEQ=$(echo "$wallet3" | awk '/Address:/{print $2}')
-  SEQ_KEY=$(echo "$wallet3" | awk '/Private key:/{print $3}')
-
-  echo "Funding accounts."
-  wait_for_block 1 300
-  cast send --from $ADMIN --rpc-url $CERC_L1_RPC --value 5ether $PROPOSER --private-key $ADMIN_KEY
-  cast send --from $ADMIN --rpc-url $CERC_L1_RPC --value 10ether $BATCHER --private-key $ADMIN_KEY
-  cast send --from $ADMIN --rpc-url $CERC_L1_RPC --value 2ether $SEQ --private-key $ADMIN_KEY
-else
-  curl -o accounts.csv $CERC_L1_ACCOUNTS_CSV_URL
-  # Admin
-  ADMIN=$(awk -F ',' 'NR == 1 {print $2}' accounts.csv)
-  ADMIN_KEY=$(awk -F ',' 'NR == 1 {print $3}' accounts.csv)
-  # Proposer
-  PROPOSER=$(awk -F ',' 'NR == 2 {print $2}' accounts.csv)
-  PROPOSER_KEY=$(awk -F ',' 'NR == 2 {print $3}' accounts.csv)
-  # Batcher
-  BATCHER=$(awk -F ',' 'NR == 3 {print $2}' accounts.csv)
-  BATCHER_KEY=$(awk -F ',' 'NR == 3 {print $3}' accounts.csv)
-  # Sequencer
-  SEQ=$(awk -F ',' 'NR == 4 {print $2}' accounts.csv)
-  SEQ_KEY=$(awk -F ',' 'NR == 4 {print $3}' accounts.csv)
-fi
-
-echo "Using accounts:"
-echo -e "Admin: $ADMIN\nProposer: $PROPOSER\nBatcher: $BATCHER\nSequencer: $SEQ"
-
-# These accounts will be needed by other containers, so write them to a shared volume
-echo "Writing accounts/private keys to volume l2_accounts."
-accounts_json=$(jq -n \
-  --arg Admin "$ADMIN" --arg AdminKey "$ADMIN_KEY" \
-  --arg Proposer "$PROPOSER" --arg ProposerKey "$PROPOSER_KEY" \
-  --arg Batcher "$BATCHER" --arg BatcherKey "$BATCHER_KEY" \
-  --arg Seq "$SEQ" --arg SeqKey "$SEQ_KEY" \
-  '{Admin: $Admin, AdminKey: $AdminKey, Proposer: $Proposer, ProposerKey: $ProposerKey, Batcher: $Batcher, BatcherKey: $BatcherKey, Seq: $Seq, SeqKey: $SeqKey}')
-echo "$accounts_json" > "/l2-accounts/accounts.json"
-
-# Get a finalized L1 block to set as the starting point for the L2 deployment
-# If the chain is a freshly created fixturenet-eth, a finalized block won't be available for many minutes; rather than wait, we can use block 1
-echo "Checking L1 for finalized block..."
-finalized=$(cast block finalized --rpc-url $CERC_L1_RPC | grep -E "(timestamp|hash|number)" || true)
-
-if [ -n "$finalized" ]; then
-  # finalized block was found
-  start_block=$finalized
-else
-  # assume fresh chain and use block 1 instead
-  echo "No finalized block. Using block 1 instead."
-  # wait for 20 or so blocks to be safe
-  wait_for_block 24 300
-  start_block=$(cast block 1 --rpc-url $CERC_L1_RPC | grep -E "(timestamp|hash|number)" || true)
-fi
-
-if [ -z "$start_block" ]; then
-  echo "Unable to query chain for starting block. Exiting..."
-  exit 1
-fi
-
-BLOCKHASH=$(echo $start_block | awk -F ' ' '{print $2}')
-HEIGHT=$(echo $start_block | awk -F ' ' '{print $4}')
-TIMESTAMP=$(echo $start_block | awk -F ' ' '{print $6}')
-
-echo "Using block as deployment point:"
-echo "Height: $HEIGHT"
-echo "Hash: $BLOCKHASH"
-echo "Timestamp: $TIMESTAMP"
-
-# Fill out the deployment template (./deploy-config/getting-started.json) with our values:
-echo "Writing deployment config."
-deploy_config_file="deploy-config/$DEPLOYMENT_CONTEXT.json"
-cp deploy-config/getting-started.json $deploy_config_file
-sed -i "s/\"l1ChainID\": .*/\"l1ChainID\": $DEPLOYMENT_CONTEXT,/g" $deploy_config_file
-sed -i "s/ADMIN/$ADMIN/g" $deploy_config_file
-sed -i "s/PROPOSER/$PROPOSER/g" $deploy_config_file
-sed -i "s/BATCHER/$BATCHER/g" $deploy_config_file
-sed -i "s/SEQUENCER/$SEQ/g" $deploy_config_file
-sed -i "s/BLOCKHASH/$BLOCKHASH/g" $deploy_config_file
-sed -i "s/TIMESTAMP/$TIMESTAMP/g" $deploy_config_file
-
-mkdir -p deployments/$DEPLOYMENT_CONTEXT
-
-# Deployment requires the create2 deterministic proxy contract be published on L1 at address 0x4e59b44847b379578588920ca78fbf26c0b4956c
-# See: https://github.com/Arachnid/deterministic-deployment-proxy
-echo "Deploying create2 proxy contract..."
-echo "Funding deployment signer address"
-deployment_signer="0x3fab184622dc19b6109349b94811493bf2a45362"
-cast send --from $ADMIN --rpc-url $CERC_L1_RPC --value 0.5ether $deployment_signer --private-key $ADMIN_KEY
-echo "Deploying contract..."
-raw_bytes="0xf8a58085174876e800830186a08080b853604580600e600039806000f350fe7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf31ba02222222222222222222222222222222222222222222222222222222222222222a02222222222222222222222222222222222222222222222222222222222222222"
-
-cast publish --rpc-url $CERC_L1_RPC $raw_bytes
-
-# Create the L2 deployment
-echo "Deploying L1 Optimism contracts..."
-forge script scripts/Deploy.s.sol:Deploy --private-key $ADMIN_KEY --broadcast --rpc-url $CERC_L1_RPC
-forge script scripts/Deploy.s.sol:Deploy --sig 'sync()' --private-key $ADMIN_KEY --broadcast --rpc-url $CERC_L1_RPC
-
-echo "*************************************"
-echo "Done deploying contracts."
-
-# Copy files needed by other containers to the appropriate shared volumes
-echo "Copying deployment artifacts volume l1_deployment and deploy-config to volume l2_config"
-cp -a /app/packages/contracts-bedrock/deployments/$DEPLOYMENT_CONTEXT /l1-deployment
-cp /app/packages/contracts-bedrock/deploy-config/$DEPLOYMENT_CONTEXT.json /l2-config
-openssl rand -hex 32 > /l2-config/l2-jwt.txt
-
-echo "Deployment successful. Exiting"
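The deleted script parsed `cast wallet new` output with awk; for context, that output is two labeled lines, which is why the field positions differ. A sketch of the same extraction pattern, outside the stack:

    # `cast wallet new` prints lines like:
    #   Address:     0x...
    #   Private key: 0x...
    wallet=$(cast wallet new)
    addr=$(echo "$wallet" | awk '/Address:/{print $2}')
    key=$(echo "$wallet" | awk '/Private key:/{print $3}')
    echo "new account: $addr"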
stack_orchestrator/data/config/fixturenet-optimism/optimism-contracts/run.sh (new executable file, 131 lines)
@@ -0,0 +1,131 @@
+#!/bin/bash
+set -e
+if [ -n "$CERC_SCRIPT_DEBUG" ]; then
+  set -x
+fi
+
+CERC_L1_CHAIN_ID="${CERC_L1_CHAIN_ID:-${DEFAULT_CERC_L1_CHAIN_ID}}"
+CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}"
+
+CERC_L1_ACCOUNTS_CSV_URL="${CERC_L1_ACCOUNTS_CSV_URL:-${DEFAULT_CERC_L1_ACCOUNTS_CSV_URL}}"
+
+echo "Using L1 RPC endpoint ${CERC_L1_RPC}"
+
+IMPORT_1="import './verify-contract-deployment'"
+IMPORT_2="import './rekey-json'"
+IMPORT_3="import './send-balance'"
+
+# Append mounted tasks to tasks/index.ts file if not present
+if ! grep -Fxq "$IMPORT_1" tasks/index.ts; then
+  echo "$IMPORT_1" >> tasks/index.ts
+  echo "$IMPORT_2" >> tasks/index.ts
+  echo "$IMPORT_3" >> tasks/index.ts
+fi
+
+# Update the chainId in the hardhat config
+sed -i "/getting-started/ {n; s/.*chainId.*/ chainId: $CERC_L1_CHAIN_ID,/}" hardhat.config.ts
+
+# Exit if a deployment already exists (on restarts)
+# Note: fixturenet-eth-geth currently starts fresh on a restart
+if [ -d "deployments/getting-started" ]; then
+  echo "Deployment directory deployments/getting-started found, checking SystemDictator deployment"
+
+  # Read JSON file into variable
+  SYSTEM_DICTATOR_DETAILS=$(cat deployments/getting-started/SystemDictator.json)
+
+  # Parse JSON into variables
+  SYSTEM_DICTATOR_ADDRESS=$(echo "$SYSTEM_DICTATOR_DETAILS" | jq -r '.address')
+  SYSTEM_DICTATOR_TXHASH=$(echo "$SYSTEM_DICTATOR_DETAILS" | jq -r '.transactionHash')
+
+  if yarn hardhat verify-contract-deployment --contract "${SYSTEM_DICTATOR_ADDRESS}" --transaction-hash "${SYSTEM_DICTATOR_TXHASH}"; then
+    echo "Deployment verfication successful, exiting"
+    exit 0
+  else
+    echo "Deployment verfication failed, please clear L1 deployment volume before starting"
+    exit 1
+  fi
+fi
+
+# Generate the L2 account addresses
+yarn hardhat rekey-json --output /l2-accounts/keys.json
+
+# Read JSON file into variable
+KEYS_JSON=$(cat /l2-accounts/keys.json)
+
+# Parse JSON into variables
+ADMIN_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Admin.address')
+ADMIN_PRIV_KEY=$(echo "$KEYS_JSON" | jq -r '.Admin.privateKey')
+PROPOSER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Proposer.address')
+BATCHER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Batcher.address')
+SEQUENCER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Sequencer.address')
+
+# Get the private keys of L1 accounts
+if [ -n "$CERC_L1_ACCOUNTS_CSV_URL" ] && \
+  l1_accounts_response=$(curl -L --write-out '%{http_code}' --silent --output /dev/null "$CERC_L1_ACCOUNTS_CSV_URL") && \
+  [ "$l1_accounts_response" -eq 200 ];
+then
+  echo "Fetching L1 account credentials using provided URL"
+  mkdir -p /geth-accounts
+  wget -O /geth-accounts/accounts.csv "$CERC_L1_ACCOUNTS_CSV_URL"
+
+  CERC_L1_ADDRESS=$(head -n 1 /geth-accounts/accounts.csv | cut -d ',' -f 2)
+  CERC_L1_PRIV_KEY=$(head -n 1 /geth-accounts/accounts.csv | cut -d ',' -f 3)
+  CERC_L1_ADDRESS_2=$(awk -F, 'NR==2{print $(NF-1)}' /geth-accounts/accounts.csv)
+  CERC_L1_PRIV_KEY_2=$(awk -F, 'NR==2{print $NF}' /geth-accounts/accounts.csv)
+else
+  echo "Couldn't fetch L1 account credentials, using them from env"
+fi
+
+# Send balances to the above L2 addresses
+yarn hardhat send-balance --to "${ADMIN_ADDRESS}" --amount 2 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started
+yarn hardhat send-balance --to "${PROPOSER_ADDRESS}" --amount 5 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started
+yarn hardhat send-balance --to "${BATCHER_ADDRESS}" --amount 1000 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started
+
+echo "Balances sent to L2 accounts"
+
+# Select a finalized L1 block as the starting point for roll ups
+until FINALIZED_BLOCK=$(cast block finalized --rpc-url "$CERC_L1_RPC"); do
+  echo "Waiting for a finalized L1 block to exist, retrying after 10s"
+  sleep 10
+done
+
+L1_BLOCKNUMBER=$(echo "$FINALIZED_BLOCK" | awk '/number/{print $2}')
+L1_BLOCKHASH=$(echo "$FINALIZED_BLOCK" | awk '/hash/{print $2}')
+L1_BLOCKTIMESTAMP=$(echo "$FINALIZED_BLOCK" | awk '/timestamp/{print $2}')
+
+echo "Selected L1 block ${L1_BLOCKNUMBER} as the starting block for roll ups"
+
+# Update the deployment config
+sed -i 's/"l2OutputOracleStartingTimestamp": TIMESTAMP/"l2OutputOracleStartingTimestamp": '"$L1_BLOCKTIMESTAMP"'/g' deploy-config/getting-started.json
+jq --arg chainid "$CERC_L1_CHAIN_ID" '.l1ChainID = ($chainid | tonumber)' deploy-config/getting-started.json > tmp.json && mv tmp.json deploy-config/getting-started.json
+
+node update-config.js deploy-config/getting-started.json "$ADMIN_ADDRESS" "$PROPOSER_ADDRESS" "$BATCHER_ADDRESS" "$SEQUENCER_ADDRESS" "$L1_BLOCKHASH"
+
+echo "Updated the deployment config"
+
+# Create a .env file
+echo "L1_RPC=$CERC_L1_RPC" > .env
+echo "PRIVATE_KEY_DEPLOYER=$ADMIN_PRIV_KEY" >> .env
+
+echo "Deploying the L1 smart contracts, this will take a while..."
+
+# Deploy the L1 smart contracts
+yarn hardhat deploy --network getting-started --tags l1
+
+echo "Deployed the L1 smart contracts"
+
+# Read Proxy contract's JSON and get the address
+PROXY_JSON=$(cat deployments/getting-started/Proxy__OVM_L1StandardBridge.json)
+PROXY_ADDRESS=$(echo "$PROXY_JSON" | jq -r '.address')
+
+# Send balance to the above Proxy contract in L1 for reflecting balance in L2
+# First account
+yarn hardhat send-balance --to "${PROXY_ADDRESS}" --amount 1 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started
+# Second account
+yarn hardhat send-balance --to "${PROXY_ADDRESS}" --amount 1 --private-key "${CERC_L1_PRIV_KEY_2}" --network getting-started
+
+echo "Balance sent to Proxy L2 contract"
+echo "Use following accounts for transactions in L2:"
+echo "${CERC_L1_ADDRESS}"
+echo "${CERC_L1_ADDRESS_2}"
+echo "Done"
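The `grep -Fxq` guard in the new run.sh is what makes the import-append step safe on restarts: -F matches a fixed string, -x requires a whole-line match, and -q suppresses output so only the exit code is used. A generic sketch of the same idempotent-append idiom:

    # Append a line to a file only if that exact line is not already present.
    line="import './verify-contract-deployment'"
    grep -Fxq "$line" tasks/index.ts || echo "$line" >> tasks/index.ts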
@@ -0,0 +1,36 @@
+const fs = require('fs')
+
+// Get the command-line argument
+const configFile = process.argv[2]
+const adminAddress = process.argv[3]
+const proposerAddress = process.argv[4]
+const batcherAddress = process.argv[5]
+const sequencerAddress = process.argv[6]
+const blockHash = process.argv[7]
+
+// Read the JSON file
+const configData = fs.readFileSync(configFile)
+const configObj = JSON.parse(configData)
+
+// Update the finalSystemOwner property with the ADMIN_ADDRESS value
+configObj.finalSystemOwner =
+  configObj.portalGuardian =
+  configObj.controller =
+  configObj.l2OutputOracleChallenger =
+  configObj.proxyAdminOwner =
+  configObj.baseFeeVaultRecipient =
+  configObj.l1FeeVaultRecipient =
+  configObj.sequencerFeeVaultRecipient =
+  configObj.governanceTokenOwner =
+    adminAddress
+
+configObj.l2OutputOracleProposer = proposerAddress
+
+configObj.batchSenderAddress = batcherAddress
+
+configObj.p2pSequencerAddress = sequencerAddress
+
+configObj.l1StartingBlockTag = blockHash
+
+// Write the updated JSON object back to the file
+fs.writeFileSync(configFile, JSON.stringify(configObj, null, 2))
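This helper is driven by run.sh above with positional arguments in the same order the script reads them from process.argv. An example invocation (the addresses and hash are placeholders):

    # config file, admin, proposer, batcher, sequencer, L1 block hash — in that order.
    node update-config.js deploy-config/getting-started.json \
      "$ADMIN_ADDRESS" "$PROPOSER_ADDRESS" "$BATCHER_ADDRESS" "$SEQUENCER_ADDRESS" "$L1_BLOCKHASH"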
@@ -1,155 +0,0 @@
-#!/bin/bash
-if [ -n "$CERC_SCRIPT_DEBUG" ]; then
-  set -x
-fi
-
-# To facilitate deploying the Optimism contracts, a few additional arguments have been added to the geth start command
-# Otherwise this script is unchanged from the image's default startup script
-
-ETHERBASE=`cat /opt/testnet/build/el/accounts.csv | head -1 | cut -d',' -f2`
-NETWORK_ID=`cat /opt/testnet/el/el-config.yaml | grep 'chain_id' | awk '{ print $2 }'`
-NETRESTRICT=`ip addr | grep inet | grep -v '127.0' | awk '{print $2}'`
-CERC_ETH_DATADIR="${CERC_ETH_DATADIR:-$HOME/ethdata}"
-CERC_PLUGINS_DIR="${CERC_PLUGINS_DIR:-/usr/local/lib/plugeth}"
-
-cd /opt/testnet/build/el
-python3 -m http.server 9898 &
-cd $HOME
-
-START_CMD="geth"
-if [ "true" == "$CERC_REMOTE_DEBUG" ] && [ -x "/usr/local/bin/dlv" ]; then
-  START_CMD="/usr/local/bin/dlv --listen=:40000 --headless=true --api-version=2 --accept-multiclient exec /usr/local/bin/geth --continue --"
-fi
-
-# See https://linuxconfig.org/how-to-propagate-a-signal-to-child-processes-from-a-bash-script
-cleanup() {
-  echo "Signal received, cleaning up..."
-
-  # Kill the child process first (CERC_REMOTE_DEBUG=true uses dlv which starts geth as a child process)
-  pkill -P ${geth_pid}
-  sleep 2
-  kill $(jobs -p)
-
-  wait
-  echo "Done"
-}
-trap 'cleanup' SIGINT SIGTERM
-
-if [ "true" == "$RUN_BOOTNODE" ]; then
-  $START_CMD \
-    --datadir="${CERC_ETH_DATADIR}" \
-    --nodekeyhex="${BOOTNODE_KEY}" \
-    --nodiscover \
-    --ipcdisable \
-    --networkid=${NETWORK_ID} \
-    --netrestrict="${NETRESTRICT}" \
-    &
-
-  geth_pid=$!
-else
-  cd /opt/testnet/accounts
-  ./import_keys.sh
-
-  echo -n "$JWT" > /opt/testnet/build/el/jwtsecret
-
-  if [ "$CERC_RUN_STATEDIFF" == "detect" ] && [ -n "$CERC_STATEDIFF_DB_HOST" ]; then
-    dig_result=$(dig $CERC_STATEDIFF_DB_HOST +short)
-    dig_status_code=$?
-    if [[ $dig_status_code = 0 && -n $dig_result ]]; then
-      echo "Statediff DB at $CERC_STATEDIFF_DB_HOST"
-      CERC_RUN_STATEDIFF="true"
-    else
-      echo "No statediff DB available."
-      CERC_RUN_STATEDIFF="false"
-    fi
-  fi
-
-  STATEDIFF_OPTS=""
-  if [ "$CERC_RUN_STATEDIFF" == "true" ]; then
-    ready=0
-    echo "Waiting for statediff DB..."
-    while [ $ready -eq 0 ]; do
-      sleep 1
-      export PGPASSWORD="$CERC_STATEDIFF_DB_PASSWORD"
-      result=$(psql -h "$CERC_STATEDIFF_DB_HOST" \
-        -p "$CERC_STATEDIFF_DB_PORT" \
-        -U "$CERC_STATEDIFF_DB_USER" \
-        -d "$CERC_STATEDIFF_DB_NAME" \
-        -t -c 'select max(version_id) from goose_db_version;' 2>/dev/null | awk '{ print $1 }')
-      if [ -n "$result" ]; then
-        echo "DB ready..."
-        if [ $result -ge $CERC_STATEDIFF_DB_GOOSE_MIN_VER ]; then
-          ready=1
-        else
-          echo "DB not at required version (want $CERC_STATEDIFF_DB_GOOSE_MIN_VER, have $result)"
-        fi
-      fi
-    done
-    STATEDIFF_OPTS="--statediff \
-      --statediff.db.host=$CERC_STATEDIFF_DB_HOST \
-      --statediff.db.name=$CERC_STATEDIFF_DB_NAME \
-      --statediff.db.nodeid=$CERC_STATEDIFF_DB_NODE_ID \
-      --statediff.db.password=$CERC_STATEDIFF_DB_PASSWORD \
-      --statediff.db.port=$CERC_STATEDIFF_DB_PORT \
-      --statediff.db.user=$CERC_STATEDIFF_DB_USER \
-      --statediff.db.logstatements=${CERC_STATEDIFF_DB_LOG_STATEMENTS:-false} \
-      --statediff.db.copyfrom=${CERC_STATEDIFF_DB_COPY_FROM:-true} \
-      --statediff.waitforsync=true \
-      --statediff.workers=${CERC_STATEDIFF_WORKERS:-1} \
-      --statediff.writing=true"
-
-    if [ -d "${CERC_PLUGINS_DIR}" ]; then
-      # With plugeth, we separate the statediff options by prefixing with ' -- '
-      STATEDIFF_OPTS="--pluginsdir "${CERC_PLUGINS_DIR}" -- ${STATEDIFF_OPTS}"
-    fi
-  fi
-
-  # unlock account[0]
-  echo $ACCOUNT_PASSWORD > "$CERC_ETH_DATADIR/password"
-
-  $START_CMD \
-    --datadir="${CERC_ETH_DATADIR}" \
-    --bootnodes="${ENODE}" \
-    --allow-insecure-unlock \
-    --password="${CERC_ETH_DATADIR}/password" \
-    --unlock="$ETHERBASE" \
-    --rpc.allow-unprotected-txs \
-    --http \
-    --http.addr="0.0.0.0" \
-    --http.vhosts="*" \
-    --http.api="${CERC_GETH_HTTP_APIS:-eth,web3,net,admin,personal,debug,statediff}" \
-    --http.corsdomain="*" \
-    --authrpc.addr="0.0.0.0" \
-    --authrpc.vhosts="*" \
-    --authrpc.jwtsecret="/opt/testnet/build/el/jwtsecret" \
-    --ws \
-    --ws.addr="0.0.0.0" \
-    --ws.origins="*" \
-    --ws.api="${CERC_GETH_WS_APIS:-eth,web3,net,admin,personal,debug,statediff}" \
-    --http.corsdomain="*" \
-    --networkid="${NETWORK_ID}" \
-    --netrestrict="${NETRESTRICT}" \
-    --gcmode archive \
-    --txlookuplimit=0 \
-    --cache.preimages \
-    --syncmode=full \
-    --mine \
-    --miner.threads=1 \
-    --metrics \
-    --metrics.addr="0.0.0.0" \
-    --verbosity=${CERC_GETH_VERBOSITY:-3} \
-    --log.vmodule="${CERC_GETH_VMODULE:-statediff/*=5}" \
-    --miner.etherbase="${ETHERBASE}" \
-    ${STATEDIFF_OPTS} \
-    &
-
-  geth_pid=$!
-fi
-
-wait $geth_pid
-
-if [ "true" == "$CERC_KEEP_RUNNING_AFTER_GETH_EXIT" ]; then
-  while [ 1 -eq 1 ]; do
-    sleep 60
-  done
-fi
@ -6,14 +6,22 @@ fi

 CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}"

-# Start op-batcher
-L2_RPC="http://op-geth:8545"
-ROLLUP_RPC="http://op-node:8547"
-BATCHER_KEY=$(cat /l2-accounts/accounts.json | jq -r .BatcherKey)
+# Get Batcher key from keys.json
+BATCHER_KEY=$(jq -r '.Batcher.privateKey' /l2-accounts/keys.json | tr -d '"')
+
+cleanup() {
+    echo "Signal received, cleaning up..."
+    kill ${batcher_pid}
+    wait
+    echo "Done"
+}
+trap 'cleanup' INT TERM
+
+# Run op-batcher
 op-batcher \
-    --l2-eth-rpc=$L2_RPC \
-    --rollup-rpc=$ROLLUP_RPC \
+    --l2-eth-rpc=http://op-geth:8545 \
+    --rollup-rpc=http://op-node:8547 \
     --poll-interval=1s \
     --sub-safety-margin=6 \
     --num-confirmations=1 \
@ -24,4 +32,8 @@ op-batcher \
     --rpc.enable-admin \
     --max-channel-duration=1 \
     --l1-eth-rpc=$CERC_L1_RPC \
-    --private-key="${BATCHER_KEY#0x}"
+    --private-key=$BATCHER_KEY \
+    &
+
+batcher_pid=$!
+wait $batcher_pid
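The rewritten script derives the batcher credentials from /l2-accounts/keys.json rather than the old accounts.json. A minimal sketch of the file shape those jq filters assume; the key values here are placeholders, not real keys:

# Hypothetical keys.json matching the jq paths used above.
cat > /tmp/keys.json <<'EOF'
{
  "Batcher":   { "privateKey": "0x1111", "address": "0xaaaa" },
  "Proposer":  { "privateKey": "0x2222", "address": "0xbbbb" },
  "Sequencer": { "privateKey": "0x3333", "address": "0xcccc" }
}
EOF
# jq -r already emits an unquoted string; the tr -d '"' in the script is a belt-and-braces step.
BATCHER_KEY=$(jq -r '.Batcher.privateKey' /tmp/keys.json | tr -d '"')
echo "$BATCHER_KEY"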
@ -4,36 +4,61 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
     set -x
 fi

-l2_genesis_file="/l2-config/genesis.json"
-
-# Check for genesis file; if necessary, wait on op-node to generate
-timeout=300 # 5 minutes
-start_time=$(date +%s)
-elapsed_time=0
-echo "Checking for L2 genesis file at location $l2_genesis_file"
-while [ ! -f "$l2_genesis_file" ] && [ $elapsed_time -lt $timeout ]; do
-    echo "Waiting for L2 genesis file to be generated..."
-    sleep 10
-    current_time=$(date +%s)
-    elapsed_time=$((current_time - start_time))
-done
-
-if [ ! -f "$l2_genesis_file" ]; then
-    echo "L2 genesis file not found after timeout of $timeout seconds. Exiting..."
+# TODO: Add in container build or use other tool
+echo "Installing jq"
+apk update && apk add jq
+
+# Get Sequencer key from keys.json
+SEQUENCER_KEY=$(jq -r '.Sequencer.privateKey' /l2-accounts/keys.json | tr -d '"')
+
+# Initialize op-geth if datadir/geth not found
+if [ -f /op-node/jwt.txt ] && [ -d datadir/geth ]; then
+    echo "Found existing datadir, checking block signer key"
+
+    BLOCK_SIGNER_KEY=$(cat datadir/block-signer-key)
+
+    if [ "$SEQUENCER_KEY" = "$BLOCK_SIGNER_KEY" ]; then
+        echo "Sequencer and block signer keys match, skipping initialization"
+    else
+        echo "Sequencer and block signer keys don't match, please clear L2 geth data volume before starting"
         exit 1
     fi
-
-# Initialize geth from our generated L2 genesis file (if not already initialized)
-data_dir="/datadir"
-if [ ! -d "$datadir/geth" ]; then
-    geth init --datadir=$data_dir $l2_genesis_file
+else
+    echo "Initializing op-geth"
+
+    mkdir -p datadir
+    echo "pwd" > datadir/password
+    echo $SEQUENCER_KEY > datadir/block-signer-key
+
+    geth account import --datadir=datadir --password=datadir/password datadir/block-signer-key
+
+    while [ ! -f "/op-node/jwt.txt" ]
+    do
+        echo "Config files not created. Checking after 5 seconds."
+        sleep 5
+    done
+
+    echo "Config files created by op-node, proceeding with the initialization..."
+
+    geth init --datadir=datadir /op-node/genesis.json
+    echo "Node Initialized"
 fi

-# Start op-geth
-jwt_file="/l2-config/l2-jwt.txt"
+SEQUENCER_ADDRESS=$(jq -r '.Sequencer.address' /l2-accounts/keys.json | tr -d '"')
+echo "SEQUENCER_ADDRESS: ${SEQUENCER_ADDRESS}"
+
+cleanup() {
+    echo "Signal received, cleaning up..."
+    kill ${geth_pid}
+    wait
+    echo "Done"
+}
+trap 'cleanup' INT TERM
+
+# Run op-geth
 geth \
-    --datadir=$data_dir \
+    --datadir ./datadir \
     --http \
     --http.corsdomain="*" \
     --http.vhosts="*" \
@ -52,5 +77,14 @@ geth \
     --authrpc.vhosts="*" \
     --authrpc.addr=0.0.0.0 \
     --authrpc.port=8551 \
-    --authrpc.jwtsecret=$jwt_file \
-    --rollup.disabletxpoolgossip=true
+    --authrpc.jwtsecret=/op-node/jwt.txt \
+    --rollup.disabletxpoolgossip=true \
+    --password=./datadir/password \
+    --allow-insecure-unlock \
+    --mine \
+    --miner.etherbase=$SEQUENCER_ADDRESS \
+    --unlock=$SEQUENCER_ADDRESS \
+    &
+
+geth_pid=$!
+wait $geth_pid
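The initialization branch above is guarded by comparing the sequencer key against the block-signer-key file left behind by a previous run: the stored key acts as a sentinel marking the datadir as initialized for one specific sequencer, and a mismatch aborts rather than re-running geth init over live data. A condensed restatement of that guard, as a sketch rather than an addition to the script:

# Sketch: the init-once guard, in isolation.
if [ -d datadir/geth ]; then
    if [ "$(cat datadir/block-signer-key)" = "$SEQUENCER_KEY" ]; then
        echo "datadir already initialized for this sequencer"
    else
        echo "datadir belongs to a different sequencer key; clear the volume first" >&2
        exit 1
    fi
fi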
@ -4,42 +4,23 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
     set -x
 fi

-CERC_L1_CHAIN_ID="${CERC_L1_CHAIN_ID:-${DEFAULT_CERC_L1_CHAIN_ID}}"
 CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}"
-DEPLOYMENT_CONTEXT="$CERC_L1_CHAIN_ID"
-
-deploy_config_file="/l2-config/$DEPLOYMENT_CONTEXT.json"
-deployment_dir="/l1-deployment/$DEPLOYMENT_CONTEXT"
-genesis_outfile="/l2-config/genesis.json"
-rollup_outfile="/l2-config/rollup.json"
-
-# Generate L2 genesis (if not already done)
-if [ ! -f "$genesis_outfile" ] || [ ! -f "$rollup_outfile" ]; then
-    op-node genesis l2 \
-        --deploy-config $deploy_config_file \
-        --deployment-dir $deployment_dir \
-        --outfile.l2 $genesis_outfile \
-        --outfile.rollup $rollup_outfile \
-        --l1-rpc $CERC_L1_RPC
-fi
-
-# Start op-node
-SEQ_KEY=$(cat /l2-accounts/accounts.json | jq -r .SeqKey)
-jwt_file=/l2-config/l2-jwt.txt
-L2_AUTH="http://op-geth:8551"
-RPC_KIND=any # this can optionally be set to a preset for common node providers like Infura, Alchemy, etc.

+# Get Sequencer key from keys.json
+SEQUENCER_KEY=$(jq -r '.Sequencer.privateKey' /l2-accounts/keys.json | tr -d '"')
+
+# Run op-node
 op-node \
-    --l2=$L2_AUTH \
-    --l2.jwt-secret=$jwt_file \
+    --l2=http://op-geth:8551 \
+    --l2.jwt-secret=/op-node-data/jwt.txt \
     --sequencer.enabled \
-    --sequencer.l1-confs=5 \
-    --verifier.l1-confs=4 \
-    --rollup.config=$rollup_outfile \
+    --sequencer.l1-confs=3 \
+    --verifier.l1-confs=3 \
+    --rollup.config=/op-node-data/rollup.json \
     --rpc.addr=0.0.0.0 \
     --rpc.port=8547 \
     --p2p.disable \
     --rpc.enable-admin \
-    --p2p.sequencer.key="${SEQ_KEY#0x}" \
+    --p2p.sequencer.key=$SEQUENCER_KEY \
     --l1=$CERC_L1_RPC \
-    --l1.rpckind=$RPC_KIND
+    --l1.rpckind=any
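Where the old script generated genesis.json and rollup.json itself via op-node genesis l2, the new one assumes they already exist under /op-node-data. A small preflight check in the same spirit, as a sketch; the error hint names the generation command taken from the removed code:

# Sketch: fail fast if the pre-generated artifacts referenced above are missing.
for f in /op-node-data/jwt.txt /op-node-data/rollup.json; do
    if [ ! -f "$f" ]; then
        echo "missing $f; generate it (e.g. with 'op-node genesis l2') before starting" >&2
        exit 1
    fi
done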
@ -5,18 +5,32 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
 fi

 CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}"
-CERC_L1_CHAIN_ID="${CERC_L1_CHAIN_ID:-${DEFAULT_CERC_L1_CHAIN_ID}}"
-DEPLOYMENT_CONTEXT="$CERC_L1_CHAIN_ID"

-# Start op-proposer
-ROLLUP_RPC="http://op-node:8547"
-PROPOSER_KEY=$(cat /l2-accounts/accounts.json | jq -r .ProposerKey)
-L2OO_ADDR=$(cat /l1-deployment/$DEPLOYMENT_CONTEXT/L2OutputOracleProxy.json | jq -r .address)
+# Read the L2OutputOracle contract address from the deployment
+L2OO_DEPLOYMENT=$(cat /contracts-bedrock/deployments/getting-started/L2OutputOracle.json)
+L2OO_ADDR=$(echo "$L2OO_DEPLOYMENT" | jq -r '.address')
+
+# Get Proposer key from keys.json
+PROPOSER_KEY=$(jq -r '.Proposer.privateKey' /l2-accounts/keys.json | tr -d '"')
+
+cleanup() {
+    echo "Signal received, cleaning up..."
+    kill ${proposer_pid}
+    wait
+    echo "Done"
+}
+trap 'cleanup' INT TERM
+
+# Run op-proposer
 op-proposer \
-    --poll-interval=12s \
-    --rpc.port=8560 \
-    --rollup-rpc=$ROLLUP_RPC \
-    --l2oo-address="${L2OO_ADDR#0x}" \
-    --private-key="${PROPOSER_KEY#0x}" \
-    --l1-eth-rpc=$CERC_L1_RPC
+    --poll-interval 12s \
+    --rpc.port 8560 \
+    --rollup-rpc http://op-node:8547 \
+    --l2oo-address $L2OO_ADDR \
+    --private-key $PROPOSER_KEY \
+    --l1-eth-rpc $CERC_L1_RPC \
+    &
+
+proposer_pid=$!
+wait $proposer_pid
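The new proposer script expects a forge/hardhat-style deployment artifact with a top-level address field. A sketch of that assumption; the zero address is a placeholder:

# Hypothetical L2OutputOracle.json matching the jq filter used above.
echo '{ "address": "0x0000000000000000000000000000000000000000" }' > /tmp/L2OutputOracle.json
L2OO_ADDR=$(jq -r '.address' /tmp/L2OutputOracle.json)
echo "L2OO_ADDR: $L2OO_ADDR"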
@ -1,7 +0,0 @@
modules:
  http_2xx:
    prober: http
    timeout: 5s
    http:
      valid_status_codes: [] #default to 2xx
      method: GET
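A module like http_2xx can be exercised by hand against the exporter's /probe endpoint before Prometheus is pointed at it. A sketch, assuming the exporter is reachable as blackbox:9115, the address the Prometheus relabeling later in this stack rewrites targets to:

# Sketch: manual probe through the http_2xx module defined above.
curl -s 'http://blackbox:9115/probe?module=http_2xx&target=https://github.com' | grep probe_success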
File diff suppressed because it is too large
@ -1,943 +0,0 @@
{
  "annotations": {
    "list": [
      {
        "builtIn": 1,
        "datasource": { "type": "datasource", "uid": "grafana" },
        "enable": true,
        "hide": true,
        "iconColor": "rgba(0, 211, 255, 1)",
        "name": "Annotations & Alerts",
        "type": "dashboard"
      }
    ]
  },
  "description": "node.js prometheus client basic metrics",
  "editable": true,
  "fiscalYearStartMonth": 0,
  "gnetId": 11159,
  "graphTooltip": 0,
  "id": 15,
  "links": [],
  "liveNow": false,
  "panels": [
    {
      "aliasColors": {},
      "bars": false,
      "dashLength": 10,
      "dashes": false,
      "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" },
      "fill": 1,
      "fillGradient": 0,
      "gridPos": { "h": 7, "w": 10, "x": 0, "y": 0 },
      "hiddenSeries": false,
      "id": 6,
      "legend": {
        "alignAsTable": true,
        "avg": true,
        "current": true,
        "max": true,
        "min": true,
        "show": true,
        "total": false,
        "values": true
      },
      "lines": true,
      "linewidth": 1,
      "links": [],
      "nullPointMode": "null",
      "options": { "alertThreshold": true },
      "paceLength": 10,
      "percentage": false,
      "pluginVersion": "10.2.2",
      "pointradius": 2,
      "points": false,
      "renderer": "flot",
      "seriesOverrides": [],
      "spaceLength": 10,
      "stack": false,
      "steppedLine": false,
      "targets": [
        {
          "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" },
          "expr": "irate(process_cpu_user_seconds_total{instance=~\"$instance\"}[2m]) * 100",
          "format": "time_series",
          "intervalFactor": 1,
          "legendFormat": "User CPU - {{instance}}",
          "refId": "A"
        },
        {
          "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" },
          "expr": "irate(process_cpu_system_seconds_total{instance=~\"$instance\"}[2m]) * 100",
          "format": "time_series",
          "intervalFactor": 1,
          "legendFormat": "Sys CPU - {{instance}}",
          "refId": "B"
        }
      ],
      "thresholds": [],
      "timeRegions": [],
      "title": "Process CPU Usage",
      "tooltip": { "shared": true, "sort": 0, "value_type": "individual" },
      "type": "graph",
      "xaxis": { "mode": "time", "show": true, "values": [] },
      "yaxes": [
        { "format": "percent", "logBase": 1, "show": true },
        { "format": "short", "logBase": 1, "show": true }
      ],
      "yaxis": { "align": false }
    },
    {
      "aliasColors": {},
      "bars": false,
      "dashLength": 10,
      "dashes": false,
      "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" },
      "fill": 1,
      "fillGradient": 0,
      "gridPos": { "h": 7, "w": 9, "x": 10, "y": 0 },
      "hiddenSeries": false,
      "id": 8,
      "legend": {
        "alignAsTable": true,
        "avg": true,
        "current": true,
        "max": true,
        "min": true,
        "show": true,
        "total": false,
        "values": true
      },
      "lines": true,
      "linewidth": 1,
      "links": [],
      "nullPointMode": "null",
      "options": { "alertThreshold": true },
      "paceLength": 10,
      "percentage": false,
      "pluginVersion": "10.2.2",
      "pointradius": 2,
      "points": false,
      "renderer": "flot",
      "seriesOverrides": [],
      "spaceLength": 10,
      "stack": false,
      "steppedLine": false,
      "targets": [
        {
          "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" },
          "expr": "nodejs_eventloop_lag_seconds{instance=~\"$instance\"}",
          "format": "time_series",
          "intervalFactor": 1,
          "legendFormat": "{{instance}}",
          "refId": "A"
        }
      ],
      "thresholds": [],
      "timeRegions": [],
      "title": "Event Loop Lag",
      "tooltip": { "shared": true, "sort": 0, "value_type": "individual" },
      "type": "graph",
      "xaxis": { "mode": "time", "show": true, "values": [] },
      "yaxes": [
        { "format": "s", "logBase": 1, "show": true },
        { "format": "short", "logBase": 1, "show": true }
      ],
      "yaxis": { "align": false }
    },
    {
      "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" },
      "fieldConfig": {
        "defaults": {
          "color": { "fixedColor": "text", "mode": "fixed" },
          "mappings": [
            { "options": { "match": "null", "result": { "text": "N/A" } }, "type": "special" }
          ],
          "thresholds": {
            "mode": "absolute",
            "steps": [ { "color": "green", "value": null } ]
          },
          "unit": "none"
        },
        "overrides": []
      },
      "gridPos": { "h": 3, "w": 5, "x": 19, "y": 0 },
      "id": 2,
      "interval": "",
      "links": [],
      "maxDataPoints": 100,
      "options": {
        "colorMode": "none",
        "graphMode": "none",
        "justifyMode": "auto",
        "orientation": "auto",
        "reduceOptions": { "calcs": ["mean"], "fields": "", "values": false },
        "textMode": "name",
        "wideLayout": true
      },
      "pluginVersion": "10.2.2",
      "targets": [
        {
          "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" },
          "expr": "sum(nodejs_version_info{instance=~\"$instance\"}) by (version)",
          "format": "time_series",
          "instant": false,
          "interval": "",
          "intervalFactor": 1,
          "legendFormat": "{{version}}",
          "refId": "A"
        }
      ],
      "title": "Node.js Version",
      "type": "stat"
    },
    {
      "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" },
      "fieldConfig": {
        "defaults": {
          "color": { "fixedColor": "#F2495C", "mode": "fixed" },
          "mappings": [
            { "options": { "match": "null", "result": { "text": "N/A" } }, "type": "special" }
          ],
          "thresholds": {
            "mode": "absolute",
            "steps": [
              { "color": "green", "value": null },
              { "color": "red", "value": 80 }
            ]
          },
          "unit": "none"
        },
        "overrides": []
      },
      "gridPos": { "h": 4, "w": 5, "x": 19, "y": 3 },
      "id": 4,
      "links": [],
      "maxDataPoints": 100,
      "options": {
        "colorMode": "none",
        "graphMode": "area",
        "justifyMode": "auto",
        "orientation": "horizontal",
        "reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false },
        "textMode": "auto",
        "wideLayout": true
      },
      "pluginVersion": "10.2.2",
      "targets": [
        {
          "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" },
          "expr": "sum(changes(process_start_time_seconds{instance=~\"$instance\"}[1m]))",
          "format": "time_series",
          "intervalFactor": 1,
          "legendFormat": "{{instance}}",
          "refId": "A"
        }
      ],
      "title": "Process Restart Times",
      "type": "stat"
    },
    {
      "aliasColors": {},
      "bars": false,
      "dashLength": 10,
      "dashes": false,
      "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" },
      "fill": 1,
      "fillGradient": 0,
      "gridPos": { "h": 7, "w": 16, "x": 0, "y": 7 },
      "hiddenSeries": false,
      "id": 7,
      "legend": {
        "alignAsTable": true,
        "avg": true,
        "current": true,
        "max": true,
        "min": true,
        "rightSide": true,
        "show": true,
        "total": false,
        "values": true
      },
      "lines": true,
      "linewidth": 1,
      "links": [],
      "nullPointMode": "null",
      "options": { "alertThreshold": true },
      "paceLength": 10,
      "percentage": false,
      "pluginVersion": "10.2.2",
      "pointradius": 2,
      "points": false,
      "renderer": "flot",
      "seriesOverrides": [],
      "spaceLength": 10,
      "stack": false,
      "steppedLine": false,
      "targets": [
        {
          "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" },
          "expr": "process_resident_memory_bytes{instance=~\"$instance\"}",
          "format": "time_series",
          "intervalFactor": 1,
          "legendFormat": "Process Memory - {{instance}}",
          "refId": "A"
        },
        {
          "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" },
          "expr": "nodejs_heap_size_total_bytes{instance=~\"$instance\"}",
          "format": "time_series",
          "intervalFactor": 1,
          "legendFormat": "Heap Total - {{instance}}",
          "refId": "B"
        },
        {
          "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" },
          "expr": "nodejs_heap_size_used_bytes{instance=~\"$instance\"}",
          "format": "time_series",
          "intervalFactor": 1,
          "legendFormat": "Heap Used - {{instance}}",
          "refId": "C"
        },
        {
          "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" },
          "expr": "nodejs_external_memory_bytes{instance=~\"$instance\"}",
          "format": "time_series",
          "intervalFactor": 1,
          "legendFormat": "External Memory - {{instance}}",
          "refId": "D"
        }
      ],
      "thresholds": [],
      "timeRegions": [],
      "title": "Process Memory Usage",
      "tooltip": { "shared": true, "sort": 0, "value_type": "individual" },
      "type": "graph",
      "xaxis": { "mode": "time", "show": true, "values": [] },
      "yaxes": [
        { "format": "bytes", "logBase": 1, "show": true },
        { "format": "short", "logBase": 1, "show": true }
      ],
      "yaxis": { "align": false }
    },
    {
      "aliasColors": {},
      "bars": false,
      "dashLength": 10,
      "dashes": false,
      "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" },
      "fill": 1,
      "fillGradient": 0,
      "gridPos": { "h": 7, "w": 8, "x": 16, "y": 7 },
      "hiddenSeries": false,
      "id": 9,
      "legend": {
        "alignAsTable": true,
        "avg": true,
        "current": true,
        "max": true,
        "min": true,
        "show": true,
        "total": false,
        "values": true
      },
      "lines": true,
      "linewidth": 1,
      "links": [],
      "nullPointMode": "null",
      "options": { "alertThreshold": true },
      "paceLength": 10,
      "percentage": false,
      "pluginVersion": "10.2.2",
      "pointradius": 2,
      "points": false,
      "renderer": "flot",
      "seriesOverrides": [],
      "spaceLength": 10,
      "stack": false,
      "steppedLine": false,
      "targets": [
        {
          "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" },
          "expr": "nodejs_active_handles_total{instance=~\"$instance\"}",
          "format": "time_series",
          "intervalFactor": 1,
          "legendFormat": "Active Handler - {{instance}}",
          "refId": "A"
        },
        {
          "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" },
          "expr": "nodejs_active_requests_total{instance=~\"$instance\"}",
          "format": "time_series",
          "intervalFactor": 1,
          "legendFormat": "Active Request - {{instance}}",
          "refId": "B"
        }
      ],
      "thresholds": [],
      "timeRegions": [],
      "title": "Active Handlers/Requests Total",
      "tooltip": { "shared": true, "sort": 0, "value_type": "individual" },
      "type": "graph",
      "xaxis": { "mode": "time", "show": true, "values": [] },
      "yaxes": [
        { "format": "short", "logBase": 1, "show": true },
        { "format": "short", "logBase": 1, "show": true }
      ],
      "yaxis": { "align": false }
    },
    {
      "aliasColors": {},
      "bars": false,
      "dashLength": 10,
      "dashes": false,
      "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" },
      "fill": 1,
      "fillGradient": 0,
      "gridPos": { "h": 8, "w": 8, "x": 0, "y": 14 },
      "hiddenSeries": false,
      "id": 10,
      "legend": {
        "alignAsTable": true,
        "avg": true,
        "current": true,
        "max": true,
        "min": true,
        "rightSide": false,
        "show": true,
        "total": false,
        "values": true
      },
      "lines": true,
      "linewidth": 1,
      "links": [],
      "nullPointMode": "null",
      "options": { "alertThreshold": true },
      "paceLength": 10,
      "percentage": false,
      "pluginVersion": "10.2.2",
      "pointradius": 2,
      "points": false,
      "renderer": "flot",
      "seriesOverrides": [],
      "spaceLength": 10,
      "stack": false,
      "steppedLine": false,
      "targets": [
        {
          "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" },
          "expr": "nodejs_heap_space_size_total_bytes{instance=~\"$instance\"}",
          "format": "time_series",
          "intervalFactor": 1,
          "legendFormat": "Heap Total - {{instance}} - {{space}}",
          "refId": "A"
        }
      ],
      "thresholds": [],
      "timeRegions": [],
      "title": "Heap Total Detail",
      "tooltip": { "shared": true, "sort": 0, "value_type": "individual" },
      "type": "graph",
      "xaxis": { "mode": "time", "show": true, "values": [] },
      "yaxes": [
        { "format": "bytes", "logBase": 1, "show": true },
        { "format": "short", "logBase": 1, "show": true }
      ],
      "yaxis": { "align": false }
    },
    {
      "aliasColors": {},
      "bars": false,
      "dashLength": 10,
      "dashes": false,
      "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" },
      "fill": 1,
      "fillGradient": 0,
      "gridPos": { "h": 8, "w": 8, "x": 8, "y": 14 },
      "hiddenSeries": false,
      "id": 11,
      "legend": {
        "alignAsTable": true,
        "avg": true,
        "current": true,
        "max": true,
        "min": true,
        "rightSide": false,
        "show": true,
        "total": false,
        "values": true
      },
      "lines": true,
      "linewidth": 1,
      "links": [],
      "nullPointMode": "null",
      "options": { "alertThreshold": true },
      "paceLength": 10,
      "percentage": false,
      "pluginVersion": "10.2.2",
      "pointradius": 2,
      "points": false,
      "renderer": "flot",
      "seriesOverrides": [],
      "spaceLength": 10,
      "stack": false,
      "steppedLine": false,
      "targets": [
        {
          "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" },
          "expr": "nodejs_heap_space_size_used_bytes{instance=~\"$instance\"}",
          "format": "time_series",
          "intervalFactor": 1,
          "legendFormat": "Heap Used - {{instance}} - {{space}}",
          "refId": "A"
        }
      ],
      "thresholds": [],
      "timeRegions": [],
      "title": "Heap Used Detail",
      "tooltip": { "shared": true, "sort": 0, "value_type": "individual" },
      "type": "graph",
      "xaxis": { "mode": "time", "show": true, "values": [] },
      "yaxes": [
        { "format": "bytes", "logBase": 1, "show": true },
        { "format": "short", "logBase": 1, "show": true }
      ],
      "yaxis": { "align": false }
    },
    {
      "aliasColors": {},
      "bars": false,
      "dashLength": 10,
      "dashes": false,
      "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" },
      "fill": 1,
      "fillGradient": 0,
      "gridPos": { "h": 8, "w": 8, "x": 16, "y": 14 },
      "hiddenSeries": false,
      "id": 12,
      "legend": {
        "alignAsTable": true,
        "avg": true,
        "current": true,
        "max": true,
        "min": true,
        "rightSide": false,
        "show": true,
        "total": false,
        "values": true
      },
      "lines": true,
      "linewidth": 1,
      "links": [],
      "nullPointMode": "null",
      "options": { "alertThreshold": true },
      "paceLength": 10,
      "percentage": false,
      "pluginVersion": "10.2.2",
      "pointradius": 2,
      "points": false,
      "renderer": "flot",
      "seriesOverrides": [],
      "spaceLength": 10,
      "stack": false,
      "steppedLine": false,
      "targets": [
        {
          "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" },
          "expr": "nodejs_heap_space_size_available_bytes{instance=~\"$instance\"}",
          "format": "time_series",
          "intervalFactor": 1,
          "legendFormat": "Heap Used - {{instance}} - {{space}}",
          "refId": "A"
        }
      ],
      "thresholds": [],
      "timeRegions": [],
      "title": "Heap Available Detail",
      "tooltip": { "shared": true, "sort": 0, "value_type": "individual" },
      "type": "graph",
      "xaxis": { "mode": "time", "show": true, "values": [] },
      "yaxes": [
        { "format": "bytes", "logBase": 1, "show": true },
        { "format": "short", "logBase": 1, "show": true }
      ],
      "yaxis": { "align": false }
    }
  ],
  "refresh": "10s",
  "schemaVersion": 38,
  "tags": ["nodejs"],
  "templating": {
    "list": [
      {
        "current": {
          "selected": true,
          "text": ["All"],
          "value": ["$__all"]
        },
        "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" },
        "definition": "label_values(nodejs_version_info, instance)",
        "hide": 0,
        "includeAll": true,
        "label": "instance",
        "multi": true,
        "name": "instance",
        "options": [],
        "query": "label_values(nodejs_version_info, instance)",
        "refresh": 1,
        "regex": "",
        "skipUrlSync": false,
        "sort": 1,
        "tagValuesQuery": "",
        "tagsQuery": "",
        "type": "query",
        "useTags": false
      }
    ]
  },
  "time": { "from": "now-15m", "to": "now" },
  "timepicker": {
    "refresh_intervals": ["5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d"],
    "time_options": ["5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d"]
  },
  "timezone": "",
  "title": "NodeJS Application Dashboard",
  "uid": "PTSqcpJWk",
  "version": 3,
  "weekStart": ""
}
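A dashboard export like the one above can be pushed back into a running Grafana through the HTTP API. A sketch, assuming the JSON is saved as dashboard.json and that localhost:3000 with admin:admin are only placeholders for the real instance and credentials:

# Sketch: re-import the deleted dashboard via the Grafana HTTP API.
# del(.id) strips the instance-local id so Grafana creates/updates by uid instead.
curl -s -X POST http://localhost:3000/api/dashboards/db \
    -u admin:admin \
    -H 'Content-Type: application/json' \
    -d "{\"dashboard\": $(jq 'del(.id)' dashboard.json), \"overwrite\": true}"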
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -1,14 +0,0 @@
# https://www.clever-cloud.com/blog/features/2021/12/03/slack-alerts-for-grafana/

apiVersion: 1

contactPoints:
  - orgId: 1
    name: SlackNotifier
    receivers:
      - uid: a71b06e3-58b6-41fe-af65-fbbb29653951
        type: slack
        settings:
          # Slack hook URL (see https://api.slack.com/messaging/webhooks)
          url: <YOUR_SLACK_HOOK_URL>
          disableResolveMessage: false
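The hook URL placeholder above has to be replaced with a real Slack webhook, which can be smoke-tested independently of Grafana. A sketch; the variable stands in for your webhook URL:

# Sketch: verify the webhook before provisioning it as a contact point.
curl -s -X POST -H 'Content-type: application/json' \
    --data '{"text": "grafana contact point test"}' \
    "$YOUR_SLACK_HOOK_URL"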
@ -1,15 +0,0 @@
# https://grafana.com/docs/grafana/latest/alerting/alerting-rules/create-notification-policy/

apiVersion: 1

policies:
  - orgId: 1
    receiver: grafana-default-email
    group_by:
      - grafana_folder
      - alertname
    routes:
      - receiver: SlackNotifier
        object_matchers:
          # Add matchers below
          # - ['grafana_folder', '=', 'MyAlerts']
@ -1,10 +0,0 @@
apiVersion: 1

providers:
  - name: dashboards
    type: file
    updateIntervalSeconds: 10
    allowUiUpdates: true
    options:
      path: /etc/grafana/dashboards
      foldersFromFilesStructure: true
@ -1,16 +0,0 @@
apiVersion: 1

datasources:
  - id: 1
    orgId: 1
    name: Prometheus
    type: prometheus
    typeName: Prometheus
    typeLogoUrl: public/app/plugins/datasource/prometheus/img/prometheus_logo.svg
    access: proxy
    url: http://prometheus:9090
    isDefault: true
    jsonData:
      httpMethod: POST
    version: 1
    editable: true
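The url above resolves inside the compose network; a one-line check that the provisioned datasource target is actually up (run it from a container on the same network):

# Sketch: Prometheus exposes a plain-text health endpoint.
curl -s http://prometheus:9090/-/healthy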
@ -1,8 +0,0 @@
auth_modules:
  foo:
    type: userpass
    userpass:
      username: username
      password: password
    options:
      sslmode: disable
@ -1,67 +0,0 @@
global:
  scrape_interval: 10s
  evaluation_interval: 15s

rule_files:
  # - "first.rules"
  # - "second.rules"

scrape_configs:
  - job_name: prometheus
    static_configs:
      - targets: ['localhost:9090']

  - job_name: node
    static_configs:
      # Add node-exporter targets to be monitored below
      # - targets: ['example-host:9100']
      #   labels:
      #     instance: 'my-host'

  - job_name: 'blackbox'
    scrape_interval: 10s
    metrics_path: /probe
    params:
      module: [http_2xx]
    static_configs:
      # Add URLs to be monitored below
      - targets:
        # - https://github.com
    relabel_configs:
      - source_labels: [__address__]
        regex: (.*)(:80)?
        target_label: __param_target
      - source_labels: [__param_target]
        regex: (.*)
        target_label: instance
        replacement: ${1}
      - source_labels: []
        regex: .*
        target_label: __address__
        replacement: blackbox:9115

  - job_name: chain_heads
    scrape_interval: 10s
    metrics_path: /metrics
    scheme: http
    static_configs:
      - targets: ['chain-head-exporter:5000']

  - job_name: 'postgres'
    scrape_interval: 30s
    scrape_timeout: 30s
    static_configs:
      # Add DB targets below
      # - targets: [example-server:5432]
      #   labels:
      #     instance: 'example-label'
    metrics_path: /probe
    params:
      auth_module: [foo]
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: instance
      - target_label: __address__
        replacement: postgres-exporter:9187
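Before (re)starting the stack, a scrape config like the one above can be validated offline with promtool, which ships with Prometheus:

# Sketch: static validation of the config above.
promtool check config prometheus.yml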
@ -1,933 +0,0 @@
|
|||||||
# https://grafana.com/docs/grafana/latest/alerting/alerting-rules/create-grafana-managed-rule/
|
|
||||||
|
|
||||||
apiVersion: 1
|
|
||||||
|
|
||||||
groups:
|
|
||||||
- orgId: 1
|
|
||||||
name: watcher
|
|
||||||
folder: WatcherAlerts
|
|
||||||
interval: 30s
|
|
||||||
rules:
|
|
||||||
# Azimuth
|
|
||||||
- uid: azimuth_diff_external
|
|
||||||
title: azimuth_watcher_head_tracking
|
|
||||||
condition: condition
|
|
||||||
data:
|
|
||||||
- refId: diff
|
|
||||||
relativeTimeRange:
|
|
||||||
from: 600
|
|
||||||
to: 0
|
|
||||||
datasourceUid: PBFA97CFB590B2093
|
|
||||||
model:
|
|
||||||
datasource:
|
|
||||||
type: prometheus
|
|
||||||
uid: PBFA97CFB590B2093
|
|
||||||
disableTextWrap: false
|
|
||||||
editorMode: code
|
|
||||||
expr: latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", instance="azimuth", kind="latest_indexed"}
|
|
||||||
fullMetaSearch: false
|
|
||||||
includeNullMetadata: true
|
|
||||||
instant: true
|
|
||||||
intervalMs: 1000
|
|
||||||
legendFormat: __auto
|
|
||||||
maxDataPoints: 43200
|
|
||||||
range: false
|
|
||||||
refId: diff
|
|
||||||
useBackend: false
|
|
||||||
- refId: latest_external
|
|
||||||
relativeTimeRange:
|
|
||||||
from: 600
|
|
||||||
to: 0
|
|
||||||
datasourceUid: PBFA97CFB590B2093
|
|
||||||
model:
|
|
||||||
datasource:
|
|
||||||
type: prometheus
|
|
||||||
uid: PBFA97CFB590B2093
|
|
||||||
editorMode: code
|
|
||||||
expr: latest_block_number{chain="ethereum"}
|
|
||||||
hide: false
|
|
||||||
instant: true
|
|
||||||
legendFormat: __auto
|
|
||||||
range: false
|
|
||||||
refId: latest_external
|
|
||||||
- refId: latest_indexed
|
|
||||||
relativeTimeRange:
|
|
||||||
from: 600
|
|
||||||
to: 0
|
|
||||||
datasourceUid: PBFA97CFB590B2093
|
|
||||||
model:
|
|
||||||
datasource:
|
|
||||||
type: prometheus
|
|
||||||
uid: PBFA97CFB590B2093
|
|
||||||
editorMode: code
|
|
||||||
expr: sync_status_block_number{job="azimuth", instance="azimuth", kind="latest_indexed"}
|
|
||||||
hide: false
|
|
||||||
instant: true
|
|
||||||
legendFormat: __auto
|
|
||||||
range: false
|
|
||||||
refId: latest_indexed
|
|
||||||
- refId: condition
|
|
||||||
relativeTimeRange:
|
|
||||||
from: 600
|
|
||||||
to: 0
|
|
||||||
datasourceUid: __expr__
|
|
||||||
model:
|
|
||||||
conditions:
|
|
||||||
- evaluator:
|
|
||||||
params:
|
|
||||||
- 0
|
|
||||||
- 0
|
|
||||||
type: gt
|
|
||||||
operator:
|
|
||||||
type: and
|
|
||||||
query:
|
|
||||||
params: []
|
|
||||||
reducer:
|
|
||||||
params: []
|
|
||||||
type: avg
|
|
||||||
type: query
|
|
||||||
datasource:
|
|
||||||
name: Expression
|
|
||||||
type: __expr__
|
|
||||||
uid: __expr__
|
|
||||||
expression: ${diff} >= 16
|
|
||||||
intervalMs: 1000
|
|
||||||
maxDataPoints: 43200
|
|
||||||
refId: condition
|
|
||||||
type: math
|
|
||||||
noDataState: Alerting
|
|
||||||
execErrState: Alerting
|
|
||||||
for: 15m
|
|
||||||
annotations:
|
|
||||||
summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
|
|
||||||
isPaused: false
|
|
||||||
- uid: censures_diff_external
|
|
||||||
title: censures_watcher_head_tracking
|
|
||||||
condition: condition
|
|
||||||
data:
|
|
||||||
- refId: diff
|
|
||||||
relativeTimeRange:
|
|
||||||
from: 600
|
|
||||||
to: 0
|
|
||||||
datasourceUid: PBFA97CFB590B2093
|
|
||||||
model:
|
|
||||||
datasource:
|
|
||||||
type: prometheus
|
|
||||||
uid: PBFA97CFB590B2093
|
|
||||||
disableTextWrap: false
|
|
||||||
editorMode: code
|
|
||||||
expr: latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", instance="censures", kind="latest_indexed"}
|
|
||||||
fullMetaSearch: false
|
|
||||||
includeNullMetadata: true
|
|
||||||
instant: true
|
|
||||||
intervalMs: 1000
|
|
||||||
legendFormat: __auto
|
|
||||||
maxDataPoints: 43200
|
|
||||||
range: false
|
|
||||||
refId: diff
|
|
||||||
useBackend: false
|
|
||||||
- refId: latest_external
|
|
||||||
relativeTimeRange:
|
|
||||||
from: 600
|
|
||||||
to: 0
|
|
||||||
datasourceUid: PBFA97CFB590B2093
|
|
||||||
model:
|
|
||||||
datasource:
|
|
||||||
type: prometheus
|
|
||||||
uid: PBFA97CFB590B2093
|
|
||||||
editorMode: code
|
|
||||||
expr: latest_block_number{chain="ethereum"}
|
|
||||||
hide: false
|
|
||||||
instant: true
|
|
||||||
legendFormat: __auto
|
|
||||||
range: false
|
|
||||||
refId: latest_external
|
|
||||||
- refId: latest_indexed
|
|
||||||
relativeTimeRange:
|
|
||||||
from: 600
|
|
||||||
to: 0
|
|
||||||
datasourceUid: PBFA97CFB590B2093
|
|
||||||
model:
|
|
||||||
datasource:
|
|
||||||
type: prometheus
|
|
||||||
uid: PBFA97CFB590B2093
|
|
||||||
editorMode: code
|
|
||||||
expr: sync_status_block_number{job="azimuth", instance="censures", kind="latest_indexed"}
|
|
||||||
hide: false
|
|
||||||
instant: true
|
|
||||||
legendFormat: __auto
|
|
||||||
range: false
|
|
||||||
refId: latest_indexed
|
|
||||||
- refId: condition
|
|
||||||
relativeTimeRange:
|
|
||||||
from: 600
|
|
||||||
to: 0
|
|
||||||
datasourceUid: __expr__
|
|
||||||
model:
|
|
||||||
conditions:
|
|
||||||
- evaluator:
|
|
||||||
params:
|
|
||||||
- 0
|
|
||||||
- 0
|
|
||||||
type: gt
|
|
||||||
operator:
|
|
||||||
type: and
|
|
||||||
query:
|
|
||||||
params: []
|
|
||||||
reducer:
|
|
||||||
params: []
|
|
||||||
type: avg
|
|
||||||
type: query
|
|
||||||
datasource:
|
|
||||||
name: Expression
|
|
||||||
type: __expr__
|
|
||||||
uid: __expr__
|
|
||||||
expression: ${diff} >= 16
|
|
||||||
intervalMs: 1000
|
|
||||||
maxDataPoints: 43200
|
|
||||||
refId: condition
|
|
||||||
type: math
|
|
||||||
noDataState: Alerting
|
|
||||||
execErrState: Alerting
|
|
||||||
for: 15m
|
|
||||||
annotations:
|
|
||||||
summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
|
|
||||||
isPaused: false
|
|
||||||
- uid: claims_diff_external
|
|
||||||
title: claims_watcher_head_tracking
|
|
||||||
condition: condition
|
|
||||||
data:
|
|
||||||
- refId: diff
|
|
||||||
relativeTimeRange:
|
|
||||||
from: 600
|
|
||||||
to: 0
|
|
||||||
datasourceUid: PBFA97CFB590B2093
|
|
||||||
model:
|
|
||||||
datasource:
|
|
||||||
type: prometheus
|
|
||||||
uid: PBFA97CFB590B2093
|
|
||||||
disableTextWrap: false
|
|
||||||
editorMode: code
|
|
||||||
expr: latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", instance="claims", kind="latest_indexed"}
|
|
||||||
fullMetaSearch: false
|
|
||||||
includeNullMetadata: true
|
|
||||||
instant: true
|
|
||||||
intervalMs: 1000
|
|
||||||
legendFormat: __auto
|
|
||||||
maxDataPoints: 43200
|
|
||||||
range: false
|
|
||||||
refId: diff
|
|
||||||
useBackend: false
|
|
||||||
- refId: latest_external
|
|
||||||
relativeTimeRange:
|
|
||||||
from: 600
|
|
||||||
to: 0
|
|
||||||
datasourceUid: PBFA97CFB590B2093
|
|
||||||
model:
|
|
||||||
datasource:
|
|
||||||
type: prometheus
|
|
||||||
uid: PBFA97CFB590B2093
|
|
||||||
editorMode: code
|
|
||||||
expr: latest_block_number{chain="ethereum"}
|
|
||||||
hide: false
|
|
||||||
instant: true
|
|
||||||
legendFormat: __auto
|
|
||||||
range: false
|
|
||||||
refId: latest_external
|
|
||||||
- refId: latest_indexed
|
|
||||||
relativeTimeRange:
|
|
||||||
from: 600
|
|
||||||
to: 0
|
|
||||||
datasourceUid: PBFA97CFB590B2093
|
|
||||||
model:
|
|
||||||
datasource:
|
|
||||||
type: prometheus
|
|
||||||
uid: PBFA97CFB590B2093
|
|
||||||
editorMode: code
|
|
||||||
expr: sync_status_block_number{job="azimuth", instance="claims", kind="latest_indexed"}
|
|
||||||
hide: false
|
|
||||||
instant: true
|
|
||||||
legendFormat: __auto
|
|
||||||
range: false
|
|
||||||
refId: latest_indexed
|
|
||||||
- refId: condition
|
|
||||||
relativeTimeRange:
|
|
||||||
from: 600
|
|
||||||
to: 0
|
|
||||||
datasourceUid: __expr__
|
|
||||||
model:
|
|
||||||
conditions:
|
|
||||||
- evaluator:
|
|
||||||
params:
|
|
||||||
- 0
|
|
||||||
- 0
|
|
||||||
type: gt
|
|
||||||
operator:
|
|
||||||
type: and
|
|
||||||
query:
|
|
||||||
params: []
|
|
||||||
reducer:
|
|
||||||
params: []
|
|
||||||
type: avg
|
|
||||||
type: query
|
|
||||||
datasource:
|
|
||||||
name: Expression
|
|
||||||
type: __expr__
|
|
||||||
uid: __expr__
|
|
||||||
expression: ${diff} >= 16
|
|
||||||
intervalMs: 1000
|
|
||||||
maxDataPoints: 43200
|
|
||||||
refId: condition
|
|
||||||
type: math
|
|
||||||
noDataState: Alerting
|
|
||||||
execErrState: Alerting
|
|
||||||
for: 15m
|
|
||||||
annotations:
|
|
||||||
summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
|
|
||||||
isPaused: false
|
|
||||||
- uid: conditional_star_release_diff_external
|
|
||||||
title: conditional_star_release_watcher_head_tracking
|
|
||||||
condition: condition
|
|
||||||
data:
|
|
||||||
- refId: diff
|
|
||||||
relativeTimeRange:
|
|
||||||
from: 600
|
|
||||||
to: 0
|
|
||||||
datasourceUid: PBFA97CFB590B2093
|
|
||||||
model:
|
|
||||||
datasource:
|
|
||||||
type: prometheus
|
|
||||||
uid: PBFA97CFB590B2093
|
|
||||||
disableTextWrap: false
|
|
||||||
editorMode: code
|
|
||||||
expr: latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", instance="conditional_star_release", kind="latest_indexed"}
|
|
||||||
fullMetaSearch: false
|
|
||||||
includeNullMetadata: true
|
|
||||||
instant: true
|
|
||||||
intervalMs: 1000
|
|
||||||
legendFormat: __auto
|
|
||||||
maxDataPoints: 43200
|
|
||||||
range: false
|
|
||||||
refId: diff
|
|
||||||
useBackend: false
|
|
||||||
- refId: latest_external
|
|
||||||
relativeTimeRange:
|
|
||||||
from: 600
|
|
||||||
to: 0
|
|
||||||
datasourceUid: PBFA97CFB590B2093
|
|
||||||
model:
|
|
||||||
datasource:
|
|
||||||
type: prometheus
|
|
||||||
uid: PBFA97CFB590B2093
|
|
||||||
editorMode: code
|
|
||||||
expr: latest_block_number{chain="ethereum"}
|
|
||||||
hide: false
|
|
||||||
instant: true
|
|
||||||
legendFormat: __auto
|
|
||||||
range: false
|
|
||||||
refId: latest_external
|
|
||||||
- refId: latest_indexed
|
|
||||||
relativeTimeRange:
|
|
||||||
from: 600
|
|
||||||
to: 0
|
|
||||||
datasourceUid: PBFA97CFB590B2093
|
|
||||||
model:
|
|
||||||
datasource:
|
|
||||||
type: prometheus
|
|
||||||
uid: PBFA97CFB590B2093
|
|
||||||
editorMode: code
|
|
||||||
expr: sync_status_block_number{job="azimuth", instance="conditional_star_release", kind="latest_indexed"}
|
|
||||||
hide: false
|
|
||||||
instant: true
|
|
||||||
legendFormat: __auto
|
|
||||||
range: false
|
|
||||||
refId: latest_indexed
|
|
||||||
- refId: condition
|
|
||||||
relativeTimeRange:
|
|
||||||
from: 600
|
|
||||||
to: 0
|
|
||||||
datasourceUid: __expr__
|
|
||||||
model:
|
|
||||||
conditions:
|
|
||||||
- evaluator:
|
|
||||||
params:
|
|
||||||
- 0
|
|
||||||
- 0
|
|
||||||
type: gt
|
|
||||||
operator:
|
|
||||||
type: and
|
|
||||||
query:
|
|
||||||
params: []
|
|
||||||
reducer:
|
|
||||||
params: []
|
|
||||||
type: avg
|
|
||||||
type: query
|
|
||||||
datasource:
|
|
||||||
name: Expression
|
|
||||||
type: __expr__
|
|
||||||
uid: __expr__
|
|
||||||
expression: ${diff} >= 16
|
|
||||||
intervalMs: 1000
|
|
||||||
maxDataPoints: 43200
|
|
||||||
refId: condition
|
|
||||||
type: math
|
|
||||||
noDataState: Alerting
|
|
||||||
execErrState: Alerting
|
|
||||||
for: 15m
|
|
||||||
annotations:
|
|
||||||
summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
|
|
||||||
isPaused: false
|
|
||||||
- uid: delegated_sending_diff_external
|
|
||||||
title: delegated_sending_watcher_head_tracking
|
|
||||||
condition: condition
|
|
||||||
data:
|
|
||||||
- refId: diff
|
|
||||||
relativeTimeRange:
|
|
||||||
from: 600
|
|
||||||
to: 0
|
|
||||||
datasourceUid: PBFA97CFB590B2093
|
|
||||||
model:
|
|
||||||
datasource:
|
|
||||||
type: prometheus
|
|
||||||
uid: PBFA97CFB590B2093
|
|
||||||
disableTextWrap: false
|
|
||||||
editorMode: code
|
|
||||||
expr: latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", instance="delegated_sending", kind="latest_indexed"}
|
|
||||||
fullMetaSearch: false
|
|
||||||
includeNullMetadata: true
|
|
||||||
instant: true
|
|
||||||
intervalMs: 1000
|
|
||||||
legendFormat: __auto
|
|
||||||
maxDataPoints: 43200
|
|
||||||
range: false
|
|
||||||
refId: diff
|
|
||||||
useBackend: false
|
|
||||||
- refId: latest_external
|
|
||||||
relativeTimeRange:
|
|
||||||
from: 600
|
|
||||||
to: 0
|
|
||||||
datasourceUid: PBFA97CFB590B2093
|
|
||||||
model:
|
|
||||||
datasource:
|
|
||||||
type: prometheus
|
|
||||||
uid: PBFA97CFB590B2093
|
|
||||||
editorMode: code
|
|
||||||
expr: latest_block_number{chain="ethereum"}
|
|
||||||
hide: false
|
|
||||||
instant: true
|
|
||||||
legendFormat: __auto
|
|
||||||
range: false
|
|
||||||
refId: latest_external
|
|
||||||
- refId: latest_indexed
|
|
||||||
relativeTimeRange:
|
|
||||||
from: 600
|
|
||||||
to: 0
|
|
||||||
datasourceUid: PBFA97CFB590B2093
|
|
||||||
model:
|
|
||||||
datasource:
|
|
||||||
type: prometheus
|
|
||||||
uid: PBFA97CFB590B2093
|
|
||||||
editorMode: code
|
|
||||||
expr: sync_status_block_number{job="azimuth", instance="delegated_sending", kind="latest_indexed"}
|
|
||||||
hide: false
|
|
||||||
instant: true
|
|
||||||
legendFormat: __auto
|
|
||||||
range: false
|
|
||||||
refId: latest_indexed
|
|
||||||
- refId: condition
|
|
||||||
relativeTimeRange:
|
|
||||||
from: 600
|
|
||||||
to: 0
|
|
||||||
datasourceUid: __expr__
|
|
||||||
model:
|
|
||||||
conditions:
|
|
||||||
- evaluator:
|
|
||||||
params:
|
|
||||||
- 0
|
|
||||||
- 0
|
|
||||||
type: gt
|
|
||||||
operator:
|
|
||||||
type: and
|
|
||||||
query:
|
|
||||||
params: []
|
|
||||||
reducer:
|
|
||||||
params: []
|
|
||||||
type: avg
|
|
||||||
type: query
|
|
||||||
datasource:
|
|
||||||
name: Expression
|
|
||||||
type: __expr__
|
|
||||||
uid: __expr__
|
|
||||||
expression: ${diff} >= 16
|
|
||||||
intervalMs: 1000
|
|
||||||
maxDataPoints: 43200
|
|
||||||
refId: condition
|
|
||||||
type: math
|
|
||||||
noDataState: Alerting
|
|
||||||
execErrState: Alerting
|
|
||||||
for: 15m
|
|
||||||
annotations:
|
|
||||||
summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
|
|
||||||
isPaused: false
|
|
||||||
- uid: ecliptic_diff_external
|
|
||||||
title: ecliptic_watcher_head_tracking
|
|
||||||
condition: condition
|
|
||||||
data:
|
|
||||||
- refId: diff
|
|
||||||
relativeTimeRange:
|
|
||||||
from: 600
|
|
||||||
to: 0
|
|
||||||
datasourceUid: PBFA97CFB590B2093
|
|
||||||
model:
|
|
||||||
datasource:
|
|
||||||
type: prometheus
|
|
||||||
uid: PBFA97CFB590B2093
|
|
||||||
disableTextWrap: false
|
|
||||||
editorMode: code
|
|
||||||
expr: latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", instance="ecliptic", kind="latest_indexed"}
|
|
||||||
fullMetaSearch: false
|
|
||||||
includeNullMetadata: true
|
|
||||||
instant: true
|
|
||||||
intervalMs: 1000
|
|
||||||
legendFormat: __auto
|
|
||||||
maxDataPoints: 43200
|
|
||||||
range: false
|
|
||||||
refId: diff
|
|
||||||
useBackend: false
|
|
||||||
- refId: latest_external
|
|
||||||
relativeTimeRange:
|
|
||||||
from: 600
|
|
||||||
to: 0
|
|
||||||
datasourceUid: PBFA97CFB590B2093
|
|
||||||
model:
|
|
||||||
datasource:
|
|
||||||
type: prometheus
|
|
||||||
uid: PBFA97CFB590B2093
|
|
||||||
editorMode: code
|
|
||||||
expr: latest_block_number{chain="ethereum"}
|
|
||||||
hide: false
|
|
||||||
instant: true
|
|
||||||
legendFormat: __auto
|
|
||||||
range: false
|
|
||||||
refId: latest_external
|
|
||||||
- refId: latest_indexed
|
|
||||||
relativeTimeRange:
|
|
||||||
from: 600
|
|
||||||
to: 0
|
|
||||||
datasourceUid: PBFA97CFB590B2093
|
|
||||||
model:
|
|
||||||
datasource:
|
|
||||||
type: prometheus
|
|
||||||
uid: PBFA97CFB590B2093
|
|
||||||
editorMode: code
|
|
||||||
expr: sync_status_block_number{job="azimuth", instance="ecliptic", kind="latest_indexed"}
|
|
||||||
hide: false
|
|
||||||
instant: true
|
|
||||||
legendFormat: __auto
|
|
||||||
range: false
|
|
||||||
refId: latest_indexed
|
|
||||||
- refId: condition
|
|
||||||
relativeTimeRange:
|
|
||||||
from: 600
|
|
||||||
to: 0
|
|
||||||
datasourceUid: __expr__
|
|
||||||
model:
|
|
||||||
conditions:
|
|
||||||
- evaluator:
|
|
||||||
params:
|
|
||||||
- 0
|
|
||||||
- 0
|
|
||||||
type: gt
|
|
||||||
operator:
|
|
||||||
type: and
|
|
||||||
query:
|
|
||||||
params: []
|
|
||||||
reducer:
|
|
||||||
params: []
|
|
||||||
type: avg
|
|
||||||
type: query
|
|
||||||
datasource:
|
|
||||||
name: Expression
|
|
||||||
type: __expr__
|
|
||||||
uid: __expr__
|
|
||||||
expression: ${diff} >= 16
|
|
||||||
intervalMs: 1000
|
|
||||||
maxDataPoints: 43200
|
|
||||||
refId: condition
|
|
||||||
type: math
|
|
||||||
noDataState: Alerting
|
|
||||||
execErrState: Alerting
|
|
||||||
for: 15m
|
|
||||||
annotations:
|
|
||||||
summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
|
|
||||||
isPaused: false
|
|
||||||
  - uid: linear_star_release_diff_external
    title: linear_star_release_watcher_head_tracking
    condition: condition
    data:
      - refId: diff
        relativeTimeRange:
          from: 600
          to: 0
        datasourceUid: PBFA97CFB590B2093
        model:
          datasource:
            type: prometheus
            uid: PBFA97CFB590B2093
          disableTextWrap: false
          editorMode: code
          expr: latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", instance="linear_star_release", kind="latest_indexed"}
          fullMetaSearch: false
          includeNullMetadata: true
          instant: true
          intervalMs: 1000
          legendFormat: __auto
          maxDataPoints: 43200
          range: false
          refId: diff
          useBackend: false
      - refId: latest_external
        relativeTimeRange:
          from: 600
          to: 0
        datasourceUid: PBFA97CFB590B2093
        model:
          datasource:
            type: prometheus
            uid: PBFA97CFB590B2093
          editorMode: code
          expr: latest_block_number{chain="ethereum"}
          hide: false
          instant: true
          legendFormat: __auto
          range: false
          refId: latest_external
      - refId: latest_indexed
        relativeTimeRange:
          from: 600
          to: 0
        datasourceUid: PBFA97CFB590B2093
        model:
          datasource:
            type: prometheus
            uid: PBFA97CFB590B2093
          editorMode: code
          expr: sync_status_block_number{job="azimuth", instance="linear_star_release", kind="latest_indexed"}
          hide: false
          instant: true
          legendFormat: __auto
          range: false
          refId: latest_indexed
      - refId: condition
        relativeTimeRange:
          from: 600
          to: 0
        datasourceUid: __expr__
        model:
          conditions:
            - evaluator:
                params:
                  - 0
                  - 0
                type: gt
              operator:
                type: and
              query:
                params: []
              reducer:
                params: []
                type: avg
              type: query
          datasource:
            name: Expression
            type: __expr__
            uid: __expr__
          expression: ${diff} >= 16
          intervalMs: 1000
          maxDataPoints: 43200
          refId: condition
          type: math
    noDataState: Alerting
    execErrState: Alerting
    for: 15m
    annotations:
      summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
    isPaused: false
  - uid: polls_diff_external
    title: polls_watcher_head_tracking
    condition: condition
    data:
      - refId: diff
        relativeTimeRange:
          from: 600
          to: 0
        datasourceUid: PBFA97CFB590B2093
        model:
          datasource:
            type: prometheus
            uid: PBFA97CFB590B2093
          disableTextWrap: false
          editorMode: code
          expr: latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", instance="polls", kind="latest_indexed"}
          fullMetaSearch: false
          includeNullMetadata: true
          instant: true
          intervalMs: 1000
          legendFormat: __auto
          maxDataPoints: 43200
          range: false
          refId: diff
          useBackend: false
      - refId: latest_external
        relativeTimeRange:
          from: 600
          to: 0
        datasourceUid: PBFA97CFB590B2093
        model:
          datasource:
            type: prometheus
            uid: PBFA97CFB590B2093
          editorMode: code
          expr: latest_block_number{chain="ethereum"}
          hide: false
          instant: true
          legendFormat: __auto
          range: false
          refId: latest_external
      - refId: latest_indexed
        relativeTimeRange:
          from: 600
          to: 0
        datasourceUid: PBFA97CFB590B2093
        model:
          datasource:
            type: prometheus
            uid: PBFA97CFB590B2093
          editorMode: code
          expr: sync_status_block_number{job="azimuth", instance="polls", kind="latest_indexed"}
          hide: false
          instant: true
          legendFormat: __auto
          range: false
          refId: latest_indexed
      - refId: condition
        relativeTimeRange:
          from: 600
          to: 0
        datasourceUid: __expr__
        model:
          conditions:
            - evaluator:
                params:
                  - 0
                  - 0
                type: gt
              operator:
                type: and
              query:
                params: []
              reducer:
                params: []
                type: avg
              type: query
          datasource:
            name: Expression
            type: __expr__
            uid: __expr__
          expression: ${diff} >= 16
          intervalMs: 1000
          maxDataPoints: 43200
          refId: condition
          type: math
    noDataState: Alerting
    execErrState: Alerting
    for: 15m
    annotations:
      summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
    isPaused: false

  # Sushi
  - uid: sushiswap_diff_external
    title: sushiswap_watcher_head_tracking
    condition: condition
    data:
      - refId: diff
        relativeTimeRange:
          from: 600
          to: 0
        datasourceUid: PBFA97CFB590B2093
        model:
          datasource:
            type: prometheus
            uid: PBFA97CFB590B2093
          disableTextWrap: false
          editorMode: code
          expr: latest_block_number - on(chain) group_right sync_status_block_number{job="sushi", instance="sushiswap", kind="latest_indexed"}
          fullMetaSearch: false
          includeNullMetadata: true
          instant: true
          intervalMs: 1000
          legendFormat: __auto
          maxDataPoints: 43200
          range: false
          refId: diff
          useBackend: false
      - refId: latest_external
        relativeTimeRange:
          from: 600
          to: 0
        datasourceUid: PBFA97CFB590B2093
        model:
          datasource:
            type: prometheus
            uid: PBFA97CFB590B2093
          editorMode: code
          expr: latest_block_number{chain="filecoin"}
          hide: false
          instant: true
          legendFormat: __auto
          range: false
          refId: latest_external
      - refId: latest_indexed
        relativeTimeRange:
          from: 600
          to: 0
        datasourceUid: PBFA97CFB590B2093
        model:
          datasource:
            type: prometheus
            uid: PBFA97CFB590B2093
          editorMode: code
          expr: sync_status_block_number{job="sushi", instance="sushiswap", kind="latest_indexed"}
          hide: false
          instant: true
          legendFormat: __auto
          range: false
          refId: latest_indexed
      - refId: condition
        relativeTimeRange:
          from: 600
          to: 0
        datasourceUid: __expr__
        model:
          conditions:
            - evaluator:
                params:
                  - 0
                  - 0
                type: gt
              operator:
                type: and
              query:
                params: []
              reducer:
                params: []
                type: avg
              type: query
          datasource:
            name: Expression
            type: __expr__
            uid: __expr__
          expression: ${diff} >= 16
          intervalMs: 1000
          maxDataPoints: 43200
          refId: condition
          type: math
    noDataState: Alerting
    execErrState: Alerting
    for: 15m
    annotations:
      summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
    isPaused: false
  - uid: merkl_sushiswap_diff_external
    title: merkl_sushiswap_watcher_head_tracking
    condition: condition
    data:
      - refId: diff
        relativeTimeRange:
          from: 600
          to: 0
        datasourceUid: PBFA97CFB590B2093
        model:
          datasource:
            type: prometheus
            uid: PBFA97CFB590B2093
          disableTextWrap: false
          editorMode: code
          expr: latest_block_number - on(chain) group_right sync_status_block_number{job="sushi", instance="merkl_sushiswap", kind="latest_indexed"}
          fullMetaSearch: false
          includeNullMetadata: true
          instant: true
          intervalMs: 1000
          legendFormat: __auto
          maxDataPoints: 43200
          range: false
          refId: diff
          useBackend: false
      - refId: latest_external
        relativeTimeRange:
          from: 600
          to: 0
        datasourceUid: PBFA97CFB590B2093
        model:
          datasource:
            type: prometheus
            uid: PBFA97CFB590B2093
          editorMode: code
          expr: latest_block_number{chain="filecoin"}
          hide: false
          instant: true
          legendFormat: __auto
          range: false
          refId: latest_external
      - refId: latest_indexed
        relativeTimeRange:
          from: 600
          to: 0
        datasourceUid: PBFA97CFB590B2093
        model:
          datasource:
            type: prometheus
            uid: PBFA97CFB590B2093
          editorMode: code
          expr: sync_status_block_number{job="sushi", instance="merkl_sushiswap", kind="latest_indexed"}
          hide: false
          instant: true
          legendFormat: __auto
          range: false
          refId: latest_indexed
      - refId: condition
        relativeTimeRange:
          from: 600
          to: 0
        datasourceUid: __expr__
        model:
          conditions:
            - evaluator:
                params:
                  - 0
                  - 0
                type: gt
              operator:
                type: and
              query:
                params: []
              reducer:
                params: []
                type: avg
              type: query
          datasource:
            name: Expression
            type: __expr__
            uid: __expr__
          expression: ${diff} >= 16
          intervalMs: 1000
          maxDataPoints: 43200
          refId: condition
          type: math
    noDataState: Alerting
    execErrState: Alerting
    for: 15m
    annotations:
      summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
    isPaused: false
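
These watcher rules all share one shape: the diff query joins the external chain head against the watcher's indexed head via on(chain) group_right, and the math expression fires once ${diff} >= 16 holds for the full 15m window. The same join can be sanity-checked outside Grafana against the Prometheus HTTP API; a minimal sketch, assuming a Prometheus instance is reachable at localhost:9090 (address and port are illustrative, not taken from this stack's config):

    # Evaluate the head-lag expression for every azimuth-group watcher at once
    QUERY='latest_block_number - on(chain) group_right sync_status_block_number{job="azimuth", kind="latest_indexed"}'
    curl -s --data-urlencode "query=${QUERY}" http://localhost:9090/api/v1/query \
      | jq -r '.data.result[] | "\(.metric.instance) lag=\(.value[1])"'
    # Any instance printing lag >= 16 would trip its alert after the 15m hold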
@ -1,3 +0,0 @@
NEXT_PUBLIC_COINGECKO_URL=https://api.coingecko.com

NEXT_PUBLIC_ENABLE_FEATURES=true
@ -1,22 +0,0 @@
#!/bin/bash

set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
  set -x
fi

# Check and exit if a deployment already exists (on restarts)
if [ -d /app-builds/osmosis/build ]; then
  echo "Build already exists, remove volume to rebuild"
  exit 0
fi

yarn build:static
./build-urbit.sh

# Move build to app-builds
mkdir -p /app-builds/osmosis
cp -r ./out /app-builds/osmosis/build

cp -r mar /app-builds/osmosis/
cp desk.docket-0 /app-builds/osmosis/
@ -1,18 +0,0 @@
#!/bin/bash

set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
  set -x
fi

# Check and exit if a deployment already exists (on restarts)
if [ -d /app-builds/osmosis/build ]; then
  echo "Build already exists, remove volume to rebuild"
  exit 0
fi

yarn build:static

# Move build to app-builds
mkdir -p /app-builds/osmosis
cp -r ./out /app-builds/osmosis/build
@ -1,16 +0,0 @@
server {
    listen 80;
    listen [::]:80;
    server_name _;

    error_page 500 502 503 504 /50x.html;

    location / {
        root /usr/share/nginx/osmosis/build;
        index index.html index.htm index.nginx-debian.html;

        # First attempt to serve request as file, then as html,
        # then as directory, then fall back to displaying a 404.
        try_files $uri $uri.html $uri/ /index.html =404;
    }
}
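
In the try_files directive above, nginx checks $uri, then $uri.html, then the directory form, then serves /index.html so client-side routes still resolve; the trailing =404 applies only when even that fallback file is missing. A minimal reachability check once the container is serving (host and route are illustrative):

    # A route with no matching file on disk should still return the app shell
    curl -s -o /dev/null -w "%{http_code}\n" http://localhost/some-client-route   # expect 200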
@ -1,10 +0,0 @@
:~  title+'Osmosis'
    info+'Osmosis DEX Frontend, built and maintained by Laconic'
    color+0xcd.75df
    image+'https://altcoinsbox.com/wp-content/uploads/2023/03/osmosis-logo.png'
    base+'osmosis'
    glob-http+['REPLACE_WITH_GLOB_URL' REPLACE_WITH_GLOB_HASH]
    version+[0 0 2]
    website+'https://osmosis.zone/'
    license+'MIT'
==
@ -1,12 +0,0 @@
|_  dat=@
++  grow
  |%
  ++  mime  [/image/x-icon (as-octs:mimes:html dat)]
  --
++  grab
  |%
  ++  mime  |=([p=mite q=octs] q.q)
  ++  noun  @
  --
++  grad  %mime
--
@ -1,12 +0,0 @@
|_  dat=@
++  grow
  |%
  ++  mime  [/image/jpeg (as-octs:mimes:html dat)]
  --
++  grab
  |%
  ++  mime  |=([p=mite q=octs] q.q)
  ++  noun  @
  --
++  grad  %mime
--
@ -1,18 +0,0 @@
::
::::  /hoon/map/mar
  ::  Mark for js source maps
/?  310
::
=,  eyre
|_  mud=@
++  grow
  |%
  ++  mime  [/application/octet-stream (as-octs:mimes:html (@t mud))]
  --
++  grab
  |%  ::  convert from
  ++  mime  |=([p=mite q=octs] (@t q.q))
  ++  noun  cord  ::  clam from %noun
  --
++  grad  %mime
--
@ -1,12 +0,0 @@
|_  dat=@
++  grow
  |%
  ++  mime  [/image/webp (as-octs:mimes:html dat)]
  --
++  grab
  |%
  ++  mime  |=([p=mite q=octs] q.q)
  ++  noun  @
  --
++  grad  %mime
--
@ -1,9 +0,0 @@
#!/bin/sh

if [ "$ENABLE_PROXY" = "true" ]; then
  echo "Proxy server enabled"
  yarn proxy
else
  echo "Proxy server disabled, exiting"
  exit 0
fi
@ -1,21 +0,0 @@
#!/bin/bash

set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
  set -x
fi

# Check and exit if a deployment already exists (on restarts)
if [ -d /app-builds/uniswap/build ]; then
  echo "Build already exists, remove volume to rebuild"
  exit 0
fi

yarn build

# Copy over build and other files to app-builds for urbit deployment
mkdir -p /app-builds/uniswap
cp -r ./build /app-builds/uniswap/

cp -r mar /app-builds/uniswap/
cp desk.docket-0 /app-builds/uniswap/
@ -1,10 +0,0 @@
:~  title+'Uniswap'
    info+'Self-hosted uniswap frontend.'
    color+0xcd.75df
    image+'https://logowik.com/content/uploads/images/uniswap-uni7403.jpg'
    base+'uniswap'
    glob-http+['REPLACE_WITH_GLOB_URL' REPLACE_WITH_GLOB_HASH]
    version+[0 0 1]
    website+'https://uniswap.org/'
    license+'MIT'
==
@ -1,18 +0,0 @@
::
::::  /hoon/map/mar
  ::  Mark for js source maps
/?  310
::
=,  eyre
|_  mud=@
++  grow
  |%
  ++  mime  [/application/octet-stream (as-octs:mimes:html (@t mud))]
  --
++  grab
  |%  ::  convert from
  ++  mime  |=([p=mite q=octs] (@t q.q))
  ++  noun  cord  ::  clam from %noun
  --
++  grad  %mime
--
@ -1,12 +0,0 @@
|_  dat=octs
++  grow
  |%
  ++  mime  [/font/ttf dat]
  --
++  grab
  |%
  ++  mime  |=([=mite =octs] octs)
  ++  noun  octs
  --
++  grad  %mime
--
@ -1,12 +0,0 @@
|_  dat=octs
++  grow
  |%
  ++  mime  [/font/woff dat]
  --
++  grab
  |%
  ++  mime  |=([=mite =octs] octs)
  ++  noun  octs
  --
++  grad  %mime
--
@ -1,34 +0,0 @@
#!/bin/bash

# $1: Remote user host
# $2: App name (eg. uniswap)
# $3: Assets dir path (local) for app (eg. /home/user/myapp/urbit-files)
# $4: Remote Urbit ship's pier dir path (eg. /home/user/zod)
# $5: Glob file URL (eg. https://xyz.com/glob-0vabcd.glob)
# $6: Glob file hash (eg. 0vabcd)

if [ "$#" -ne 6 ]; then
  echo "Incorrect number of arguments"
  echo "Usage: $0 <username@remote_host> <app_name> </path/to/app/assets/folder> </path/to/remote/pier/folder> <glob_url> <glob_hash>"
  exit 1
fi

remote_user_host="$1"
app_name=$2
app_assets_folder=$3
remote_pier_folder="$4"
glob_url="$5"
glob_hash="$6"

installation_script="./install-urbit-app.sh"

# Copy over the assets to remote machine in a tmp dir
remote_app_assets_folder=/tmp/urbit-app-assets/$app_name
ssh "$remote_user_host" "mkdir -p $remote_app_assets_folder"
scp -r $app_assets_folder/* $remote_user_host:$remote_app_assets_folder

# Run the installation script
ssh "$remote_user_host" "bash -s $app_name $remote_app_assets_folder '${glob_url}' $glob_hash $remote_pier_folder" < "$installation_script"

# Remove the tmp assets dir
ssh "$remote_user_host" "rm -rf $remote_app_assets_folder"
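
A hypothetical invocation matching the six positional arguments documented at the top of the script (the script name, host, paths, and glob details below are all placeholders):

    ./deploy-urbit-app.sh user@203.0.113.7 uniswap \
      /home/user/uniswap/urbit-files /home/user/zod \
      https://example.com/glob-0vabcd.glob 0vabcd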
@ -1,110 +0,0 @@
#!/bin/bash

if [ -n "$CERC_SCRIPT_DEBUG" ]; then
  set -x
fi

if [ -z "$CERC_URBIT_APP" ]; then
  echo "CERC_URBIT_APP not set, exiting"
  exit 0
fi

echo "Creating Urbit application for ${CERC_URBIT_APP}"

app_desk_dir=/urbit/zod/${CERC_URBIT_APP}
if [ -d ${app_desk_dir} ]; then
  echo "Desk dir already exists for ${CERC_URBIT_APP}, skipping deployment..."
  exit 0
fi

app_build=/app-builds/${CERC_URBIT_APP}/build
app_mark_files=/app-builds/${CERC_URBIT_APP}/mar
app_docket_file=/app-builds/${CERC_URBIT_APP}/desk.docket-0

echo "Reading app build from ${app_build}"
echo "Reading additional mark files from ${app_mark_files}"
echo "Reading docket file ${app_docket_file}"

# Loop until the app's build appears
while [ ! -d ${app_build} ]; do
  echo "${CERC_URBIT_APP} app build not found, retrying in 5s..."
  sleep 5
done
echo "Build found..."

echo "Using IPFS endpoint ${CERC_IPFS_GLOB_HOST_ENDPOINT} for hosting the ${CERC_URBIT_APP} glob"
echo "Using IPFS server endpoint ${CERC_IPFS_SERVER_ENDPOINT} for reading ${CERC_URBIT_APP} glob"
ipfs_host_endpoint=${CERC_IPFS_GLOB_HOST_ENDPOINT}
ipfs_server_endpoint=${CERC_IPFS_SERVER_ENDPOINT}

# Fire curl requests to perform operations on the ship
dojo () {
  curl -s --data '{"source":{"dojo":"'"$1"'"},"sink":{"stdout":null}}' http://localhost:12321
}

hood () {
  curl -s --data '{"source":{"dojo":"+hood/'"$1"'"},"sink":{"app":"hood"}}' http://localhost:12321
}

# Create / mount the app's desk
hood "merge %${CERC_URBIT_APP} our %landscape"
hood "mount %${CERC_URBIT_APP}"

# Copy over build to desk data dir
cp -r ${app_build} ${app_desk_dir}

# Copy over the additional mark files
cp ${app_mark_files}/* ${app_desk_dir}/mar/

rm "${app_desk_dir}/desk.bill"
rm "${app_desk_dir}/desk.ship"

# Commit changes and create a glob
hood "commit %${CERC_URBIT_APP}"
dojo "-landscape!make-glob %${CERC_URBIT_APP} /build"

glob_file=$(ls -1 -c zod/.urb/put | head -1)
echo "Created glob file: ${glob_file}"

# Upload the glob file to IPFS
echo "Uploading glob file to ${ipfs_host_endpoint}"
upload_response=$(curl -X POST -F file=@./zod/.urb/put/${glob_file} ${ipfs_host_endpoint}/api/v0/add)
glob_cid=$(echo "$upload_response" | grep -o '"Hash":"[^"]*' | sed 's/"Hash":"//')

glob_url="${ipfs_server_endpoint}/ipfs/${glob_cid}?filename=${glob_file}"
glob_hash=$(echo "$glob_file" | sed "s/glob-\([a-z0-9\.]*\).glob/\1/")

echo "Glob file uploaded to IPFS:"
echo "{ cid: ${glob_cid}, filename: ${glob_file} }"
echo "{ url: ${glob_url}, hash: ${glob_hash} }"

# Exit if installation is not required
if [ "$CERC_ENABLE_APP_INSTALL" = "false" ]; then
  echo "CERC_ENABLE_APP_INSTALL set to false, skipping app installation"
  exit 0
fi

# Curl and wait for the glob to be hosted
echo "Checking if glob file hosted at ${glob_url}"
while true; do
  response=$(curl -sL -w "%{http_code}" -o /dev/null "$glob_url")

  if [ $response -eq 200 ]; then
    echo "File found at $glob_url"
    break # Exit the loop if the file is found
  else
    echo "File not found, retrying in 5s..."
    sleep 5
  fi
done

# Replace the docket file for app
# Substitute the glob URL and hash
cp ${app_docket_file} ${app_desk_dir}/
sed -i "s|REPLACE_WITH_GLOB_URL|${glob_url}|g; s|REPLACE_WITH_GLOB_HASH|${glob_hash}|g" ${app_desk_dir}/desk.docket-0

# Commit changes and install the app
hood "commit %${CERC_URBIT_APP}"
hood "install our %${CERC_URBIT_APP}"

echo "${CERC_URBIT_APP} app installed"
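
The dojo and hood helpers in the script above drive the running ship through its HTTP lens on port 12321: dojo evaluates an expression and routes the output to stdout, while hood wraps the expression as a +hood/ generator handled by the %hood app. The same pattern works interactively, e.g. to confirm the lens is up by printing the ship's identity (the endpoint is the one the script uses; the query itself is just an example):

    curl -s --data '{"source":{"dojo":"our"},"sink":{"stdout":null}}' http://localhost:12321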
@ -1,60 +0,0 @@
#!/bin/bash

# $1: App name (eg. uniswap)
# $2: Assets dir path (local) for app (eg. /home/user/myapp/urbit-files)
# $3: Glob file URL (eg. https://xyz.com/glob-0vabcd.glob)
# $4: Glob file hash (eg. 0vabcd)
# $5: Urbit ship's pier dir (default: ./zod)

if [ "$#" -lt 4 ]; then
  echo "Insufficient arguments"
  echo "Usage: $0 <app_name> </path/to/app/assets/folder> <glob_url> <glob_hash> [/path/to/remote/pier/folder]"
  exit 1
fi

app_name=$1
app_mark_files=$2/mar
app_docket_file=$2/desk.docket-0
echo "Creating Urbit application for ${app_name}"
echo "Reading additional mark files from ${app_mark_files}"
echo "Reading docket file ${app_docket_file}"

glob_url=$3
glob_hash=$4
echo "Using glob file from ${glob_url} with hash ${glob_hash}"

# Default pier dir: ./zod
# Default desk dir: ./zod/<app_name>
pier_dir="${5:-./zod}"
app_desk_dir="${pier_dir}/${app_name}"
echo "Using ${app_desk_dir} as the ${app_name} desk dir path"

# Fire curl requests to perform operations on the ship
dojo () {
  curl -s --data '{"source":{"dojo":"'"$1"'"},"sink":{"stdout":null}}' http://localhost:12321
}

hood () {
  curl -s --data '{"source":{"dojo":"+hood/'"$1"'"},"sink":{"app":"hood"}}' http://localhost:12321
}

# Create / mount the app's desk
hood "merge %${app_name} our %landscape"
hood "mount %${app_name}"

# Copy over the additional mark files
cp ${app_mark_files}/* ${app_desk_dir}/mar/

rm "${app_desk_dir}/desk.bill"
rm "${app_desk_dir}/desk.ship"

# Replace the docket file for app
# Substitute the glob URL and hash
cp ${app_docket_file} ${app_desk_dir}/
sed -i "s|REPLACE_WITH_GLOB_URL|${glob_url}|g; s|REPLACE_WITH_GLOB_HASH|${glob_hash}|g" ${app_desk_dir}/desk.docket-0

# Commit changes and install the app
hood "commit %${app_name}"
hood "install our %${app_name}"

echo "${app_name} app installed"
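
A hypothetical local run against a fake ship pier in the working directory, using the argument order from the usage line above (URL and hash are placeholders):

    ./install-urbit-app.sh uniswap /home/user/uniswap/urbit-files \
      https://example.com/glob-0vabcd.glob 0vabcd ./zod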
@ -1,20 +0,0 @@
#!/bin/bash

set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
  set -x
fi

pier_dir="/urbit/zod"

# TODO: Bootstrap fake ship on the first run

# Run urbit ship in daemon mode
# Check if the directory exists
if [ -d "$pier_dir" ]; then
  echo "Pier directory already exists, rebooting..."
  /urbit/zod/.run -d
else
  echo "Creating a new fake ship..."
  urbit -d -F zod
fi
@ -1,28 +0,0 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
  set -x
fi

echo "Using IPLD ETH RPC endpoint ${CERC_IPLD_ETH_RPC}"
echo "Using IPLD GQL endpoint ${CERC_IPLD_ETH_GQL}"
echo "Using historicalLogsBlockRange ${CERC_HISTORICAL_BLOCK_RANGE:-2000}"

# Replace env variables in template TOML file
# Read in the config template TOML file and modify it
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
    sed -E "s|REPLACE_WITH_CERC_IPLD_ETH_RPC|${CERC_IPLD_ETH_RPC}|g; \
    s|REPLACE_WITH_CERC_IPLD_ETH_GQL|${CERC_IPLD_ETH_GQL}|g; \
    s|REPLACE_WITH_CERC_HISTORICAL_BLOCK_RANGE|${CERC_HISTORICAL_BLOCK_RANGE:-2000}| ")

# Write the modified content to a new file
echo "$WATCHER_CONFIG" > environments/watcher-config.toml

# Merge SO watcher config with existing config file
node merge-toml.js

yarn watch:contract --address $CONTRACT_ADDRESS --kind $CONTRACT_NAME --checkpoint true --starting-block $STARTING_BLOCK

echo 'yarn job-runner'
yarn job-runner
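
The sed templating step can be exercised on its own before the watcher container runs it; a minimal sketch with placeholder endpoint values (the template path matches the script above, the endpoints are illustrative):

    export CERC_IPLD_ETH_RPC=http://ipld-eth-server:8081
    export CERC_IPLD_ETH_GQL=http://ipld-eth-server:8082/graphql
    # Preview the first rendered lines without writing watcher-config.toml
    sed -E "s|REPLACE_WITH_CERC_IPLD_ETH_RPC|${CERC_IPLD_ETH_RPC}|g; \
        s|REPLACE_WITH_CERC_IPLD_ETH_GQL|${CERC_IPLD_ETH_GQL}|g" \
        environments/watcher-config-template.toml | head -20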
@ -4,17 +4,18 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
   set -x
 fi
 
+CERC_IPLD_ETH_RPC="${CERC_IPLD_ETH_RPC:-${DEFAULT_CERC_IPLD_ETH_RPC}}"
+CERC_IPLD_ETH_GQL="${CERC_IPLD_ETH_GQL:-${DEFAULT_CERC_IPLD_ETH_GQL}}"
+
 echo "Using IPLD ETH RPC endpoint ${CERC_IPLD_ETH_RPC}"
 echo "Using IPLD GQL endpoint ${CERC_IPLD_ETH_GQL}"
-echo "Using historicalLogsBlockRange ${CERC_HISTORICAL_BLOCK_RANGE:-2000}"
 
 # Replace env variables in template TOML file
 # Read in the config template TOML file and modify it
 WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
 WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
     sed -E "s|REPLACE_WITH_CERC_IPLD_ETH_RPC|${CERC_IPLD_ETH_RPC}|g; \
-    s|REPLACE_WITH_CERC_IPLD_ETH_GQL|${CERC_IPLD_ETH_GQL}|g; \
-    s|REPLACE_WITH_CERC_HISTORICAL_BLOCK_RANGE|${CERC_HISTORICAL_BLOCK_RANGE:-2000}| ")
+    s|REPLACE_WITH_CERC_IPLD_ETH_GQL|${CERC_IPLD_ETH_GQL}| ")
 
 # Write the modified content to a new file
 echo "$WATCHER_CONFIG" > environments/watcher-config.toml
@ -2,9 +2,6 @@
 host = "0.0.0.0"
 maxSimultaneousRequests = -1
 
-[metrics]
-host = "0.0.0.0"
-
 [database]
 host = "watcher-db"
 port = 5432
@ -15,7 +12,3 @@
 [upstream.ethServer]
 gqlApiEndpoint = "REPLACE_WITH_CERC_IPLD_ETH_GQL"
 rpcProviderEndpoint = "REPLACE_WITH_CERC_IPLD_ETH_RPC"
-
-[jobQueue]
-historicalLogsBlockRange = REPLACE_WITH_CERC_HISTORICAL_BLOCK_RANGE
-blockDelayInMilliSecs = 12000
@ -0,0 +1,5 @@
+# Defaults
+
+# ipld-eth-server endpoints
+DEFAULT_CERC_IPLD_ETH_RPC=
+DEFAULT_CERC_IPLD_ETH_GQL=
@ -16,5 +16,8 @@ WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
 # Write the modified content to a new file
 echo "$WATCHER_CONFIG" > environments/local.toml
 
+echo "Initializing watcher..."
+yarn fill --start-block $SUSHISWAP_START_BLOCK --end-block $((SUSHISWAP_START_BLOCK + 1))
+
 echo "Running server..."
 DEBUG=vulcanize:* exec node --enable-source-maps dist/server.js
@ -2,7 +2,6 @@
 host = "0.0.0.0"
 port = 3008
 kind = "active"
-gqlPath = '/'
 
 # Checkpointing state.
 checkpointing = true
@ -41,7 +40,7 @@
 timeTravelMaxAge = 86400 # 1 day
 
 [metrics]
-host = "0.0.0.0"
+host = "127.0.0.1"
 port = 9000
 [metrics.gql]
 port = 9001
@ -84,6 +83,8 @@
 subgraphEventsOrder = true
 # Filecoin block time: https://docs.filecoin.io/basics/the-blockchain/blocks-and-tipsets#blocktime
 blockDelayInMilliSecs = 30000
+prefetchBlocksInMem = false
+prefetchBlockCount = 10
 
 # Boolean to switch between modes of processing events when starting the server.
 # Setting to true will fetch filtered events and required blocks in a range of blocks and then process them.
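
Note the effect of the [metrics] host change above: bound to 127.0.0.1, the port 9000 endpoint is reachable only from inside the watcher container's own network namespace, not from sibling containers, unless the scraper shares that namespace. A quick reachability check (container name is illustrative, and assumes curl is available in the image):

    docker exec sushiswap-watcher curl -s http://127.0.0.1:9000/metrics | head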
@ -16,5 +16,8 @@ WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
 # Write the modified content to a new file
 echo "$WATCHER_CONFIG" > environments/local.toml
 
+echo "Initializing watcher..."
+yarn fill --start-block $SUSHISWAP_START_BLOCK --end-block $((SUSHISWAP_START_BLOCK + 1))
+
 echo "Running server..."
 DEBUG=vulcanize:* exec node --enable-source-maps dist/server.js
@ -2,7 +2,6 @@
 host = "0.0.0.0"
 port = 3008
 kind = "active"
-gqlPath = "/"
 
 # Checkpointing state.
 checkpointing = true
@ -41,7 +40,7 @@
 timeTravelMaxAge = 86400 # 1 day
 
 [metrics]
-host = "0.0.0.0"
+host = "127.0.0.1"
 port = 9000
 [metrics.gql]
 port = 9001
@ -84,6 +83,8 @@
 subgraphEventsOrder = true
 # Filecoin block time: https://docs.filecoin.io/basics/the-blockchain/blocks-and-tipsets#blocktime
 blockDelayInMilliSecs = 30000
+prefetchBlocksInMem = false
+prefetchBlockCount = 10
 
 # Boolean to switch between modes of processing events when starting the server.
 # Setting to true will fetch filtered events and required blocks in a range of blocks and then process them.
@ -2,6 +2,4 @@
 # Build a local version of the task executor for act-runner
 source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
 SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+docker build -t cerc/act-runner-task-executor:local -f ${CERC_REPO_BASE_DIR}/hosting/act-runner/Dockerfile.task-executor ${build_command_args} ${SCRIPT_DIR}
-cd ${CERC_REPO_BASE_DIR}/hosting/act-runner
-docker build -t cerc/act-runner-task-executor:local -f Dockerfile.task-executor ${build_command_args} .
-
@ -10,10 +10,9 @@ COPY genesis /opt/genesis
 COPY --from=ethgen /usr/local/bin/eth2-testnet-genesis /usr/local/bin/
 COPY --from=ethgen /usr/local/bin/eth2-val-tools /usr/local/bin/
 COPY --from=ethgen /apps /apps
-RUN cd /apps/el-gen && pip3 install --break-system-packages -r requirements.txt
+RUN cd /apps/el-gen && pip3 install -r requirements.txt
 # web3==5.24.0 used by el-gen is broken on python 3.11
-RUN pip3 install --break-system-packages --upgrade "web3==6.5.0"
-RUN pip3 install --break-system-packages --upgrade "typing-extensions"
+RUN pip3 install --upgrade "web3==6.5.0"
 
 # Build genesis config
 RUN apk add --no-cache make bash envsubst jq
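
The --break-system-packages flag being dropped here relates to PEP 668: newer Alpine and Debian base images mark the system Python as externally managed, and pip3 then refuses to install into the system site-packages without the override. Whether the flag is needed depends entirely on the base image in use; a sketch of both behaviours (package choice is illustrative):

    # On a PEP 668-enforcing image this fails: "error: externally-managed-environment"
    pip3 install --upgrade "web3==6.5.0"
    # The override installs into the system environment anyway
    pip3 install --break-system-packages --upgrade "web3==6.5.0"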
Some files were not shown because too many files have changed in this diff