Merge branch 'main' into telackey/webapp
commit 084dcb6301
.gitea/workflows/fixturenet-eth-plugeth-test.yml (new file, 36 lines)
@@ -0,0 +1,36 @@
+name: Fixturenet-Eth-Plugeth-Test
+
+on:
+  push:
+    branches: 'ci-test'
+
+# Needed until we can incorporate docker startup into the executor container
+env:
+  DOCKER_HOST: unix:///var/run/dind.sock
+
+
+jobs:
+  test:
+    name: "Run an Ethereum plugeth fixturenet test"
+    runs-on: ubuntu-latest
+    steps:
+      - name: "Clone project repository"
+        uses: actions/checkout@v3
+      - name: "Install Python"
+        uses: cerc-io/setup-python@v4
+        with:
+          python-version: '3.8'
+      - name: "Print Python version"
+        run: python3 --version
+      - name: "Install shiv"
+        run: pip install shiv
+      - name: "Generate build version file"
+        run: ./scripts/create_build_tag_file.sh
+      - name: "Build local shiv package"
+        run: ./scripts/build_shiv_package.sh
+      - name: Start dockerd # Also needed until we can incorporate into the executor
+        run: |
+          dockerd -H $DOCKER_HOST --userland-proxy=false &
+          sleep 5
+      - name: "Run fixturenet-eth tests"
+        run: ./tests/fixturenet-eth-plugeth/run-test.sh
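Note: the step above launches dockerd in the background and then waits with a fixed `sleep 5` before the tests run. A minimal readiness-loop sketch is shown below; it assumes only the `DOCKER_HOST` value set in the workflow env, and the 30-attempt, one-second schedule is an illustrative choice, not part of this commit.

```
# Sketch: wait for dockerd to answer on $DOCKER_HOST instead of a fixed sleep.
# Assumes DOCKER_HOST=unix:///var/run/dind.sock as exported by the workflow env;
# the 30 x 1s retry schedule is an assumption for illustration only.
dockerd -H $DOCKER_HOST --userland-proxy=false &
for attempt in $(seq 1 30); do
    if docker info > /dev/null 2>&1; then
        echo "dockerd is ready"
        break
    fi
    sleep 1
done
```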
@@ -4,6 +4,11 @@ on:
   push:
     branches: 'ci-test'
 
+# Needed until we can incorporate docker startup into the executor container
+env:
+  DOCKER_HOST: unix:///var/run/dind.sock
+
+
 jobs:
   test:
     name: "Run an Ethereum fixturenet test"
@@ -23,5 +28,10 @@ jobs:
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
        run: ./scripts/build_shiv_package.sh
+      - name: Start dockerd # Also needed until we can incorporate into the executor
+        run: |
+          dockerd -H $DOCKER_HOST --userland-proxy=false &
+          sleep 5
       - name: "Run fixturenet-eth tests"
         run: ./tests/fixturenet-eth/run-test.sh
+
@@ -37,3 +37,4 @@ jobs:
           sleep 5
       - name: "Run smoke tests"
         run: ./tests/smoke-test/run-smoke-test.sh
+
@@ -6,7 +6,7 @@ Stack Orchestrator allows building and deployment of a Laconic Stack on a single
 
 ## Install
 
-**To get started quickly** on a fresh Ubuntu instance (e.g, Digital Ocean); [try this script](./scripts/quick-install-ubuntu.sh). **WARNING:** always review scripts prior to running them so that you know what is happening on your machine.
+**To get started quickly** on a fresh Ubuntu instance (e.g, Digital Ocean); [try this script](./scripts/quick-install-linux.sh). **WARNING:** always review scripts prior to running them so that you know what is happening on your machine.
 
 For any other installation, follow along below and **adapt these instructions based on the specifics of your system.**
 
@@ -6,4 +6,4 @@ source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
 # See: https://stackoverflow.com/a/246128/1701505
 SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
 
-docker build -t cerc/lighthouse:local ${build_command_args} ${SCRIPT_DIR}
+docker build -t cerc/lighthouse:local ${build_command_args} --build-arg TAG_SUFFIX="" ${SCRIPT_DIR}
@@ -1,7 +1,7 @@
 version: "1.0"
 name: azimuth
 repos:
-  - github.com/cerc-io/azimuth-watcher-ts
+  - github.com/cerc-io/azimuth-watcher-ts@v0.1.1
 containers:
   - cerc/watcher-azimuth
 pods:
@@ -14,7 +14,6 @@
 # along with this program. If not, see <http:#www.gnu.org/licenses/>.
 
 import click
-from dataclasses import dataclass
 from importlib import util
 import os
 from pathlib import Path
@@ -27,6 +26,7 @@ from app.deploy_types import DeploymentContext, DeployCommandContext
 def _make_default_deployment_dir():
     return "deployment-001"
 
+
 def _get_ports(stack):
     ports = {}
     parsed_stack = get_parsed_stack_config(stack)
@@ -42,6 +42,7 @@ def _get_ports(stack):
             ports[svc_name] = [ str(x) for x in svc["ports"] ]
     return ports
 
+
 def _get_named_volumes(stack):
     # Parse the compose files looking for named volumes
     named_volumes = []
@@ -76,30 +77,30 @@ def _create_bind_dir_if_relative(volume, path_string, compose_dir):
 
 # See: https://stackoverflow.com/questions/45699189/editing-docker-compose-yml-with-pyyaml
 def _fixup_pod_file(pod, spec, compose_dir):
     # Fix up volumes
     if "volumes" in spec:
         spec_volumes = spec["volumes"]
         if "volumes" in pod:
             pod_volumes = pod["volumes"]
             for volume in pod_volumes.keys():
                 if volume in spec_volumes:
                     volume_spec = spec_volumes[volume]
                     volume_spec_fixedup = volume_spec if Path(volume_spec).is_absolute() else f".{volume_spec}"
                     _create_bind_dir_if_relative(volume, volume_spec, compose_dir)
                     new_volume_spec = {"driver": "local",
                                        "driver_opts": {
                                            "type": "none",
                                            "device": volume_spec_fixedup,
                                            "o": "bind"
                                        }
                                        }
                     pod["volumes"][volume] = new_volume_spec
     # Fix up ports
     if "ports" in spec:
         spec_ports = spec["ports"]
         for container_name, container_ports in spec_ports.items():
             if container_name in pod["services"]:
                 pod["services"][container_name]["ports"] = container_ports
 
 
 def call_stack_deploy_init(deploy_command_context):
@@ -107,10 +108,13 @@ def call_stack_deploy_init(deploy_command_context):
     # Call a function in it
     # If no function found, return None
     python_file_path = get_stack_file_path(deploy_command_context.stack).parent.joinpath("deploy", "commands.py")
-    spec = util.spec_from_file_location("commands", python_file_path)
-    imported_stack = util.module_from_spec(spec)
-    spec.loader.exec_module(imported_stack)
-    return imported_stack.init(deploy_command_context)
+    if python_file_path.exists():
+        spec = util.spec_from_file_location("commands", python_file_path)
+        imported_stack = util.module_from_spec(spec)
+        spec.loader.exec_module(imported_stack)
+        return imported_stack.init(deploy_command_context)
+    else:
+        return None
 
 
 # TODO: fold this with function above
@@ -119,10 +123,13 @@ def call_stack_deploy_setup(deploy_command_context, extra_args):
     # Call a function in it
     # If no function found, return None
     python_file_path = get_stack_file_path(deploy_command_context.stack).parent.joinpath("deploy", "commands.py")
-    spec = util.spec_from_file_location("commands", python_file_path)
-    imported_stack = util.module_from_spec(spec)
-    spec.loader.exec_module(imported_stack)
-    return imported_stack.setup(deploy_command_context, extra_args)
+    if python_file_path.exists():
+        spec = util.spec_from_file_location("commands", python_file_path)
+        imported_stack = util.module_from_spec(spec)
+        spec.loader.exec_module(imported_stack)
+        return imported_stack.setup(deploy_command_context, extra_args)
+    else:
+        return None
 
 
 # TODO: fold this with function above
@@ -131,10 +138,13 @@ def call_stack_deploy_create(deployment_context):
     # Call a function in it
     # If no function found, return None
     python_file_path = get_stack_file_path(deployment_context.command_context.stack).parent.joinpath("deploy", "commands.py")
-    spec = util.spec_from_file_location("commands", python_file_path)
-    imported_stack = util.module_from_spec(spec)
-    spec.loader.exec_module(imported_stack)
-    return imported_stack.create(deployment_context)
+    if python_file_path.exists():
+        spec = util.spec_from_file_location("commands", python_file_path)
+        imported_stack = util.module_from_spec(spec)
+        spec.loader.exec_module(imported_stack)
+        return imported_stack.create(deployment_context)
+    else:
+        return None
 
 
 # Inspect the pod yaml to find config files referenced in subdirectories
@@ -23,7 +23,7 @@ ssh root@IP
 2. Get the install script, give it executable permissions, and run it:
 
 ```
-curl -o install.sh https://raw.githubusercontent.com/cerc-io/stack-orchestrator/main/scripts/quick-install-ubuntu.sh
+curl -o install.sh https://raw.githubusercontent.com/cerc-io/stack-orchestrator/main/scripts/quick-install-linux.sh
 ```
 ```
 chmod +x install.sh
@@ -28,12 +28,43 @@ fi
 # Check python3 is available
 # Check machine resources are sufficient
 
+# Determine if we are on Debian or Ubuntu
+linux_distro=$(lsb_release -a 2>/dev/null | grep "^Distributor ID:" | cut -f 2)
+# Some systems don't have lsb_release installed (e.g. ChromeOS) and so we try to
+# use /etc/os-release instead
+if [[ -z "$linux_distro" ]]; then
+  if [[ -f "/etc/os-release" ]]; then
+    distro_name_string=$(grep "^NAME=" /etc/os-release | cut -d '=' -f 2)
+    if [[ $distro_name_string =~ Debian ]]; then
+      linux_distro="Debian"
+    elif [[ $distro_name_string =~ Ubuntu ]]; then
+      linux_distro="Ubuntu"
+    fi
+  else
+    echo "Failed to identify distro: /etc/os-release doesn't exist"
+    exit 1
+  fi
+fi
+case $linux_distro in
+  Debian)
+    echo "Installing docker for Debian"
+    ;;
+  Ubuntu)
+    echo "Installing docker for Ubuntu"
+    ;;
+  *)
+    echo "ERROR: Detected unknown distribution $linux_distro, can't install docker"
+    exit 1
+    ;;
+esac
+
 # dismiss the popups
 export DEBIAN_FRONTEND=noninteractive
 
 ## https://docs.docker.com/engine/install/ubuntu/
+## https://docs.docker.com/engine/install/debian/
 ## https://superuser.com/questions/518859/ignore-packages-that-are-not-currently-installed-when-using-apt-get-remove1
-packages_to_remove="docker docker-engine docker.io containerd runc"
+packages_to_remove="docker docker-engine docker.io containerd runc docker-compose docker-doc podman-docker"
 installed_packages_to_remove=""
 for package_to_remove in $(echo $packages_to_remove); do
   $(dpkg --info $package_to_remove &> /dev/null)
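For reference only: the detection added above shells out to `lsb_release` and falls back to the `NAME=` field of `/etc/os-release`. An alternative, more compact sketch (not what the script does) reads the `ID` field instead; it assumes `/etc/os-release` exists.

```
# Sketch of an alternative detection via the ID field of /etc/os-release.
# The quick-install script itself uses lsb_release and NAME= as shown above.
if [[ -f /etc/os-release ]]; then
    . /etc/os-release
    case "$ID" in
        debian) linux_distro="Debian" ;;
        ubuntu) linux_distro="Ubuntu" ;;
        *) echo "ERROR: unsupported distro: $ID"; exit 1 ;;
    esac
fi
echo "Detected distro: $linux_distro"
```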
@@ -65,10 +96,25 @@ sudo apt -y install curl
 sudo apt -y install ca-certificates gnupg
 
 # Add dockerco package repository
-sudo mkdir -m 0755 -p /etc/apt/keyrings
-curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
+# For reasons not obvious, the dockerco instructions for installation on
+# Debian and Ubuntu are slightly different here
+case $linux_distro in
+  Debian)
+    sudo install -m 0755 -d /etc/apt/keyrings
+    curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
+    sudo chmod a+r /etc/apt/keyrings/docker.gpg
+    ;;
+  Ubuntu)
+    sudo mkdir -m 0755 -p /etc/apt/keyrings
+    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
+    ;;
+  *)
+    echo "ERROR: Detected unknown distribution $linux_distro, can't install docker"
+    exit 1
+    ;;
+esac
 echo \
-  "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
+  "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/${linux_distro,,} \
   "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \
   sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
 
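The rewritten apt source line interpolates `${linux_distro,,}`, bash's lowercasing parameter expansion, so the detected distro name selects the matching download.docker.com path. A stand-alone illustration (the value assigned here is just an example):

```
# Illustration of the ${var,,} lowercasing used in the apt source line above.
linux_distro="Debian"   # set earlier by the detection logic; "Ubuntu" works the same way
echo "https://download.docker.com/linux/${linux_distro,,}"
# prints: https://download.docker.com/linux/debian
```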
tests/fixturenet-eth-plugeth/run-test.sh (new executable file, 44 lines)
@@ -0,0 +1,44 @@
+#!/usr/bin/env bash
+set -e
+if [ -n "$CERC_SCRIPT_DEBUG" ]; then
+    set -x
+fi
+set -e
+echo "Running stack-orchestrator Ethereum plugeth fixturenet test"
+# Bit of a hack, test the most recent package
+TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
+CERC_STACK_NAME=fixturenet-plugeth-tx
+# Set a new unique repo dir
+export CERC_REPO_BASE_DIR=$(mktemp -d stack-orchestrator-fixturenet-eth-test.XXXXXXXXXX)
+echo "Testing this package: $TEST_TARGET_SO"
+echo "Test version command"
+reported_version_string=$( $TEST_TARGET_SO version )
+echo "Version reported is: ${reported_version_string}"
+echo "Cloning repositories into: $CERC_REPO_BASE_DIR"
+$TEST_TARGET_SO --stack $CERC_STACK_NAME setup-repositories
+echo "Building containers"
+$TEST_TARGET_SO --stack $CERC_STACK_NAME build-containers
+echo "Images in registry:"
+docker image ls
+echo "Deploying the cluster"
+$TEST_TARGET_SO --stack $CERC_STACK_NAME deploy up
+# Verify that the fixturenet is up and running
+$TEST_TARGET_SO --stack $CERC_STACK_NAME deploy ps
+$TEST_TARGET_SO --stack $CERC_STACK_NAME deploy exec fixturenet-eth-bootnode-lighthouse /scripts/status-internal.sh
+initial_block_number=$($TEST_TARGET_SO --stack fixturenet-plugeth-tx deploy exec foundry "cast block-number")
+# Check that the block number increases some time later
+sleep 12
+subsequent_block_number=$($TEST_TARGET_SO --stack $CERC_STACK_NAME deploy exec foundry "cast block-number")
+block_number_difference=$((subsequent_block_number - initial_block_number))
+# Block height difference should be between 1 and some small number
+if [[ $block_number_difference -gt 1 && $block_number_difference -lt 10 ]]; then
+    echo "Test passed"
+    test_result=0
+else
+    echo "Test failed: block numbers were ${initial_block_number} and ${subsequent_block_number}"
+    test_result=1
+fi
+$TEST_TARGET_SO --stack $CERC_STACK_NAME deploy down
+echo "Removing cloned repositories"
+rm -rf $CERC_REPO_BASE_DIR
+exit $test_result
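This new script is what the plugeth workflow above runs as its final step. A hedged sketch of running it outside CI, mirroring the earlier workflow steps (assumes a Docker daemon and Python with pip are already available on the host):

```
# Sketch: run the plugeth fixturenet test locally, mirroring the CI steps above.
# Assumes docker is already running locally (no dind socket required).
pip install shiv
./scripts/create_build_tag_file.sh
./scripts/build_shiv_package.sh   # emits a ./package/laconic-so* file that the test picks up
./tests/fixturenet-eth-plugeth/run-test.sh
```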
@@ -4,36 +4,45 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
     set -x
 fi
 
-echo "Running stack-orchestrator Ethereum fixturenet test"
+echo "$(date +"%Y-%m-%d %T"): Running stack-orchestrator Ethereum fixturenet test"
 # Bit of a hack, test the most recent package
 TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
 # Set a new unique repo dir
 export CERC_REPO_BASE_DIR=$(mktemp -d stack-orchestrator-fixturenet-eth-test.XXXXXXXXXX)
-echo "Testing this package: $TEST_TARGET_SO"
-echo "Test version command"
+echo "$(date +"%Y-%m-%d %T"): Testing this package: $TEST_TARGET_SO"
+echo "$(date +"%Y-%m-%d %T"): Test version command"
 reported_version_string=$( $TEST_TARGET_SO version )
-echo "Version reported is: ${reported_version_string}"
-echo "Cloning repositories into: $CERC_REPO_BASE_DIR"
+echo "$(date +"%Y-%m-%d %T"): Version reported is: ${reported_version_string}"
+echo "$(date +"%Y-%m-%d %T"): Cloning repositories into: $CERC_REPO_BASE_DIR"
 $TEST_TARGET_SO --stack fixturenet-eth setup-repositories
+echo "$(date +"%Y-%m-%d %T"): Building containers"
 $TEST_TARGET_SO --stack fixturenet-eth build-containers
+echo "$(date +"%Y-%m-%d %T"): Starting stack"
 $TEST_TARGET_SO --stack fixturenet-eth deploy up
+echo "$(date +"%Y-%m-%d %T"): Stack started"
 # Verify that the fixturenet is up and running
 $TEST_TARGET_SO --stack fixturenet-eth deploy ps
-$TEST_TARGET_SO --stack fixturenet-eth deploy exec fixturenet-eth-bootnode-lighthouse /scripts/status-internal.sh
+# echo "$(date +"%Y-%m-%d %T"): Getting stack status"
+# $TEST_TARGET_SO --stack fixturenet-eth deploy exec fixturenet-eth-bootnode-lighthouse /scripts/status-internal.sh
+echo "$(date +"%Y-%m-%d %T"): Getting initial block number"
 initial_block_number=$($TEST_TARGET_SO --stack fixturenet-eth deploy exec foundry "cast block-number")
 # Check that the block number increases some time later
-sleep 12
+sleep 120
+echo "$(date +"%Y-%m-%d %T"): Getting subsequent block number"
 subsequent_block_number=$($TEST_TARGET_SO --stack fixturenet-eth deploy exec foundry "cast block-number")
 block_number_difference=$((subsequent_block_number - initial_block_number))
 # Block height difference should be between 1 and some small number
-if [[ $block_number_difference -gt 1 && $block_number_difference -lt 10 ]]; then
+if [[ $block_number_difference -gt 1 && $block_number_difference -lt 100 ]]; then
     echo "Test passed"
     test_result=0
 else
     echo "Test failed: block numbers were ${initial_block_number} and ${subsequent_block_number}"
+    echo "Logs from stack:"
+    $TEST_TARGET_SO --stack fixturenet-eth deploy logs
     test_result=1
 fi
 $TEST_TARGET_SO --stack fixturenet-eth deploy down
-echo "Removing cloned repositories"
+echo "$(date +"%Y-%m-%d %T"): Removing cloned repositories"
 rm -rf $CERC_REPO_BASE_DIR
+echo "$(date +"%Y-%m-%d %T"): Test finished"
 exit $test_result
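The timing changes above (sleep 12 raised to sleep 120, upper bound raised from 10 to 100 blocks) widen the pass window but keep it fixed. A polling variant is sketched below for comparison; it reuses the same `cast block-number` invocation via `deploy exec`, while the 30 x 10-second schedule is an assumption, not part of this commit.

```
# Sketch: poll for block production instead of a single fixed sleep.
# Reuses the cast block-number call from the test above; the 30 x 10s
# polling schedule is illustrative only.
initial_block_number=$($TEST_TARGET_SO --stack fixturenet-eth deploy exec foundry "cast block-number")
test_result=1
for attempt in $(seq 1 30); do
    sleep 10
    current_block_number=$($TEST_TARGET_SO --stack fixturenet-eth deploy exec foundry "cast block-number")
    if [[ $((current_block_number - initial_block_number)) -ge 1 ]]; then
        echo "Chain is advancing: ${initial_block_number} -> ${current_block_number}"
        test_result=0
        break
    fi
done
```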