Create new base container for pre-compiled webapps. #507

Merged
telackey merged 3 commits from telackey/webapp into main 2023-08-22 18:32:45 +00:00
11 changed files with 207 additions and 51 deletions
Showing only changes of commit 084dcb6301

View File

@@ -0,0 +1,36 @@
name: Fixturenet-Eth-Plugeth-Test
on:
push:
branches: 'ci-test'
# Needed until we can incorporate docker startup into the executor container
env:
DOCKER_HOST: unix:///var/run/dind.sock
jobs:
test:
name: "Run an Ethereum plugeth fixturenet test"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
- name: "Install Python"
uses: cerc-io/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: Start dockerd # Also needed until we can incorporate into the executor
run: |
dockerd -H $DOCKER_HOST --userland-proxy=false &
sleep 5
- name: "Run fixturenet-eth tests"
run: ./tests/fixturenet-eth-plugeth/run-test.sh
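
Until Docker startup can move into the executor container, both workflows point DOCKER_HOST at a manually started dockerd and sleep briefly before using it. A minimal sketch of how a step could confirm that daemon is actually reachable, assuming the `docker` Python SDK (docker-py) is available — it is not part of this commit:

```python
# Illustration only: verify the dockerd started by the workflow is reachable
# through the DOCKER_HOST socket before the tests run.
# Assumes the `docker` Python SDK (docker-py) is installed.
import os
import docker

def wait_for_dockerd() -> None:
    # docker.from_env() honours DOCKER_HOST, e.g. unix:///var/run/dind.sock
    client = docker.from_env()
    client.ping()  # raises if the daemon is not yet accepting connections
    print(f"dockerd reachable at {os.environ.get('DOCKER_HOST', 'default socket')}")

if __name__ == "__main__":
    wait_for_dockerd()
```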

View File

@@ -4,6 +4,11 @@ on:
push:
branches: 'ci-test'
# Needed until we can incorporate docker startup into the executor container
env:
DOCKER_HOST: unix:///var/run/dind.sock
jobs:
test:
name: "Run an Ethereum fixturenet test"
@@ -23,5 +28,10 @@ jobs:
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: Start dockerd # Also needed until we can incorporate into the executor
run: |
dockerd -H $DOCKER_HOST --userland-proxy=false &
sleep 5
- name: "Run fixturenet-eth tests"
run: ./tests/fixturenet-eth/run-test.sh

View File

@@ -37,3 +37,4 @@ jobs:
sleep 5
- name: "Run smoke tests"
run: ./tests/smoke-test/run-smoke-test.sh

View File

@@ -6,7 +6,7 @@ Stack Orchestrator allows building and deployment of a Laconic Stack on a single
## Install
**To get started quickly** on a fresh Ubuntu instance (e.g, Digital Ocean); [try this script](./scripts/quick-install-ubuntu.sh). **WARNING:** always review scripts prior to running them so that you know what is happening on your machine.
**To get started quickly** on a fresh Ubuntu instance (e.g, Digital Ocean); [try this script](./scripts/quick-install-linux.sh). **WARNING:** always review scripts prior to running them so that you know what is happening on your machine.
For any other installation, follow along below and **adapt these instructions based on the specifics of your system.**

View File

@@ -6,4 +6,4 @@ source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
# See: https://stackoverflow.com/a/246128/1701505
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
docker build -t cerc/lighthouse:local ${build_command_args} ${SCRIPT_DIR}
docker build -t cerc/lighthouse:local ${build_command_args} --build-arg TAG_SUFFIX="" ${SCRIPT_DIR}

View File

@@ -1,7 +1,7 @@
version: "1.0"
name: azimuth
repos:
- github.com/cerc-io/azimuth-watcher-ts
- github.com/cerc-io/azimuth-watcher-ts@v0.1.1
containers:
- cerc/watcher-azimuth
pods:
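
The azimuth stack now pins its repository to a tag using the `repo@tag` form. For illustration, a hypothetical parser for that form might look like the sketch below; `split_repo_spec` is not a stack-orchestrator function, only an illustration of the syntax:

```python
# Hypothetical helper (not from this commit): split a pinned repo spec such
# as "github.com/cerc-io/azimuth-watcher-ts@v0.1.1" into a repository path
# and an optional git ref.
from typing import Optional, Tuple

def split_repo_spec(spec: str) -> Tuple[str, Optional[str]]:
    repo, _, ref = spec.partition("@")
    return repo, ref or None

assert split_repo_spec("github.com/cerc-io/azimuth-watcher-ts@v0.1.1") == (
    "github.com/cerc-io/azimuth-watcher-ts", "v0.1.1")
assert split_repo_spec("github.com/cerc-io/azimuth-watcher-ts") == (
    "github.com/cerc-io/azimuth-watcher-ts", None)
```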

View File

@@ -14,7 +14,6 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import click
from dataclasses import dataclass
from importlib import util
import os
from pathlib import Path
@@ -27,6 +26,7 @@ from app.deploy_types import DeploymentContext, DeployCommandContext
def _make_default_deployment_dir():
return "deployment-001"
def _get_ports(stack):
ports = {}
parsed_stack = get_parsed_stack_config(stack)
@@ -42,6 +42,7 @@ def _get_ports(stack):
ports[svc_name] = [ str(x) for x in svc["ports"] ]
return ports
def _get_named_volumes(stack):
# Parse the compose files looking for named volumes
named_volumes = []
@@ -76,30 +77,30 @@ def _create_bind_dir_if_relative(volume, path_string, compose_dir):
# See: https://stackoverflow.com/questions/45699189/editing-docker-compose-yml-with-pyyaml
def _fixup_pod_file(pod, spec, compose_dir):
# Fix up volumes
if "volumes" in spec:
spec_volumes = spec["volumes"]
if "volumes" in pod:
pod_volumes = pod["volumes"]
for volume in pod_volumes.keys():
if volume in spec_volumes:
volume_spec = spec_volumes[volume]
volume_spec_fixedup = volume_spec if Path(volume_spec).is_absolute() else f".{volume_spec}"
_create_bind_dir_if_relative(volume, volume_spec, compose_dir)
new_volume_spec = {"driver": "local",
"driver_opts": {
# Fix up volumes
if "volumes" in spec:
spec_volumes = spec["volumes"]
if "volumes" in pod:
pod_volumes = pod["volumes"]
for volume in pod_volumes.keys():
if volume in spec_volumes:
volume_spec = spec_volumes[volume]
volume_spec_fixedup = volume_spec if Path(volume_spec).is_absolute() else f".{volume_spec}"
_create_bind_dir_if_relative(volume, volume_spec, compose_dir)
new_volume_spec = {"driver": "local",
"driver_opts": {
"type": "none",
"device": volume_spec_fixedup,
"o": "bind"
}
}
pod["volumes"][volume] = new_volume_spec
# Fix up ports
if "ports" in spec:
spec_ports = spec["ports"]
for container_name, container_ports in spec_ports.items():
if container_name in pod["services"]:
pod["services"][container_name]["ports"] = container_ports
}
pod["volumes"][volume] = new_volume_spec
# Fix up ports
if "ports" in spec:
spec_ports = spec["ports"]
for container_name, container_ports in spec_ports.items():
if container_name in pod["services"]:
pod["services"][container_name]["ports"] = container_ports
def call_stack_deploy_init(deploy_command_context):
@@ -107,10 +108,13 @@ def call_stack_deploy_init(deploy_command_context):
# Call a function in it
# If no function found, return None
python_file_path = get_stack_file_path(deploy_command_context.stack).parent.joinpath("deploy", "commands.py")
spec = util.spec_from_file_location("commands", python_file_path)
imported_stack = util.module_from_spec(spec)
spec.loader.exec_module(imported_stack)
return imported_stack.init(deploy_command_context)
if python_file_path.exists():
spec = util.spec_from_file_location("commands", python_file_path)
imported_stack = util.module_from_spec(spec)
spec.loader.exec_module(imported_stack)
return imported_stack.init(deploy_command_context)
else:
return None
# TODO: fold this with function above
@@ -119,10 +123,13 @@ def call_stack_deploy_setup(deploy_command_context, extra_args):
# Call a function in it
# If no function found, return None
python_file_path = get_stack_file_path(deploy_command_context.stack).parent.joinpath("deploy", "commands.py")
spec = util.spec_from_file_location("commands", python_file_path)
imported_stack = util.module_from_spec(spec)
spec.loader.exec_module(imported_stack)
return imported_stack.setup(deploy_command_context, extra_args)
if python_file_path.exists():
spec = util.spec_from_file_location("commands", python_file_path)
imported_stack = util.module_from_spec(spec)
spec.loader.exec_module(imported_stack)
return imported_stack.setup(deploy_command_context, extra_args)
else:
return None
# TODO: fold this with function above
@@ -131,10 +138,13 @@ def call_stack_deploy_create(deployment_context):
# Call a function in it
# If no function found, return None
python_file_path = get_stack_file_path(deployment_context.command_context.stack).parent.joinpath("deploy", "commands.py")
spec = util.spec_from_file_location("commands", python_file_path)
imported_stack = util.module_from_spec(spec)
spec.loader.exec_module(imported_stack)
return imported_stack.create(deployment_context)
if python_file_path.exists():
spec = util.spec_from_file_location("commands", python_file_path)
imported_stack = util.module_from_spec(spec)
spec.loader.exec_module(imported_stack)
return imported_stack.create(deployment_context)
else:
return None
# Inspect the pod yaml to find config files referenced in subdirectories
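
All three call_stack_deploy_* functions now guard the importlib load with a python_file_path.exists() check, and each carries a "fold this with function above" TODO. One possible shape for that fold, sketched here only for illustration and not part of this commit:

```python
# Sketch only: a single helper that the three call_stack_deploy_* functions
# above could share, per their TODO comments. Not part of this commit.
from importlib import util

def _load_stack_command(python_file_path, function_name):
    # Return the named function from the stack's deploy/commands.py,
    # or None if the file (or the function) does not exist.
    if not python_file_path.exists():
        return None
    spec = util.spec_from_file_location("commands", python_file_path)
    imported_stack = util.module_from_spec(spec)
    spec.loader.exec_module(imported_stack)
    return getattr(imported_stack, function_name, None)

# call_stack_deploy_init could then reduce to roughly:
#   init_command = _load_stack_command(python_file_path, "init")
#   return init_command(deploy_command_context) if init_command else None
```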

View File

@@ -23,7 +23,7 @@ ssh root@IP
2. Get the install script, give it executable permissions, and run it:
```
curl -o install.sh https://raw.githubusercontent.com/cerc-io/stack-orchestrator/main/scripts/quick-install-ubuntu.sh
curl -o install.sh https://raw.githubusercontent.com/cerc-io/stack-orchestrator/main/scripts/quick-install-linux.sh
```
```
chmod +x install.sh

View File

@@ -28,12 +28,43 @@ fi
# Check python3 is available
# Check machine resources are sufficient
# Determine if we are on Debian or Ubuntu
linux_distro=$(lsb_release -a 2>/dev/null | grep "^Distributor ID:" | cut -f 2)
# Some systems don't have lsb_release installed (e.g. ChromeOS) and so we try to
# use /etc/os-release instead
if [[ -z "$linux_distro" ]]; then
if [[ -f "/etc/os-release" ]]; then
distro_name_string=$(grep "^NAME=" /etc/os-release | cut -d '=' -f 2)
if [[ $distro_name_string =~ Debian ]]; then
linux_distro="Debian"
elif [[ $distro_name_string =~ Ubuntu ]]; then
linux_distro="Ubuntu"
fi
else
echo "Failed to identify distro: /etc/os-release doesn't exist"
exit 1
fi
fi
case $linux_distro in
Debian)
echo "Installing docker for Debian"
;;
Ubuntu)
echo "Installing docker for Ubuntu"
;;
*)
echo "ERROR: Detected unknown distribution $linux_distro, can't install docker"
exit 1
;;
esac
# dismiss the popups
export DEBIAN_FRONTEND=noninteractive
## https://docs.docker.com/engine/install/ubuntu/
## https://docs.docker.com/engine/install/debian/
## https://superuser.com/questions/518859/ignore-packages-that-are-not-currently-installed-when-using-apt-get-remove1
packages_to_remove="docker docker-engine docker.io containerd runc"
packages_to_remove="docker docker-engine docker.io containerd runc docker-compose docker-doc podman-docker"
installed_packages_to_remove=""
for package_to_remove in $(echo $packages_to_remove); do
$(dpkg --info $package_to_remove &> /dev/null)
@@ -65,10 +96,25 @@ sudo apt -y install curl
sudo apt -y install ca-certificates gnupg
# Add dockerco package repository
sudo mkdir -m 0755 -p /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
# For reasons not obvious, the dockerco instructions for installation on
# Debian and Ubuntu are slightly different here
case $linux_distro in
Debian)
sudo install -m 0755 -d /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
sudo chmod a+r /etc/apt/keyrings/docker.gpg
;;
Ubuntu)
sudo mkdir -m 0755 -p /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
;;
*)
echo "ERROR: Detected unknown distribution $linux_distro, can't install docker"
exit 1
;;
esac
echo \
"deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
"deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/${linux_distro,,} \
"$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \
sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
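
The install script now detects Debian vs Ubuntu (falling back to /etc/os-release where lsb_release is missing) and lowercases the detected name with ${linux_distro,,} to pick the matching Docker apt repository. The same logic, expressed in Python purely for illustration (the installer itself remains bash):

```python
# Illustration only: mirror the distro detection and repo-URL selection the
# bash installer performs. Not part of this commit.
import re
from pathlib import Path

def detect_distro() -> str:
    os_release = Path("/etc/os-release").read_text()
    match = re.search(r'^NAME=(.*)$', os_release, re.MULTILINE)
    name = match.group(1) if match else ""
    if "Debian" in name:
        return "Debian"
    if "Ubuntu" in name:
        return "Ubuntu"
    raise SystemExit(f"ERROR: unknown distribution {name!r}, can't install docker")

def docker_apt_repo(distro: str) -> str:
    # Mirrors the ${linux_distro,,} lowercasing in the bash version.
    return f"https://download.docker.com/linux/{distro.lower()}"

print(docker_apt_repo(detect_distro()))
```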

View File

@@ -0,0 +1,44 @@
#!/usr/bin/env bash
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
set -e
echo "Running stack-orchestrator Ethereum plugeth fixturenet test"
# Bit of a hack, test the most recent package
TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
CERC_STACK_NAME=fixturenet-plugeth-tx
# Set a new unique repo dir
export CERC_REPO_BASE_DIR=$(mktemp -d stack-orchestrator-fixturenet-eth-test.XXXXXXXXXX)
echo "Testing this package: $TEST_TARGET_SO"
echo "Test version command"
reported_version_string=$( $TEST_TARGET_SO version )
echo "Version reported is: ${reported_version_string}"
echo "Cloning repositories into: $CERC_REPO_BASE_DIR"
$TEST_TARGET_SO --stack $CERC_STACK_NAME setup-repositories
echo "Building containers"
$TEST_TARGET_SO --stack $CERC_STACK_NAME build-containers
echo "Images in registry:"
docker image ls
echo "Deploying the cluster"
$TEST_TARGET_SO --stack $CERC_STACK_NAME deploy up
# Verify that the fixturenet is up and running
$TEST_TARGET_SO --stack $CERC_STACK_NAME deploy ps
$TEST_TARGET_SO --stack $CERC_STACK_NAME deploy exec fixturenet-eth-bootnode-lighthouse /scripts/status-internal.sh
initial_block_number=$($TEST_TARGET_SO --stack fixturenet-plugeth-tx deploy exec foundry "cast block-number")
# Check that the block number increases some time later
sleep 12
subsequent_block_number=$($TEST_TARGET_SO --stack $CERC_STACK_NAME deploy exec foundry "cast block-number")
block_number_difference=$((subsequent_block_number - initial_block_number))
# Block height difference should be between 1 and some small number
if [[ $block_number_difference -gt 1 && $block_number_difference -lt 10 ]]; then
echo "Test passed"
test_result=0
else
echo "Test failed: block numbers were ${initial_block_number} and ${subsequent_block_number}"
test_result=1
fi
$TEST_TARGET_SO --stack $CERC_STACK_NAME deploy down
echo "Removing cloned repositories"
rm -rf $CERC_REPO_BASE_DIR
exit $test_result
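
The new plugeth test asserts block production by taking two `cast block-number` readings 12 seconds apart and requiring the difference to fall strictly between 1 and 10. For illustration, an equivalent check could be made directly against a node's JSON-RPC endpoint; the endpoint URL below is an assumption, not something exposed by this change:

```python
# Illustration only: the same "block number advances" check, done via
# eth_blockNumber over JSON-RPC instead of `cast block-number`.
# RPC_URL is an assumed endpoint; adjust for the actual deployment.
import json
import time
import urllib.request

RPC_URL = "http://localhost:8545"

def block_number(url=RPC_URL) -> int:
    payload = json.dumps({"jsonrpc": "2.0", "method": "eth_blockNumber",
                          "params": [], "id": 1}).encode()
    req = urllib.request.Request(url, data=payload,
                                 headers={"Content-Type": "application/json"})
    with urllib.request.urlopen(req) as resp:
        return int(json.load(resp)["result"], 16)  # result is a hex string

initial = block_number()
time.sleep(12)
difference = block_number() - initial
print("Test passed" if 1 < difference < 10 else "Test failed")
```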

View File

@@ -4,36 +4,45 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
echo "Running stack-orchestrator Ethereum fixturenet test"
echo "$(date +"%Y-%m-%d %T"): Running stack-orchestrator Ethereum fixturenet test"
# Bit of a hack, test the most recent package
TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
# Set a new unique repo dir
export CERC_REPO_BASE_DIR=$(mktemp -d stack-orchestrator-fixturenet-eth-test.XXXXXXXXXX)
echo "Testing this package: $TEST_TARGET_SO"
echo "Test version command"
echo "$(date +"%Y-%m-%d %T"): Testing this package: $TEST_TARGET_SO"
echo "$(date +"%Y-%m-%d %T"): Test version command"
reported_version_string=$( $TEST_TARGET_SO version )
echo "Version reported is: ${reported_version_string}"
echo "Cloning repositories into: $CERC_REPO_BASE_DIR"
$TEST_TARGET_SO --stack fixturenet-eth setup-repositories
echo "$(date +"%Y-%m-%d %T"): Version reported is: ${reported_version_string}"
echo "$(date +"%Y-%m-%d %T"): Cloning repositories into: $CERC_REPO_BASE_DIR"
$TEST_TARGET_SO --stack fixturenet-eth setup-repositories
echo "$(date +"%Y-%m-%d %T"): Building containers"
$TEST_TARGET_SO --stack fixturenet-eth build-containers
echo "$(date +"%Y-%m-%d %T"): Starting stack"
$TEST_TARGET_SO --stack fixturenet-eth deploy up
echo "$(date +"%Y-%m-%d %T"): Stack started"
# Verify that the fixturenet is up and running
$TEST_TARGET_SO --stack fixturenet-eth deploy ps
$TEST_TARGET_SO --stack fixturenet-eth deploy exec fixturenet-eth-bootnode-lighthouse /scripts/status-internal.sh
# echo "$(date +"%Y-%m-%d %T"): Getting stack status"
# $TEST_TARGET_SO --stack fixturenet-eth deploy exec fixturenet-eth-bootnode-lighthouse /scripts/status-internal.sh
echo "$(date +"%Y-%m-%d %T"): Getting initial block number"
initial_block_number=$($TEST_TARGET_SO --stack fixturenet-eth deploy exec foundry "cast block-number")
# Check that the block number increases some time later
sleep 12
sleep 120
echo "$(date +"%Y-%m-%d %T"): Getting subsequent block number"
subsequent_block_number=$($TEST_TARGET_SO --stack fixturenet-eth deploy exec foundry "cast block-number")
block_number_difference=$((subsequent_block_number - initial_block_number))
# Block height difference should be between 1 and some small number
if [[ $block_number_difference -gt 1 && $block_number_difference -lt 10 ]]; then
if [[ $block_number_difference -gt 1 && $block_number_difference -lt 100 ]]; then
echo "Test passed"
test_result=0
else
echo "Test failed: block numbers were ${initial_block_number} and ${subsequent_block_number}"
echo "Logs from stack:"
$TEST_TARGET_SO --stack fixturenet-eth deploy logs
test_result=1
fi
$TEST_TARGET_SO --stack fixturenet-eth deploy down
echo "Removing cloned repositories"
echo "$(date +"%Y-%m-%d %T"): Removing cloned repositories"
rm -rf $CERC_REPO_BASE_DIR
echo "$(date +"%Y-%m-%d %T"): Test finished"
exit $test_result