forked from cerc-io/stack-orchestrator
Compare commits
4 Commits
Comparing branches: `pm-custom-...` and `main`
| Author | SHA1 | Date |
|---|---|---|
| | ccccd9f957 | |
| | 34f3b719e4 | |
| | 0e814bd4da | |
| | 873a6d472c | |
```diff
@@ -26,8 +26,14 @@ fi
 SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
 
 WORK_DIR="${1:-/app}"
 
+if [ -f "${WORK_DIR}/build-webapp.sh" ]; then
+  echo "Building webapp with ${WORK_DIR}/build-webapp.sh ..."
+  cd "${WORK_DIR}" || exit 1
+  ./build-webapp.sh || exit 1
+  exit 0
+fi
 
 if [ -f "next.config.mjs" ]; then
   NEXT_CONFIG_JS="next.config.mjs"
   IMPORT_OR_REQUIRE="import"
```
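If the app ships its own `build-webapp.sh`, the stock Next.js build is now skipped entirely and the script's exit status decides the build's fate. A minimal sketch of such a script, assuming a plain npm project (the script's name and location are the contract; its contents here are hypothetical):

```sh
#!/usr/bin/env bash
# Hypothetical ${WORK_DIR}/build-webapp.sh.
# A non-zero exit anywhere aborts the container build (the caller does `|| exit 1`).
set -e

npm install
npm run build   # assumes the app defines a "build" script in package.json
```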
```diff
@@ -30,36 +30,44 @@ fi
 CERC_WEBAPP_FILES_DIR="${CERC_WEBAPP_FILES_DIR:-/app}"
 cd "$CERC_WEBAPP_FILES_DIR"
 
-"$SCRIPT_DIR/apply-runtime-env.sh" "`pwd`" .next .next-r
-mv .next .next.old
-mv .next-r/.next .
-
-if [ "$CERC_NEXTJS_SKIP_GENERATE" != "true" ]; then
-  jq -e '.scripts.cerc_generate' package.json >/dev/null
-  if [ $? -eq 0 ]; then
-    npm run cerc_generate > gen.out 2>&1 &
-    tail -f gen.out &
-    tpid=$!
-
-    count=0
-    generate_done="false"
-    while [ $count -lt $CERC_MAX_GENERATE_TIME ] && [ "$generate_done" == "false" ]; do
-      sleep 1
-      count=$((count + 1))
-      grep 'rendered as static' gen.out > /dev/null
-      if [ $? -eq 0 ]; then
-        generate_done="true"
-      fi
-    done
-
-    if [ $generate_done != "true" ]; then
-      echo "ERROR: 'npm run cerc_generate' not successful within CERC_MAX_GENERATE_TIME" 1>&2
-      exit 1
-    fi
-
-    kill $tpid $(ps -ef | grep node | grep next | grep generate | awk '{print $2}') 2>/dev/null
-    tpid=""
-  fi
-fi
-
-$CERC_BUILD_TOOL start . -- -p ${CERC_LISTEN_PORT:-80}
+if [ -f "./run-webapp.sh" ]; then
+  echo "Running webapp with run-webapp.sh ..."
+  cd "${WORK_DIR}" || exit 1
+  ./run-webapp.sh &
+  tpid=$!
+  wait $tpid
+else
+  "$SCRIPT_DIR/apply-runtime-env.sh" "`pwd`" .next .next-r
+  mv .next .next.old
+  mv .next-r/.next .
+
+  if [ "$CERC_NEXTJS_SKIP_GENERATE" != "true" ]; then
+    jq -e '.scripts.cerc_generate' package.json >/dev/null
+    if [ $? -eq 0 ]; then
+      npm run cerc_generate > gen.out 2>&1 &
+      tail -f gen.out &
+      tpid=$!
+
+      count=0
+      generate_done="false"
+      while [ $count -lt $CERC_MAX_GENERATE_TIME ] && [ "$generate_done" == "false" ]; do
+        sleep 1
+        count=$((count + 1))
+        grep 'rendered as static' gen.out > /dev/null
+        if [ $? -eq 0 ]; then
+          generate_done="true"
+        fi
+      done
+
+      if [ $generate_done != "true" ]; then
+        echo "ERROR: 'npm run cerc_generate' not successful within CERC_MAX_GENERATE_TIME" 1>&2
+        exit 1
+      fi
+
+      kill $tpid $(ps -ef | grep node | grep next | grep generate | awk '{print $2}') 2>/dev/null
+      tpid=""
+    fi
+  fi
+
+  $CERC_BUILD_TOOL start . -- -p ${CERC_LISTEN_PORT:-80}
+fi
```
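The serve-time counterpart: when `run-webapp.sh` is present, the runtime-env rewriting and `cerc_generate` machinery above are bypassed and the container simply waits on the script's pid. A hypothetical example of what an app might provide:

```sh
#!/usr/bin/env bash
# Hypothetical ./run-webapp.sh: serve the app however it likes.
# It inherits the container environment, including CERC_LISTEN_PORT.
set -e

exec npm run start -- -p "${CERC_LISTEN_PORT:-80}"
```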
```diff
@@ -14,7 +14,6 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 from stack_orchestrator.deploy.deployment_context import DeploymentContext
-from ruamel.yaml import YAML
 
 
 def create(context: DeploymentContext, extra_args):
```

```diff
@@ -23,17 +22,12 @@ def create(context: DeploymentContext, extra_args):
     # deterministic-deployment-proxy contract, which itself is a prereq for Optimism contract deployment
     fixturenet_eth_compose_file = context.deployment_dir.joinpath('compose', 'docker-compose-fixturenet-eth.yml')
 
-    with open(fixturenet_eth_compose_file, 'r') as yaml_file:
-        yaml = YAML()
-        yaml_data = yaml.load(yaml_file)
-
     new_script = '../config/fixturenet-optimism/run-geth.sh:/opt/testnet/run.sh'
 
-    if new_script not in yaml_data['services']['fixturenet-eth-geth-1']['volumes']:
-        yaml_data['services']['fixturenet-eth-geth-1']['volumes'].append(new_script)
+    def add_geth_volume(yaml_data):
+        if new_script not in yaml_data['services']['fixturenet-eth-geth-1']['volumes']:
+            yaml_data['services']['fixturenet-eth-geth-1']['volumes'].append(new_script)
 
-    with open(fixturenet_eth_compose_file, 'w') as yaml_file:
-        yaml = YAML()
-        yaml.dump(yaml_data, yaml_file)
+    context.modify_yaml(fixturenet_eth_compose_file, add_geth_volume)
 
     return None
```
```diff
@@ -2,7 +2,6 @@ version: "1.0"
 name: test
 description: "A test stack"
 repos:
-  - git.vdb.to/cerc-io/laconicd
   - git.vdb.to/cerc-io/test-project@test-branch
 containers:
   - cerc/test-container
```
```diff
@@ -45,20 +45,22 @@ class DeploymentContext:
     def get_compose_dir(self):
         return self.deployment_dir.joinpath(constants.compose_dir_name)
 
+    def get_compose_file(self, name: str):
+        return self.get_compose_dir() / f"docker-compose-{name}.yml"
+
     def get_cluster_id(self):
         return self.id
 
-    def init(self, dir):
-        self.deployment_dir = dir
+    def init(self, dir: Path):
+        self.deployment_dir = dir.absolute()
         self.spec = Spec()
         self.spec.init_from_file(self.get_spec_file())
         self.stack = Stack(self.spec.obj["stack"])
         self.stack.init_from_file(self.get_stack_file())
         deployment_file_path = self.get_deployment_file()
         if deployment_file_path.exists():
-            with deployment_file_path:
-                obj = get_yaml().load(open(deployment_file_path, "r"))
-                self.id = obj[constants.cluster_id_key]
+            obj = get_yaml().load(open(deployment_file_path, "r"))
+            self.id = obj[constants.cluster_id_key]
         # Handle the case of a legacy deployment with no file
         # Code below is intended to match the output from _make_default_cluster_name()
         # TODO: remove when we no longer need to support legacy deployments
```
```diff
@@ -67,3 +69,19 @@ class DeploymentContext:
         unique_cluster_descriptor = f"{path},{self.get_stack_file()},None,None"
         hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()[:16]
         self.id = f"{constants.cluster_name_prefix}{hash}"
+
+    def modify_yaml(self, file_path: Path, modifier_func):
+        """
+        Load a YAML file from the deployment, apply a modification function, and write it back.
+        """
+        if not file_path.absolute().is_relative_to(self.deployment_dir):
+            raise ValueError(f"File is not inside deployment directory: {file_path}")
+
+        yaml = get_yaml()
+        with open(file_path, 'r') as f:
+            yaml_data = yaml.load(f)
+
+        modifier_func(yaml_data)
+
+        with open(file_path, 'w') as f:
+            yaml.dump(yaml_data, f)
```
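The fixturenet-optimism hunk above is the first caller of this helper: the callback mutates the parsed YAML in place, while `modify_yaml` owns the load/dump round-trip and rejects paths outside the deployment directory. The pattern composes with the new `get_compose_file()`; a minimal sketch with a hypothetical service name:

```python
# Hypothetical usage from some stack's create() hook.
def create(context, extra_args):
    compose_file = context.get_compose_file('my-service')

    def set_restart_policy(yaml_data):
        # Mutate in place; modify_yaml writes the result back to disk.
        yaml_data['services']['my-service']['restart'] = 'always'

    context.modify_yaml(compose_file, set_restart_policy)
```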
```diff
@@ -443,18 +443,16 @@ def _check_volume_definitions(spec):
 @click.command()
 @click.option("--spec-file", required=True, help="Spec file to use to create this deployment")
 @click.option("--deployment-dir", help="Create deployment files in this directory")
-# TODO: Hack
-@click.option("--network-dir", help="Network configuration supplied in this directory")
-@click.option("--initial-peers", help="Initial set of persistent peers")
+@click.argument('extra_args', nargs=-1, type=click.UNPROCESSED)
 @click.pass_context
-def create(ctx, spec_file, deployment_dir, network_dir, initial_peers):
+def create(ctx, spec_file, deployment_dir, extra_args):
     deployment_command_context = ctx.obj
-    return create_operation(deployment_command_context, spec_file, deployment_dir, network_dir, initial_peers)
+    return create_operation(deployment_command_context, spec_file, deployment_dir, extra_args)
 
 
 # The init command's implementation is in a separate function so that we can
 # call it from other commands, bypassing the click decoration stuff
-def create_operation(deployment_command_context, spec_file, deployment_dir, network_dir, initial_peers):
+def create_operation(deployment_command_context, spec_file, deployment_dir, extra_args):
     parsed_spec = Spec(os.path.abspath(spec_file), get_parsed_deployment_spec(spec_file))
     _check_volume_definitions(parsed_spec)
     stack_name = parsed_spec["stack"]
```
```diff
@@ -541,7 +539,7 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, network_dir, initial_peers):
     deployer_config_generator = getDeployerConfigGenerator(deployment_type, deployment_context)
     # TODO: make deployment_dir_path a Path above
     deployer_config_generator.generate(deployment_dir_path)
-    call_stack_deploy_create(deployment_context, [network_dir, initial_peers, deployment_command_context])
+    call_stack_deploy_create(deployment_context, extra_args)
 
 
 # TODO: this code should be in the stack .py files but
```
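Anything after the recognized options now rides along unprocessed in `extra_args` and reaches `call_stack_deploy_create`, so stack-specific inputs like a network directory or peer list no longer need dedicated core options. A hypothetical invocation (argument meaning is entirely up to the target stack's `create()` hook):

```sh
# Trailing arguments land in extra_args unmodified.
laconic-so --stack my-stack deploy create \
  --spec-file my-stack-spec.yml \
  --deployment-dir my-deployment \
  /path/to/network-dir peer1,peer2
```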
```diff
@@ -92,9 +92,8 @@ class Spec:
         return self.obj.get(item, default)
 
     def init_from_file(self, file_path: Path):
-        with file_path:
-            self.obj = get_yaml().load(open(file_path, "r"))
-            self.file_path = file_path
+        self.obj = get_yaml().load(open(file_path, "r"))
+        self.file_path = file_path
 
     def get_image_registry(self):
         return self.obj.get(constants.image_registry_key)
```
```diff
@@ -27,5 +27,4 @@ class Stack:
         self.name = name
 
     def init_from_file(self, file_path: Path):
-        with file_path:
-            self.obj = get_yaml().load(open(file_path, "r"))
+        self.obj = get_yaml().load(open(file_path, "r"))
```
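Both deletions fix the same misuse: `with file_path:` entered the `pathlib.Path` itself as a context manager (behavior that was deprecated and later removed from Python) and never managed the file opened inside it. Note the replacement still leaves the `open(...)` handle to the garbage collector; a fully tidied form, sketched here as a suggestion rather than what the commit does, would put the `with` on the file object:

```python
from pathlib import Path

from ruamel.yaml import YAML  # get_yaml() in this codebase wraps ruamel.yaml


def load_yaml_file(file_path: Path):
    # The context manager belongs on the open file, guaranteeing it is closed.
    with open(file_path, "r") as f:
        return YAML().load(f)
```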
```diff
@@ -92,7 +92,6 @@ def create_deployment(ctx, deployment_dir, image, url, kube_config, image_registry
         spec_file_name,
         deployment_dir,
-        None,
         None
     )
     # Fix up the container tag inside the deployment compose file
     _fixup_container_tag(deployment_dir, image)
```
```diff
@@ -172,7 +172,6 @@ def process_app_deployment_request(
     logger.log(
         f"Creating webapp deployment in: {deployment_dir} with container id: {deployment_container_tag}"
     )
-    # CREATES DEPLOYMENT DIR, NOT SKIPPING FOR TESTING
     deploy_webapp.create_deployment(
         ctx,
         deployment_dir,
```
```diff
@@ -215,24 +214,21 @@ def process_app_deployment_request(
             # add_tags_to_image(image_registry, app_image_shared_tag, deployment_container_tag)
             logger.log("Tag complete")
         else:
-            # SKIP BUILD
-            logger.log("TESTING: Skipping container build.")
-
-            # extra_build_args = []  # TODO: pull from request
-            # logger.log(f"Building container image: {deployment_container_tag}")
-            # build_container_image(
-            #     app, deployment_container_tag, extra_build_args, logger
-            # )
-            # logger.log("Build complete")
-            # logger.log(f"Pushing container image: {deployment_container_tag}")
-            # push_container_image(deployment_dir, logger)
-            # logger.log("Push complete")
-            # # The build/push commands above will use the unique deployment tag, so now we need to add the shared tag.
-            # logger.log(
-            #     f"(SKIPPED) Adding global app image tag: {app_image_shared_tag} to newly built image: {deployment_container_tag}"
-            # )
-            # # add_tags_to_image(image_registry, deployment_container_tag, app_image_shared_tag)
-            # logger.log("Tag complete")
+            extra_build_args = []  # TODO: pull from request
+            logger.log(f"Building container image: {deployment_container_tag}")
+            build_container_image(
+                app, deployment_container_tag, extra_build_args, logger
+            )
+            logger.log("Build complete")
+            logger.log(f"Pushing container image: {deployment_container_tag}")
+            push_container_image(deployment_dir, logger)
+            logger.log("Push complete")
+            # The build/push commands above will use the unique deployment tag, so now we need to add the shared tag.
+            logger.log(
+                f"(SKIPPED) Adding global app image tag: {app_image_shared_tag} to newly built image: {deployment_container_tag}"
+            )
+            # add_tags_to_image(image_registry, deployment_container_tag, app_image_shared_tag)
+            logger.log("Tag complete")
     else:
         logger.log("Requested app is already deployed, skipping build and image push")
 
```
```diff
@@ -245,9 +241,7 @@ def process_app_deployment_request(
 
     # 8. update k8s deployment
     if needs_k8s_deploy:
-        # SKIP DEPLOY
-        logger.log("TESTING: Skipping deployment to k8s.")
-        # deploy_to_k8s(deployment_record, deployment_dir, recreate_on_deploy, logger)
+        deploy_to_k8s(deployment_record, deployment_dir, recreate_on_deploy, logger)
 
     logger.log("Publishing deployment to registry.")
     publish_deployment(
```
```diff
@@ -72,12 +72,11 @@ def process_app_removal_request(
     # TODO(telackey): Call the function directly.  The easiest way to build the correct click context is to
     # exec the process, but it would be better to refactor so we could just call down_operation with the
     # necessary parameters
-    main_logger.log("TESTING: Skipping stopping deployment.")
-    # down_command = [sys.argv[0], "deployment", "--dir", deployment_dir, "down"]
-    # if delete_volumes:
-    #     down_command.append("--delete-volumes")
-    # result = subprocess.run(down_command)
-    # result.check_returncode()
+    down_command = [sys.argv[0], "deployment", "--dir", deployment_dir, "down"]
+    if delete_volumes:
+        down_command.append("--delete-volumes")
+    result = subprocess.run(down_command)
+    result.check_returncode()
 
     removal_record = {
         "record": {
```
```diff
@@ -180,9 +180,7 @@ def get_k8s_dir():
 def get_parsed_deployment_spec(spec_file):
     spec_file_path = Path(spec_file)
     try:
-        with spec_file_path:
-            deploy_spec = get_yaml().load(open(spec_file_path, "r"))
-            return deploy_spec
+        return get_yaml().load(open(spec_file_path, "r"))
     except FileNotFoundError as error:
         # We try here to generate a useful diagnostic error
         print(f"Error: spec file: {spec_file_path} does not exist")
```
```diff
@@ -14,8 +14,13 @@ delete_cluster_exit () {
 # Test basic stack-orchestrator deploy
 echo "Running stack-orchestrator deploy test"
-# Bit of a hack, test the most recent package
-TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
+
+if [ "$1" == "from-path" ]; then
+  TEST_TARGET_SO="laconic-so"
+else
+  TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
+fi
+
 
 # Set a non-default repo dir
 export CERC_REPO_BASE_DIR=~/stack-orchestrator-test/repo-base-dir
 echo "Testing this package: $TEST_TARGET_SO"
```
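The new `from-path` argument lets the deploy test run against whatever `laconic-so` resolves to on `$PATH` instead of the most recently built package. Hypothetical invocations (the script path here is assumed for illustration):

```sh
# Default: test the newest package built under ./package/
./tests/deploy/run-deploy-test.sh

# New mode: test the installed laconic-so on $PATH
./tests/deploy/run-deploy-test.sh from-path
```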