Add timed logging for the webapp build (#771)
All checks were successful
Lint Checks / Run linter (push) Successful in 32s
Publish / Build and publish (push) Successful in 59s
Smoke Test / Run basic test suite (push) Successful in 3m1s
Webapp Test / Run webapp test suite (push) Successful in 4m47s
Deploy Test / Run deploy test suite (push) Successful in 5m40s
Add lots of log and timer output to webapp builds. Reviewed-on: #771
This commit is contained in:
parent f32bbf9e48
commit 10e2311a8b
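
The TimedLogger helper added in stack_orchestrator.deploy.webapp.util (see the diff below) prefixes each message with a UTC timestamp, an optional run id, and the elapsed step and/or total time. An illustrative example of the resulting log lines (timestamps, run id, and durations are made up, not captured from a real run):

    2023-11-20 18:30:01.123456 - example-run-id - 0:00:12.345678 (step): Processing ...
    2023-11-20 18:35:42.654321 - example-run-id - 0:05:41.530865 (total): DONE with status DEPLOYED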
@@ -27,7 +27,7 @@ from decouple import config
 import click
 from pathlib import Path
 from stack_orchestrator.build import build_containers
-from stack_orchestrator.deploy.webapp.util import determine_base_container
+from stack_orchestrator.deploy.webapp.util import determine_base_container, TimedLogger
 from stack_orchestrator.build.build_types import BuildContext


@@ -40,9 +40,11 @@ from stack_orchestrator.build.build_types import BuildContext
 @click.pass_context
 def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, tag):
     '''build the specified webapp container'''
+    logger = TimedLogger()

     quiet = ctx.obj.quiet
     debug = ctx.obj.debug
+    verbose = ctx.obj.verbose
     local_stack = ctx.obj.local_stack
     stack = ctx.obj.stack

@@ -51,12 +53,12 @@ def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, t

     if local_stack:
         dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
-        print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
+        logger.log(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
     else:
         dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))

-    if not quiet:
-        print(f'Dev Root is: {dev_root_path}')
+    if verbose:
+        logger.log(f'Dev Root is: {dev_root_path}')

     if not base_container:
         base_container = determine_base_container(source_repo)
@@ -65,6 +67,9 @@ def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, t
     container_build_env = build_containers.make_container_build_env(dev_root_path, container_build_dir, debug,
                                                                     force_rebuild, extra_build_args)

+    if verbose:
+        logger.log(f"Building base container: {base_container}")
+
     build_context_1 = BuildContext(
         stack,
         base_container,
@@ -74,9 +79,12 @@ def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, t
     )
     ok = build_containers.process_container(build_context_1)
     if not ok:
-        print("ERROR: Build failed.", file=sys.stderr)
+        logger.log("ERROR: Build failed.")
         sys.exit(1)

+    if verbose:
+        logger.log(f"Base container {base_container} build finished.")
+
     # Now build the target webapp. We use the same build script, but with a different Dockerfile and work dir.
     container_build_env["CERC_WEBAPP_BUILD_RUNNING"] = "true"
     container_build_env["CERC_CONTAINER_BUILD_WORK_DIR"] = os.path.abspath(source_repo)
@@ -85,9 +93,12 @@ def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, t
                                           "Dockerfile.webapp")
     if not tag:
         webapp_name = os.path.abspath(source_repo).split(os.path.sep)[-1]
-        container_build_env["CERC_CONTAINER_BUILD_TAG"] = f"cerc/{webapp_name}:local"
-    else:
-        container_build_env["CERC_CONTAINER_BUILD_TAG"] = tag
+        tag = f"cerc/{webapp_name}:local"
+
+    container_build_env["CERC_CONTAINER_BUILD_TAG"] = tag

+    if verbose:
+        logger.log(f"Building app container: {tag}")
+
     build_context_2 = BuildContext(
         stack,
@@ -98,5 +109,9 @@ def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, t
     )
     ok = build_containers.process_container(build_context_2)
     if not ok:
-        print("ERROR: Build failed.", file=sys.stderr)
+        logger.log("ERROR: Build failed.")
         sys.exit(1)
+
+    if verbose:
+        logger.log(f"App container {base_container} build finished.")
+    logger.log("build-webapp complete", show_step_time=False, show_total_time=True)
@@ -115,7 +115,7 @@ if [ "$CERC_NEXT_VERSION" != "keep" ] && [ "$CUR_NEXT_VERSION" != "$CERC_NEXT_VE
   mv package.json.$$ package.json
 fi

-$CERC_BUILD_TOOL install || exit 1
+time $CERC_BUILD_TOOL install || exit 1

 CUR_NEXT_VERSION=`jq -r '.version' node_modules/next/package.json`

@@ -138,9 +138,9 @@ to use for the build with:
 EOF
   cat package.json | jq ".dependencies.next = \"^$CERC_MIN_NEXTVER\"" > package.json.$$
   mv package.json.$$ package.json
-  $CERC_BUILD_TOOL install || exit 1
+  time $CERC_BUILD_TOOL install || exit 1
 fi

-$CERC_BUILD_TOOL run cerc_compile || exit 1
+time $CERC_BUILD_TOOL run cerc_compile || exit 1

 exit 0
@@ -32,8 +32,8 @@ elif [ -f "${WORK_DIR}/package.json" ]; then
   fi
 fi

-$CERC_BUILD_TOOL install || exit 1
-$CERC_BUILD_TOOL build || exit 1
+time $CERC_BUILD_TOOL install || exit 1
+time $CERC_BUILD_TOOL build || exit 1

 rm -rf "${DEST_DIR}"
 if [ -z "${CERC_BUILD_OUTPUT_DIR}" ]; then
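
The time prefixes added to the package-manager invocations above rely on the shell's built-in command timing, so each install/build/compile step now reports its wall-clock and CPU time in the build log. Assuming the scripts run under bash (an assumption, not stated in this diff), the summary printed to stderr after each command looks roughly like this, with illustrative values:

    real    1m23.456s
    user    1m02.789s
    sys     0m08.123s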
@@ -26,7 +26,7 @@ import click

 from stack_orchestrator.deploy.images import remote_image_exists, add_tags_to_image
 from stack_orchestrator.deploy.webapp import deploy_webapp
-from stack_orchestrator.deploy.webapp.util import (LaconicRegistryClient,
+from stack_orchestrator.deploy.webapp.util import (LaconicRegistryClient, TimedLogger,
                                                    build_container_image, push_container_image,
                                                    file_hash, deploy_to_k8s, publish_deployment,
                                                    hostname_for_deployment_request, generate_hostname_for_app,
@@ -34,7 +34,6 @@ from stack_orchestrator.deploy.webapp.util import (LaconicRegistryClient,


 def process_app_deployment_request(
-    run_id,
     ctx,
     laconic: LaconicRegistryClient,
     app_deployment_request,
@@ -44,14 +43,18 @@ def process_app_deployment_request(
     deployment_parent_dir,
     kube_config,
     image_registry,
-    force_rebuild=False,
-    log_file=None
+    force_rebuild,
+    logger
 ):
+    logger.log("BEGIN - process_app_deployment_request")
+
     # 1. look up application
     app = laconic.get_record(app_deployment_request.attributes.application, require=True)
+    logger.log(f"Retrieved app record {app_deployment_request.attributes.application}")

     # 2. determine dns
     requested_name = hostname_for_deployment_request(app_deployment_request, laconic)
+    logger.log(f"Determined requested name: {requested_name}")

     # HACK
     if "." in requested_name:
@@ -69,7 +72,7 @@ def process_app_deployment_request(
     matched_owner = match_owner(app_deployment_request, laconic.get_record(dns_record.attributes.request, require=True))

     if matched_owner:
-        print("Matched DnsRecord ownership:", matched_owner)
+        logger.log(f"Matched DnsRecord ownership: {matched_owner}")
     else:
         raise Exception("Unable to confirm ownership of DnsRecord %s for request %s" %
                         (dns_record.id, app_deployment_request.id))
@@ -115,15 +118,21 @@ def process_app_deployment_request(
     shared_tag_exists = remote_image_exists(image_registry, app_image_shared_tag)
     if shared_tag_exists and not force_rebuild:
         # simply add our unique tag to the existing image and we are done
-        print(f"Using existing app image {app_image_shared_tag} for {deployment_container_tag}", file=log_file)
+        logger.log(f"Using existing app image {app_image_shared_tag} for {deployment_container_tag}")
         add_tags_to_image(image_registry, app_image_shared_tag, deployment_container_tag)
+        logger.log("Tag complete")
     else:
         extra_build_args = []  # TODO: pull from request
-        build_container_image(app, deployment_container_tag, extra_build_args, log_file)
-        push_container_image(deployment_dir, log_file)
+        logger.log(f"Building container image {deployment_container_tag}")
+        build_container_image(app, deployment_container_tag, extra_build_args, logger)
+        logger.log("Build complete")
+        logger.log(f"Pushing container image {deployment_container_tag}")
+        push_container_image(deployment_dir, logger)
+        logger.log("Push complete")
         # The build/push commands above will use the unique deployment tag, so now we need to add the shared tag.
-        print(f"Updating app image tag {app_image_shared_tag} from build of {deployment_container_tag}", file=log_file)
+        logger.log(f"Updating app image tag {app_image_shared_tag} from build of {deployment_container_tag}")
         add_tags_to_image(image_registry, deployment_container_tag, app_image_shared_tag)
+        logger.log("Tag complete")

     # 7. update config (if needed)
     if not deployment_record or file_hash(deployment_config_file) != deployment_record.attributes.meta.config:
@@ -131,13 +140,13 @@ def process_app_deployment_request(

     # 8. update k8s deployment
     if needs_k8s_deploy:
-        print("Deploying to k8s")
         deploy_to_k8s(
             deployment_record,
             deployment_dir,
-            log_file
+            logger
         )

+    logger.log("Publishing deployment to registry.")
     publish_deployment(
         laconic,
         app,
@@ -146,8 +155,11 @@ def process_app_deployment_request(
         dns_record,
         dns_crn,
         deployment_dir,
-        app_deployment_request
+        app_deployment_request,
+        logger
     )
+    logger.log("Publication complete.")
+    logger.log("END - process_app_deployment_request")


 def load_known_requests(filename):
@@ -309,8 +321,9 @@ def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_
                 run_log_file = open(run_log_file_path, "wt")
                 run_reg_client = LaconicRegistryClient(laconic_config, log_file=run_log_file)

+                logger = TimedLogger(run_id, run_log_file)
+                logger.log("Processing ...")
                 process_app_deployment_request(
-                    run_id,
                     ctx,
                     run_reg_client,
                     r,
@@ -321,12 +334,14 @@ def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_
                     kube_config,
                     image_registry,
                     force_rebuild,
-                    run_log_file
+                    logger
                 )
                 status = "DEPLOYED"
             except Exception as e:
-                print("ERROR: " + str(e), file=run_log_file)
+                logger.log("ERROR: " + str(e))
             finally:
+                if logger:
+                    logger.log(f"DONE with status {status}", show_step_time=False, show_total_time=True)
                 dump_known_requests(state_file, [r], status)
                 if run_log_file:
                     run_log_file.close()
@@ -13,6 +13,7 @@
 # You should have received a copy of the GNU Affero General Public License
 # along with this program. If not, see <http:#www.gnu.org/licenses/>.

+import datetime
 import hashlib
 import json
 import os
@@ -39,6 +40,25 @@ class AttrDict(dict):
         return v


+class TimedLogger:
+    def __init__(self, id="", file=None):
+        self.start = datetime.datetime.now()
+        self.last = self.start
+        self.id = id
+        self.file = file
+
+    def log(self, msg, show_step_time=True, show_total_time=False):
+        prefix = f"{datetime.datetime.utcnow()} - {self.id}"
+        if show_step_time:
+            prefix += f" - {datetime.datetime.now() - self.last} (step)"
+        if show_total_time:
+            prefix += f" - {datetime.datetime.now() - self.start} (total)"
+        print(f"{prefix}: {msg}", file=self.file)
+        if self.file:
+            self.file.flush()
+        self.last = datetime.datetime.now()
+
+
 def logged_cmd(log_file, *vargs):
     result = None
     try:
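
The TimedLogger class added above is used in two ways by the rest of this change: the build-webapp command constructs it with defaults (TimedLogger()), so timing output goes to stdout, while the registry deployment loop binds it to a per-request log file (TimedLogger(run_id, run_log_file)), in which case each log call also flushes that file. A minimal sketch of the file-backed pattern; the path and id shown here are illustrative, not taken from the source:

    # Sketch only: the log path and run id below are made up for illustration.
    from stack_orchestrator.deploy.webapp.util import TimedLogger

    with open("/tmp/example-run.log", "wt") as run_log_file:
        logger = TimedLogger("example-run-id", run_log_file)
        logger.log("Processing ...")
        # ... deployment work happens here ...
        logger.log("DONE", show_step_time=False, show_total_time=True)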
@@ -219,7 +239,7 @@ def determine_base_container(clone_dir, app_type="webapp"):
     return base_container


-def build_container_image(app_record, tag, extra_build_args=[], log_file=None):
+def build_container_image(app_record, tag, extra_build_args=[], logger=None):
     tmpdir = tempfile.mkdtemp()

     try:
@@ -228,31 +248,33 @@ def build_container_image(app_record, tag, extra_build_args=[], log_file=None):
         repo = random.choice(app_record.attributes.repository)
         clone_dir = os.path.join(tmpdir, record_id)

-        print(f"Cloning repository {repo} to {clone_dir} ...")
+        logger.log(f"Cloning repository {repo} to {clone_dir} ...")
         if ref:
             # TODO: Determing branch or hash, and use depth 1 if we can.
             git_env = dict(os.environ.copy())
             # Never prompt
             git_env["GIT_TERMINAL_PROMPT"] = "0"
             try:
-                subprocess.check_call(["git", "clone", repo, clone_dir], env=git_env, stdout=log_file, stderr=log_file)
+                subprocess.check_call(["git", "clone", repo, clone_dir], env=git_env, stdout=logger.file, stderr=logger.file)
             except Exception as e:
-                print(f"git clone failed. Is the repository {repo} private?", file=log_file)
+                logger.log(f"git clone failed. Is the repository {repo} private?")
                 raise e
             try:
-                subprocess.check_call(["git", "checkout", ref], cwd=clone_dir, env=git_env, stdout=log_file, stderr=log_file)
+                subprocess.check_call(["git", "checkout", ref], cwd=clone_dir, env=git_env, stdout=logger.file, stderr=logger.file)
             except Exception as e:
-                print(f"git checkout failed. Does ref {ref} exist?", file=log_file)
+                logger.log(f"git checkout failed. Does ref {ref} exist?")
                 raise e
         else:
-            result = subprocess.run(["git", "clone", "--depth", "1", repo, clone_dir], stdout=log_file, stderr=log_file)
+            result = subprocess.run(["git", "clone", "--depth", "1", repo, clone_dir], stdout=logger.file, stderr=logger.file)
             result.check_returncode()

         base_container = determine_base_container(clone_dir, app_record.attributes.app_type)

-        print("Building webapp ...")
+        logger.log("Building webapp ...")
         build_command = [
-            sys.argv[0], "build-webapp",
+            sys.argv[0],
+            "--verbose",
+            "build-webapp",
             "--source-repo", clone_dir,
             "--tag", tag,
             "--base-container", base_container
@@ -261,28 +283,31 @@ def build_container_image(app_record, tag, extra_build_args=[], log_file=None):
             build_command.append("--extra-build-args")
             build_command.append(" ".join(extra_build_args))

-        result = subprocess.run(build_command, stdout=log_file, stderr=log_file)
+        result = subprocess.run(build_command, stdout=logger.file, stderr=logger.file)
         result.check_returncode()
     finally:
-        logged_cmd(log_file, "rm", "-rf", tmpdir)
+        logged_cmd(logger.file, "rm", "-rf", tmpdir)


-def push_container_image(deployment_dir, log_file=None):
-    print("Pushing image ...")
+def push_container_image(deployment_dir, logger):
+    logger.log("Pushing images ...")
     result = subprocess.run([sys.argv[0], "deployment", "--dir", deployment_dir, "push-images"],
-                            stdout=log_file, stderr=log_file)
+                            stdout=logger.file, stderr=logger.file)
     result.check_returncode()
+    logger.log("Finished pushing images.")


-def deploy_to_k8s(deploy_record, deployment_dir, log_file=None):
+def deploy_to_k8s(deploy_record, deployment_dir, logger):
     if not deploy_record:
         command = "up"
     else:
         command = "update"

+    logger.log("Deploying to k8s ...")
     result = subprocess.run([sys.argv[0], "deployment", "--dir", deployment_dir, command],
-                            stdout=log_file, stderr=log_file)
+                            stdout=logger.file, stderr=logger.file)
     result.check_returncode()
+    logger.log("Finished deploying to k8s.")


 def publish_deployment(laconic: LaconicRegistryClient,
@@ -292,7 +317,8 @@ def publish_deployment(laconic: LaconicRegistryClient,
                        dns_record,
                        dns_crn,
                        deployment_dir,
-                       app_deployment_request=None):
+                       app_deployment_request=None,
+                       logger=None):
     if not deploy_record:
         deploy_ver = "0.0.1"
     else:
@@ -322,6 +348,8 @@ def publish_deployment(laconic: LaconicRegistryClient,
     if app_deployment_request:
         new_dns_record["record"]["request"] = app_deployment_request.id

+    if logger:
+        logger.log("Publishing DnsRecord.")
     dns_id = laconic.publish(new_dns_record, [dns_crn])

     new_deployment_record = {
@@ -341,6 +369,8 @@ def publish_deployment(laconic: LaconicRegistryClient,
     if app_deployment_request:
         new_deployment_record["record"]["request"] = app_deployment_request.id

+    if logger:
+        logger.log("Publishing ApplicationDeploymentRecord.")
     deployment_id = laconic.publish(new_deployment_record, [deployment_crn])
     return {"dns": dns_id, "deployment": deployment_id}