Add deployed/error status output to the state file. (#719)
* More status info.
* Up default resource limits.
* Need ps (install procps).
parent 428b05158e
commit 62af03077f
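For orientation, a hedged sketch of how a state-file entry changes shape with this commit (the request id and timestamp below are invented): previously each request id mapped directly to its createTime; now it maps to a small dict carrying the createTime plus a deployment status.

# Illustrative only; the id and timestamp are made up, not taken from a real registry.
old_style_entry = {
    "bafyreiexamplerequestid": "2024-01-01T00:00:00Z"
}
new_style_entry = {
    "bafyreiexamplerequestid": {
        "createTime": "2024-01-01T00:00:00Z",
        "status": "DEPLOYED",  # SEEN, DEPLOYING, DEPLOYED or ERROR, per the diff below
    }
}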
@@ -30,7 +30,7 @@ RUN \
 
 # [Optional] Uncomment this section to install additional OS packages.
 RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
-    && apt-get -y install --no-install-recommends jq gettext-base
+    && apt-get -y install --no-install-recommends jq gettext-base procps
 
 # [Optional] Uncomment if you want to install more global node modules
 # RUN su node -c "npm install -g <your-package-list-here>"
@@ -195,7 +195,7 @@ class ClusterInfo:
             volume_mounts=volume_mounts,
             resources=client.V1ResourceRequirements(
                 requests={"cpu": "100m", "memory": "200Mi"},
-                limits={"cpu": "500m", "memory": "500Mi"},
+                limits={"cpu": "1000m", "memory": "2000Mi"},
             ),
         )
         containers.append(container)
@@ -136,13 +136,17 @@ def load_known_requests(filename):
     return {}
 
 
-def dump_known_requests(filename, requests):
+def dump_known_requests(filename, requests, status="SEEN"):
     if not filename:
         return
     known_requests = load_known_requests(filename)
     for r in requests:
-        known_requests[r.id] = r.createTime
-    json.dump(known_requests, open(filename, "w"))
+        known_requests[r.id] = {
+            "createTime": r.createTime,
+            "status": status
+        }
+    with open(filename, "w") as f:
+        json.dump(known_requests, f)
 
 
 @click.command()
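As a reading aid, here is a minimal, self-contained sketch of the updated dump_known_requests behaviour. The StubRequest class, the state-file path, and the body of load_known_requests are stand-ins for illustration (only its "missing file yields an empty dict" contract is taken from the diff); they are not the project's actual types or module layout.

import json
import os
from dataclasses import dataclass


@dataclass
class StubRequest:
    # Stand-in for a registry request record; only the fields used here.
    id: str
    createTime: str


def load_known_requests(filename):
    # Stub with the same contract relied on above: no file -> empty dict.
    if filename and os.path.exists(filename):
        with open(filename, "r") as f:
            return json.load(f)
    return {}


def dump_known_requests(filename, requests, status="SEEN"):
    if not filename:
        return
    known_requests = load_known_requests(filename)
    for r in requests:
        # Each entry now records the status alongside the createTime.
        known_requests[r.id] = {
            "createTime": r.createTime,
            "status": status,
        }
    with open(filename, "w") as f:
        json.dump(known_requests, f)


if __name__ == "__main__":
    state_file = "state.json"  # hypothetical path
    r = StubRequest(id="bafyreiexample", createTime="2024-01-01T00:00:00Z")
    dump_known_requests(state_file, [r], "DEPLOYING")
    print(load_known_requests(state_file))
    # -> {'bafyreiexample': {'createTime': '2024-01-01T00:00:00Z', 'status': 'DEPLOYING'}}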
@@ -201,6 +205,7 @@ def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_
     requests.reverse()
     requests_by_name = {}
     for r in requests:
+        # TODO: Do this _after_ filtering deployments and cancellations to minimize round trips.
         app = laconic.get_record(r.attributes.application)
         if not app:
             print("Skipping request %s, cannot locate app." % r.id)
@@ -256,6 +261,8 @@ def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_
 
     if not dry_run:
         for r in requests_to_execute:
+            dump_known_requests(state_file, [r], "DEPLOYING")
+            status = "ERROR"
             try:
                 process_app_deployment_request(
                     ctx,
@@ -268,5 +275,6 @@ def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_
                     kube_config,
                     image_registry
                 )
+                status = "DEPLOYED"
             finally:
-                dump_known_requests(state_file, [r])
+                dump_known_requests(state_file, [r], status)
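The hunk above gives each executed request a simple status lifecycle. Below is a compact sketch of the pattern, reusing dump_known_requests from the earlier sketch and with deploy standing in for process_app_deployment_request: DEPLOYING is persisted before the attempt, status defaults to ERROR, and it only becomes DEPLOYED if the deploy call returns, so the finally block records ERROR even when an exception propagates.

def handle_request(state_file, r, deploy):
    # Hypothetical helper for illustration; not a function in the repository.
    dump_known_requests(state_file, [r], "DEPLOYING")  # record the attempt up front
    status = "ERROR"
    try:
        deploy(r)
        status = "DEPLOYED"  # only reached if deploy() did not raise
    finally:
        # Runs on success and failure alike; any exception still propagates.
        dump_known_requests(state_file, [r], status)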