Add deployment status command and fix k8s output for deployment ps #679
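This change adds a status subcommand to the deployment CLI, wired through a new status_operation() to a new abstract Deployer.status() method. For compose deployments, DockerDeployer.status() prints each container's name and state; for Kubernetes, K8sDeployer.status() prints the ingress hostname, IP and TLS certificate validity plus the deployment's pods. It also reworks K8sDeployer.ps() so that, instead of printing every pod in the cluster and returning an empty list, it returns container-like records (built with a small AttrDict helper) for only the pods that belong to this deployment.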
@@ -40,6 +40,13 @@ class DockerDeployer(Deployer):
         except DockerException as e:
             raise DeployerException(e)
 
+    def status(self):
+        try:
+            for p in self.docker.compose.ps():
+                print(f"{p.name}\t{p.state.status}")
+        except DockerException as e:
+            raise DeployerException(e)
+
     def ps(self):
         try:
             return self.docker.compose.ps()
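A minimal sketch (not part of the diff) of what the compose path relies on, assuming the deployer wraps python-on-whales as the DockerException import suggests: compose.ps() yields container objects exposing .name and .state.status. The compose file path below is hypothetical.

# Sketch only; assumes python-on-whales
from python_on_whales import DockerClient

docker = DockerClient(compose_files=["docker-compose.yml"])  # hypothetical compose file
for p in docker.compose.ps():
    # Each item exposes .name and .state.status, which status() prints tab-separated
    print(f"{p.name}\t{p.state.status}")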
@@ -107,6 +107,14 @@ def down_operation(ctx, delete_volumes, extra_args_list):
     ctx.obj.deployer.down(timeout=timeout_arg, volumes=delete_volumes)
 
 
+def status_operation(ctx):
+    global_context = ctx.parent.parent.obj
+    if not global_context.dry_run:
+        if global_context.verbose:
+            print("Running compose status")
+        ctx.obj.deployer.status()
+
+
 def ps_operation(ctx):
     global_context = ctx.parent.parent.obj
     if not global_context.dry_run:
@@ -31,6 +31,10 @@ class Deployer(ABC):
     def ps(self):
         pass
 
+    @abstractmethod
+    def status(self):
+        pass
+
     @abstractmethod
     def port(self, service, private_port):
         pass
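Marking status() abstract means every concrete deployer must now provide it. A self-contained toy illustrating the contract (toy class names, not the real Deployer hierarchy):

from abc import ABC, abstractmethod

class ToyDeployer(ABC):
    @abstractmethod
    def ps(self):
        pass

    @abstractmethod
    def status(self):
        pass

class IncompleteDeployer(ToyDeployer):
    def ps(self):
        return []

# IncompleteDeployer() raises TypeError until status() is implemented,
# which is why both DockerDeployer and K8sDeployer gain one in this PR.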
@@ -18,7 +18,7 @@ from pathlib import Path
 import sys
 from stack_orchestrator import constants
 from stack_orchestrator.deploy.images import push_images_operation
-from stack_orchestrator.deploy.deploy import up_operation, down_operation, ps_operation, port_operation
+from stack_orchestrator.deploy.deploy import up_operation, down_operation, ps_operation, port_operation, status_operation
 from stack_orchestrator.deploy.deploy import exec_operation, logs_operation, create_deploy_context
 from stack_orchestrator.deploy.deploy_types import DeployCommandContext
 from stack_orchestrator.deploy.deployment_context import DeploymentContext
@@ -147,4 +147,5 @@ def logs(ctx, tail, follow, extra_args):
 @command.command()
 @click.pass_context
 def status(ctx):
-    print(f"Context: {ctx.parent.obj}")
+    ctx.obj = make_deploy_context(ctx)
+    status_operation(ctx)
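With this wiring, invoking the deployment status command (presumably something like laconic-so deployment --dir <deployment-dir> status, mirroring the existing subcommands) builds the deploy context via make_deploy_context(ctx) and dispatches through status_operation() to deployer.status(), instead of just echoing the click context as before.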
@@ -26,6 +26,12 @@ from stack_orchestrator.deploy.deployment_context import DeploymentContext
 from stack_orchestrator.util import error_exit
 
 
+class AttrDict(dict):
+    def __init__(self, *args, **kwargs):
+        super(AttrDict, self).__init__(*args, **kwargs)
+        self.__dict__ = self
+
+
 def _check_delete_exception(e: client.exceptions.ApiException):
     if e.status == 404:
         if opts.o.debug:
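The AttrDict helper exists so ps() below can return plain dicts that still support the attribute-style access (.name, .network_settings.ports, .HostIp, .HostPort) that the compose-based code paths use on python-on-whales container objects. A runnable sketch with hypothetical values:

class AttrDict(dict):
    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
        self.__dict__ = self

# Hypothetical pod record shaped like what K8sDeployer.ps() returns
pod = AttrDict({
    "name": "example-pod",
    "network_settings": AttrDict({
        "ports": AttrDict({"80": [AttrDict({"HostIp": "10.0.0.5", "HostPort": 80})]})
    })
})

print(pod.name)                                       # attribute access: example-pod
print(pod.network_settings.ports["80"][0].HostPort)   # 80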
@@ -42,7 +48,7 @@ class K8sDeployer(Deployer):
     networking_api: client.NetworkingV1Api
     k8s_namespace: str = "default"
     kind_cluster_name: str
-    cluster_info : ClusterInfo
+    cluster_info: ClusterInfo
     deployment_dir: Path
     deployment_context: DeploymentContext
 
@@ -72,6 +78,7 @@ class K8sDeployer(Deployer):
         self.core_api = client.CoreV1Api()
         self.networking_api = client.NetworkingV1Api()
         self.apps_api = client.AppsV1Api()
+        self.custom_obj_api = client.CustomObjectsApi()
 
     def up(self, detach, services):
 
@@ -202,15 +209,82 @@ class K8sDeployer(Deployer):
         # Destroy the kind cluster
         destroy_cluster(self.kind_cluster_name)
 
-    def ps(self):
+    def status(self):
         self.connect_api()
         # Call whatever API we need to get the running container list
-        ret = self.core_api.list_pod_for_all_namespaces(watch=False)
-        if ret.items:
-            for i in ret.items:
-                print("%s\t%s\t%s" % (i.status.pod_ip, i.metadata.namespace, i.metadata.name))
-        ret = self.core_api.list_node(pretty=True, watch=False)
-        return []
+        all_pods = self.core_api.list_pod_for_all_namespaces(watch=False)
+        pods = []
+
+        if all_pods.items:
+            for p in all_pods.items:
+                if self.cluster_info.app_name in p.metadata.name:
+                    pods.append(p)
+
+        if not pods:
+            return
+
+        hostname = "?"
+        ip = "?"
+        tls = "?"
+        try:
+            ingress = self.networking_api.read_namespaced_ingress(namespace=self.k8s_namespace,
+                                                                  name=self.cluster_info.get_ingress().metadata.name)
+
+            cert = self.custom_obj_api.get_namespaced_custom_object(
+                group="cert-manager.io",
+                version="v1",
+                namespace=self.k8s_namespace,
+                plural="certificates",
+                name=ingress.spec.tls[0].secret_name
+            )
+
+            hostname = ingress.spec.tls[0].hosts[0]
+            ip = ingress.status.load_balancer.ingress[0].ip
+            tls = "notBefore: %s, notAfter: %s" % (cert["status"]["notBefore"], cert["status"]["notAfter"])
+        except:  # noqa: E722
+            pass
+
+        print("Ingress:")
+        print("\tHostname:", hostname)
+        print("\tIP:", ip)
+        print("\tTLS:", tls)
+        print("")
+        print("Pods:")
+
+        for p in pods:
+            if p.metadata.deletion_timestamp:
+                print(f"\t{p.metadata.namespace}/{p.metadata.name}: Terminating ({p.metadata.deletion_timestamp})")
+            else:
+                print(f"\t{p.metadata.namespace}/{p.metadata.name}: Running ({p.metadata.creation_timestamp})")
+
+    def ps(self):
+        self.connect_api()
+        pods = self.core_api.list_pod_for_all_namespaces(watch=False)
+
+        ret = []
+
+        for p in pods.items:
+            if self.cluster_info.app_name in p.metadata.name:
+                pod_ip = p.status.pod_ip
+                ports = AttrDict()
+                for c in p.spec.containers:
+                    if c.ports:
+                        for prt in c.ports:
+                            ports[str(prt.container_port)] = [AttrDict({
+                                "HostIp": pod_ip,
+                                "HostPort": prt.container_port
+                            })]
+
+                ret.append(AttrDict({
+                    "id": f"{p.metadata.namespace}/{p.metadata.name}",
+                    "name": p.metadata.name,
+                    "namespace": p.metadata.namespace,
+                    "network_settings": AttrDict({
+                        "ports": ports
+                    })
+                }))
+
+        return ret
 
     def port(self, service, private_port):
         # Since we handle the port mapping, need to figure out where this comes from
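Based on the print calls in K8sDeployer.status(), output for a Kubernetes deployment would look roughly like the following (hostname, IP, dates and pod name are made up):

Ingress:
	Hostname: app.example.com
	IP: 203.0.113.10
	TLS: notBefore: 2024-01-01T00:00:00Z, notAfter: 2024-03-31T23:59:59Z

Pods:
	default/example-app-deployment-abc123: Running (2024-01-01 00:05:00+00:00)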