Support multiple NodePorts, static NodePort mapping, and add 'replicas' spec option (#913)
All checks were successful
Lint Checks / Run linter (push) Successful in 33s
Publish / Build and publish (push) Successful in 1m7s
Smoke Test / Run basic test suite (push) Successful in 3m51s
Webapp Test / Run webapp test suite (push) Successful in 4m30s
Deploy Test / Run deploy test suite (push) Successful in 4m42s
Fixturenet-Laconicd-Test / Run Laconicd fixturenet and Laconic CLI tests (push) Successful in 12m52s
K8s Deploy Test / Run deploy test suite on kind/k8s (push) Successful in 7m27s
Database Test / Run database hosting test on kind/k8s (push) Successful in 9m35s
Container Registry Test / Run container registry hosting test on kind/k8s (push) Successful in 3m54s
External Stack Test / Run external stack test suite (push) Successful in 5m19s

NodePort example:

```
network:
  ports:
    caddy:
      - 1234
      - 32020:2020
```
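
Here a bare port (`1234`) gets a NodePort assigned dynamically by Kubernetes, while `32020:2020` requests static NodePort 32020 mapped to container port 2020. A minimal sketch of how a port entry is interpreted (hypothetical standalone helper for illustration; the real parsing lives in `ClusterInfo.get_nodeports()` in the diff below):

```
# Hypothetical helper, for illustration only: mirrors the port parsing added in
# ClusterInfo.get_nodeports() below.
def parse_port(raw_port: str):
    """Return (node_port, pod_port); node_port is None when k8s should assign one."""
    if ":" in raw_port:
        parts = raw_port.split(":")
        if len(parts) != 2:
            raise Exception(f"Invalid port definition: {raw_port}")
        return int(parts[0]), int(parts[1])
    return None, int(raw_port)

assert parse_port("1234") == (None, 1234)          # dynamically assigned NodePort
assert parse_port("32020:2020") == (32020, 2020)   # static NodePort 32020 -> container port 2020
```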

Replicas example:

```
replicas: 2
```
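
If `replicas` is omitted, the deployment defaults to a single replica (`Spec.get_replicas()` returns 1 in the diff below), and the value is passed through to the Kubernetes `V1DeploymentSpec`.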

This also adds an optimization for k8s: if a directory matching the name of a configmap exists beneath config/ in the stack, its contents will be copied into the corresponding configmap directory in the deployment.

For example:

```
# Config files in the stack
❯ ls stack-orchestrator/config/caddyconfig
Caddyfile  Caddyfile.one-req-per-upstream-example

# ConfigMap in the spec
❯ cat foo.yml | grep config
...
configmaps:
  caddyconfig: ./configmaps/caddyconfig

# Create the deployment
❯ laconic-so --stack ~/cerc/caddy-ethcache/stack-orchestrator/stacks/caddy-ethcache deploy create --spec-file foo.yml

# The files from beneath config/<config_map_name> have been copied into the ConfigMap directory referenced by the spec.
❯ ls deployment-001/configmaps/caddyconfig
Caddyfile  Caddyfile.one-req-per-upstream-example
```
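
A minimal sketch of that copy step (assumed helper and parameter names, for illustration; the actual logic is the `create_operation()` change in the diff below):

```
import os
from pathlib import Path
from shutil import copytree

# Illustrative sketch (assumed names): for each ConfigMap named in the spec, copy a
# matching directory from the stack's config/ tree into the deployment's configmaps/ dir.
def copy_configmap_dirs(stack_config_dir: Path, deployment_dir: Path, configmaps: list) -> None:
    for configmap in configmaps:
        source_config_dir = stack_config_dir / configmap
        if os.path.exists(source_config_dir):
            destination_config_dir = deployment_dir / "configmaps" / configmap
            copytree(source_config_dir, destination_config_dir, dirs_exist_ok=True)

# e.g. copy_configmap_dirs(Path("stack-orchestrator/config"), Path("deployment-001"), ["caddyconfig"])
```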

Reviewed-on: #913
Reviewed-by: David Boreham <dboreham@noreply.git.vdb.to>
Co-authored-by: Thomas E Lackey <telackey@bozemanpass.com>
Co-committed-by: Thomas E Lackey <telackey@bozemanpass.com>
Thomas E Lackey 2024-08-09 02:32:06 +00:00 committed by Thomas E Lackey
parent 64691bd206
commit 1def279d26
5 changed files with 50 additions and 23 deletions


@@ -34,5 +34,6 @@ volumes_key = "volumes"
security_key = "security"
annotations_key = "annotations"
labels_key = "labels"
+replicas_key = "replicas"
kind_config_filename = "kind-config.yml"
kube_config_filename = "kubeconfig.yml"


@@ -514,6 +514,13 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, netw
os.mkdir(destination_script_dir)
script_paths = get_pod_script_paths(parsed_stack, pod)
_copy_files_to_directory(script_paths, destination_script_dir)
+if parsed_spec.is_kubernetes_deployment():
+for configmap in parsed_spec.get_configmaps():
+source_config_dir = resolve_config_dir(stack_name, configmap)
+if os.path.exists(source_config_dir):
+destination_config_dir = deployment_dir_path.joinpath("configmaps", configmap)
+copytree(source_config_dir, destination_config_dir, dirs_exist_ok=True)
# Delegate to the stack's Python code
# The deploy create command doesn't require a --stack argument so we need to insert the
# stack member here.


@@ -78,28 +78,40 @@ class ClusterInfo:
if (opts.o.debug):
print(f"Env vars: {self.environment_variables.map}")
-def get_nodeport(self):
+def get_nodeports(self):
+nodeports = []
for pod_name in self.parsed_pod_yaml_map:
pod = self.parsed_pod_yaml_map[pod_name]
services = pod["services"]
for service_name in services:
service_info = services[service_name]
if "ports" in service_info:
-port = int(service_info["ports"][0])
+for raw_port in service_info["ports"]:
if opts.o.debug:
print(f"service port: {port}")
print(f"service port: {raw_port}")
if ":" in raw_port:
parts = raw_port.split(":")
if len(parts) != 2:
raise Exception(f"Invalid port definition: {raw_port}")
node_port = int(parts[0])
pod_port = int(parts[1])
else:
node_port = None
pod_port = int(raw_port)
service = client.V1Service(
-metadata=client.V1ObjectMeta(name=f"{self.app_name}-nodeport"),
+metadata=client.V1ObjectMeta(name=f"{self.app_name}-nodeport-{pod_port}"),
spec=client.V1ServiceSpec(
type="NodePort",
ports=[client.V1ServicePort(
-port=port,
-target_port=port
+port=pod_port,
+target_port=pod_port,
+node_port=node_port
)],
selector={"app": self.app_name}
)
)
-return service
+nodeports.append(service)
+return nodeports
def get_ingress(self, use_tls=False, certificate=None, cluster_issuer="letsencrypt-prod"):
# No ingress for a deployment that has no http-proxy defined, for now
@@ -373,9 +385,12 @@ class ClusterInfo:
spec=client.V1PodSpec(containers=containers, image_pull_secrets=image_pull_secrets, volumes=volumes),
)
spec = client.V1DeploymentSpec(
-replicas=1, template=template, selector={
+replicas=self.spec.get_replicas(),
+template=template, selector={
"matchLabels":
{"app": self.app_name}})
{"app": self.app_name}
}
)
deployment = client.V1Deployment(
api_version="apps/v1",


@@ -16,6 +16,7 @@ from datetime import datetime, timezone
from pathlib import Path
from kubernetes import client, config
+from typing import List
from stack_orchestrator import constants
from stack_orchestrator.deploy.deployer import Deployer, DeployerConfigGenerator
@@ -246,8 +247,8 @@ class K8sDeployer(Deployer):
if opts.o.debug:
print("No ingress configured")
-nodeport: client.V1Service = self.cluster_info.get_nodeport()
-if nodeport:
+nodeports: List[client.V1Service] = self.cluster_info.get_nodeports()
+for nodeport in nodeports:
if opts.o.debug:
print(f"Sending this nodeport: {nodeport}")
if not opts.o.dry_run:
@@ -342,10 +343,10 @@ class K8sDeployer(Deployer):
if opts.o.debug:
print("No ingress to delete")
-nodeport: client.V1Service = self.cluster_info.get_nodeport()
-if nodeport:
+nodeports: List[client.V1Service] = self.cluster_info.get_nodeports()
+for nodeport in nodeports:
if opts.o.debug:
print(f"Deleting this nodeport: {ingress}")
print(f"Deleting this nodeport: {nodeport}")
try:
self.core_api.delete_namespaced_service(
namespace=self.k8s_namespace,


@@ -117,6 +117,9 @@ class Spec:
def get_annotations(self):
return self.obj.get(constants.annotations_key, {})
+def get_replicas(self):
+return self.obj.get(constants.replicas_key, 1)
def get_labels(self):
return self.obj.get(constants.labels_key, {})