Compare commits

..

2 Commits

Author SHA1 Message Date
8afae1904b Add support for running jobs from a stack (#975)
All checks were successful
Lint Checks / Run linter (push) Successful in 30s
Part of https://plan.wireit.in/deepstack/browse/VUL-265/

Reviewed-on: #975
Co-authored-by: Prathamesh Musale <prathamesh.musale0@gmail.com>
Co-committed-by: Prathamesh Musale <prathamesh.musale0@gmail.com>
2025-12-04 06:13:28 +00:00
7acabb0743 Add support for generating Helm charts when creating a deployment (#974)
All checks were successful
Lint Checks / Run linter (push) Successful in 29s
Part of https://plan.wireit.in/deepstack/browse/VUL-265/

- Added a flag `--helm-chart` to `deploy create` command
- Uses Kompose CLI wrapper to generate a helm chart from compose files in a stack
- To be handled in follow-on PR(s):
  - Templatize generated charts and generate a `values.yml` file with defaults

Reviewed-on: #974
Co-authored-by: Prathamesh Musale <prathamesh.musale0@gmail.com>
Co-committed-by: Prathamesh Musale <prathamesh.musale0@gmail.com>
2025-11-27 06:43:07 +00:00
17 changed files with 896 additions and 39 deletions

View File

@ -0,0 +1,113 @@
# Helm Chart Generation
Generate Kubernetes Helm charts from stack compose files using Kompose.
## Prerequisites
Install Kompose:
```bash
# Linux
curl -L https://github.com/kubernetes/kompose/releases/download/v1.34.0/kompose-linux-amd64 -o kompose
chmod +x kompose
sudo mv kompose /usr/local/bin/
# macOS
brew install kompose
# Verify
kompose version
```
## Usage
### 1. Create spec file
```bash
laconic-so --stack <stack-name> deploy --deploy-to k8s init \
--kube-config ~/.kube/config \
--output spec.yml
```
### 2. Generate Helm chart
```bash
laconic-so --stack <stack-name> deploy create \
--spec-file spec.yml \
--deployment-dir my-deployment \
--helm-chart
```
### 3. Deploy to Kubernetes
```bash
helm install my-release my-deployment/chart
kubectl get pods -n zenith
```
## Output Structure
```bash
my-deployment/
├── spec.yml # Reference
├── stack.yml # Reference
└── chart/ # Helm chart
├── Chart.yaml
├── README.md
└── templates/
└── *.yaml
```
## Example
```bash
# Generate chart for stage1-zenithd
laconic-so --stack stage1-zenithd deploy --deploy-to k8s init \
--kube-config ~/.kube/config \
--output stage1-spec.yml
laconic-so --stack stage1-zenithd deploy create \
--spec-file stage1-spec.yml \
--deployment-dir stage1-deployment \
--helm-chart
# Deploy
helm install stage1-zenithd stage1-deployment/chart
```
## Production Deployment (TODO)
### Local Development
```bash
# Access services using port-forward
kubectl port-forward service/zenithd 26657:26657
kubectl port-forward service/nginx-api-proxy 1317:80
kubectl port-forward service/cosmos-explorer 4173:4173
```
### Production Access Options
- Option 1: Ingress + cert-manager (Recommended)
- Install ingress-nginx + cert-manager
- Point DNS to cluster LoadBalancer IP
- Auto-provisions Let's Encrypt TLS certs
- Access: `https://api.zenith.example.com`
- Option 2: Cloud LoadBalancer
- Use cloud provider's LoadBalancer service type
- Point DNS to assigned external IP
- Manual TLS cert management
- Option 3: Bare Metal (MetalLB + Ingress)
- MetalLB provides LoadBalancer IPs from local network
- Same Ingress setup as cloud
- Option 4: NodePort + External Proxy
- Expose services on 30000-32767 range
- External nginx/Caddy proxies 80/443 → NodePort
- Manual cert management
### Changes Needed
- Add Ingress template to charts
- Add TLS configuration to values.yaml
- Document cert-manager setup
- Add production deployment guide

View File

@ -14,6 +14,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from stack_orchestrator.deploy.deployment_context import DeploymentContext
from ruamel.yaml import YAML
def create(context: DeploymentContext, extra_args):
@ -22,12 +23,17 @@ def create(context: DeploymentContext, extra_args):
# deterministic-deployment-proxy contract, which itself is a prereq for Optimism contract deployment
fixturenet_eth_compose_file = context.deployment_dir.joinpath('compose', 'docker-compose-fixturenet-eth.yml')
with open(fixturenet_eth_compose_file, 'r') as yaml_file:
yaml = YAML()
yaml_data = yaml.load(yaml_file)
new_script = '../config/fixturenet-optimism/run-geth.sh:/opt/testnet/run.sh'
def add_geth_volume(yaml_data):
if new_script not in yaml_data['services']['fixturenet-eth-geth-1']['volumes']:
yaml_data['services']['fixturenet-eth-geth-1']['volumes'].append(new_script)
if new_script not in yaml_data['services']['fixturenet-eth-geth-1']['volumes']:
yaml_data['services']['fixturenet-eth-geth-1']['volumes'].append(new_script)
context.modify_yaml(fixturenet_eth_compose_file, add_geth_volume)
with open(fixturenet_eth_compose_file, 'w') as yaml_file:
yaml = YAML()
yaml.dump(yaml_data, yaml_file)
return None

View File

@ -2,6 +2,7 @@ version: "1.0"
name: test
description: "A test stack"
repos:
- git.vdb.to/cerc-io/laconicd
- git.vdb.to/cerc-io/test-project@test-branch
containers:
- cerc/test-container

View File

@ -94,6 +94,40 @@ class DockerDeployer(Deployer):
except DockerException as e:
raise DeployerException(e)
def run_job(self, job_name: str, release_name: str = None):
    """Run a one-time job defined in the deployment's compose-jobs directory.

    Locates compose-jobs/docker-compose-<job_name>.yml relative to the
    deployment directory (derived from the first configured compose file) and
    runs the compose service named <job_name> from it, reusing the main
    deployment's project name and env file so the job can reach its volumes
    and networks.

    Args:
        job_name: Name of the job (and of the compose service) to run.
        release_name: Ignored for Docker deployments; only meaningful for
            K8s/Helm deployments (kept for interface compatibility).

    Raises:
        DeployerException: if no compose files are configured, the job
            compose file is missing, or the underlying docker command fails.
    """
    # release_name is ignored for Docker deployments (only used for K8s/Helm)
    if not opts.o.dry_run:
        try:
            # Find job compose file in compose-jobs directory
            # The deployment should have compose-jobs/docker-compose-<job_name>.yml
            if not self.docker.compose_files:
                raise DeployerException("No compose files configured")
            # Deployment directory is parent of compose directory
            # (assumes compose files live in <deployment-dir>/compose — TODO confirm)
            compose_dir = Path(self.docker.compose_files[0]).parent
            deployment_dir = compose_dir.parent
            job_compose_file = deployment_dir / "compose-jobs" / f"docker-compose-{job_name}.yml"
            if not job_compose_file.exists():
                raise DeployerException(f"Job compose file not found: {job_compose_file}")
            if opts.o.verbose:
                print(f"Running job from: {job_compose_file}")
            # Create a DockerClient for the job compose file with same project name and env file
            # This allows the job to access volumes from the main deployment
            job_docker = DockerClient(
                compose_files=[job_compose_file],
                compose_project_name=self.docker.compose_project_name,
                compose_env_file=self.docker.compose_env_file
            )
            # Run the job with --rm flag to remove container after completion
            return job_docker.compose.run(service=job_name, remove=True, tty=True)
        except DockerException as e:
            raise DeployerException(e)
class DockerDeployerConfigGenerator(DeployerConfigGenerator):

View File

@ -84,7 +84,22 @@ def create_deploy_context(
# Extract the cluster name from the deployment, if we have one
if deployment_context and cluster is None:
cluster = deployment_context.get_cluster_id()
cluster_context = _make_cluster_context(global_context, stack, include, exclude, cluster, env_file)
# Check if this is a helm chart deployment (has chart/ but no compose/)
# TODO: Add a new deployment type for helm chart deployments
# To avoid relying on chart existence in such cases
is_helm_chart_deployment = False
if deployment_context:
chart_dir = deployment_context.deployment_dir / "chart"
compose_dir = deployment_context.deployment_dir / "compose"
is_helm_chart_deployment = chart_dir.exists() and not compose_dir.exists()
# For helm chart deployments, skip compose file loading
if is_helm_chart_deployment:
cluster_context = ClusterContext(global_context, cluster, [], [], [], None, env_file)
else:
cluster_context = _make_cluster_context(global_context, stack, include, exclude, cluster, env_file)
deployer = getDeployer(deploy_to, deployment_context, compose_files=cluster_context.compose_files,
compose_project_name=cluster_context.cluster,
compose_env_file=cluster_context.env_file)
@ -188,6 +203,17 @@ def logs_operation(ctx, tail: int, follow: bool, extra_args: str):
print(stream_content.decode("utf-8"), end="")
def run_job_operation(ctx, job_name: str, helm_release: str = None):
    """Dispatch a one-time job to the active deployer; no-op in dry-run mode.

    Prints an error and exits with status 1 if the deployer raises.
    """
    global_options = ctx.parent.parent.obj
    if global_options.dry_run:
        return
    print(f"Running job: {job_name}")
    try:
        ctx.obj.deployer.run_job(job_name, helm_release)
    except Exception as e:
        print(f"Error running job {job_name}: {e}")
        sys.exit(1)
@command.command()
@click.argument('extra_args', nargs=-1) # help: command: up <service1> <service2>
@click.pass_context

View File

@ -55,6 +55,10 @@ class Deployer(ABC):
def run(self, image: str, command=None, user=None, volumes=None, entrypoint=None, env={}, ports=[], detach=False):
pass
@abstractmethod
def run_job(self, job_name: str, release_name: str = None):
    """Run a one-time job from the stack.

    Args:
        job_name: Name of the job to run.
        release_name: Optional release name; only meaningful for deployers
            that support it (e.g. K8s/Helm) — other implementations ignore it.
    """
    pass
class DeployerException(Exception):
def __init__(self, *args: object) -> None:

View File

@ -167,3 +167,14 @@ def status(ctx):
def update(ctx):
ctx.obj = make_deploy_context(ctx)
update_operation(ctx)
@command.command()
@click.argument('job_name')
@click.option('--helm-release', help='Helm release name (only for k8s helm chart deployments, defaults to chart name)')
@click.pass_context
def run_job(ctx, job_name, helm_release):
    '''run a one-time job from the stack'''
    # Imported at call time rather than module load time — presumably to
    # avoid a circular import with the deploy module; TODO confirm.
    from stack_orchestrator.deploy.deploy import run_job_operation
    ctx.obj = make_deploy_context(ctx)
    run_job_operation(ctx, job_name, helm_release)

View File

@ -45,14 +45,11 @@ class DeploymentContext:
def get_compose_dir(self):
return self.deployment_dir.joinpath(constants.compose_dir_name)
def get_compose_file(self, name: str):
return self.get_compose_dir() / f"docker-compose-{name}.yml"
def get_cluster_id(self):
return self.id
def init(self, dir: Path):
self.deployment_dir = dir.absolute()
def init(self, dir):
self.deployment_dir = dir
self.spec = Spec()
self.spec.init_from_file(self.get_spec_file())
self.stack = Stack(self.spec.obj["stack"])
@ -69,19 +66,3 @@ class DeploymentContext:
unique_cluster_descriptor = f"{path},{self.get_stack_file()},None,None"
hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()[:16]
self.id = f"{constants.cluster_name_prefix}{hash}"
def modify_yaml(self, file_path: Path, modifier_func):
    """
    Load a YAML from the deployment, apply a modification function, and write it back.

    Args:
        file_path: Path to the YAML file; must resolve inside this
            deployment's directory (guards against accidental writes
            elsewhere on disk).
        modifier_func: Callable invoked with the parsed YAML data; expected
            to mutate it in place. Its return value is ignored.

    Raises:
        ValueError: if file_path lies outside the deployment directory.
    """
    if not file_path.absolute().is_relative_to(self.deployment_dir):
        raise ValueError(f"File is not inside deployment directory: {file_path}")
    yaml = get_yaml()
    with open(file_path, 'r') as f:
        yaml_data = yaml.load(f)
    modifier_func(yaml_data)
    with open(file_path, 'w') as f:
        yaml.dump(yaml_data, f)

View File

@ -27,7 +27,7 @@ from stack_orchestrator.opts import opts
from stack_orchestrator.util import (get_stack_path, get_parsed_deployment_spec, get_parsed_stack_config,
global_options, get_yaml, get_pod_list, get_pod_file_path, pod_has_scripts,
get_pod_script_paths, get_plugin_code_paths, error_exit, env_var_map_from_file,
resolve_config_dir)
resolve_config_dir, get_job_list, get_job_file_path)
from stack_orchestrator.deploy.spec import Spec
from stack_orchestrator.deploy.deploy_types import LaconicStackSetupCommand
from stack_orchestrator.deploy.deployer_factory import getDeployerConfigGenerator
@ -443,20 +443,24 @@ def _check_volume_definitions(spec):
@click.command()
@click.option("--spec-file", required=True, help="Spec file to use to create this deployment")
@click.option("--deployment-dir", help="Create deployment files in this directory")
@click.argument('extra_args', nargs=-1, type=click.UNPROCESSED)
@click.option("--helm-chart", is_flag=True, default=False, help="Generate Helm chart instead of deploying (k8s only)")
# TODO: Hack
@click.option("--network-dir", help="Network configuration supplied in this directory")
@click.option("--initial-peers", help="Initial set of persistent peers")
@click.pass_context
def create(ctx, spec_file, deployment_dir, extra_args):
def create(ctx, spec_file, deployment_dir, helm_chart, network_dir, initial_peers):
deployment_command_context = ctx.obj
return create_operation(deployment_command_context, spec_file, deployment_dir, extra_args)
return create_operation(deployment_command_context, spec_file, deployment_dir, helm_chart, network_dir, initial_peers)
# The init command's implementation is in a separate function so that we can
# call it from other commands, bypassing the click decoration stuff
def create_operation(deployment_command_context, spec_file, deployment_dir, extra_args):
def create_operation(deployment_command_context, spec_file, deployment_dir, helm_chart, network_dir, initial_peers):
parsed_spec = Spec(os.path.abspath(spec_file), get_parsed_deployment_spec(spec_file))
_check_volume_definitions(parsed_spec)
stack_name = parsed_spec["stack"]
deployment_type = parsed_spec[constants.deploy_to_key]
stack_file = get_stack_path(stack_name).joinpath(constants.stack_file_name)
parsed_stack = get_parsed_stack_config(stack_name)
if opts.o.debug:
@ -471,7 +475,17 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, extr
# Copy spec file and the stack file into the deployment dir
copyfile(spec_file, deployment_dir_path.joinpath(constants.spec_file_name))
copyfile(stack_file, deployment_dir_path.joinpath(constants.stack_file_name))
# Create deployment.yml with cluster-id
_create_deployment_file(deployment_dir_path)
# Branch to Helm chart generation flow if --helm-chart flag is set
if deployment_type == "k8s" and helm_chart:
from stack_orchestrator.deploy.k8s.helm.chart_generator import generate_helm_chart
generate_helm_chart(stack_name, spec_file, deployment_dir_path)
return # Exit early for helm chart generation
# Existing deployment flow continues unchanged
# Copy any config variables from the spec file into an env file suitable for compose
_write_config_file(spec_file, deployment_dir_path.joinpath(constants.config_file_name))
# Copy any k8s config file into the deployment dir
@ -529,6 +543,21 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, extr
if os.path.exists(destination_config_dir) and not os.listdir(destination_config_dir):
copytree(source_config_dir, destination_config_dir, dirs_exist_ok=True)
# Copy the job files into the deployment dir (for Docker deployments)
jobs = get_job_list(parsed_stack)
if jobs and not parsed_spec.is_kubernetes_deployment():
destination_compose_jobs_dir = deployment_dir_path.joinpath("compose-jobs")
os.mkdir(destination_compose_jobs_dir)
for job in jobs:
job_file_path = get_job_file_path(stack_name, parsed_stack, job)
if job_file_path and job_file_path.exists():
parsed_job_file = yaml.load(open(job_file_path, "r"))
_fixup_pod_file(parsed_job_file, parsed_spec, destination_compose_dir)
with open(destination_compose_jobs_dir.joinpath("docker-compose-%s.yml" % job), "w") as output_file:
yaml.dump(parsed_job_file, output_file)
if opts.o.debug:
print(f"Copied job compose file: {job}")
# Delegate to the stack's Python code
# The deploy create command doesn't require a --stack argument so we need to insert the
# stack member here.
@ -539,7 +568,7 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, extr
deployer_config_generator = getDeployerConfigGenerator(deployment_type, deployment_context)
# TODO: make deployment_dir_path a Path above
deployer_config_generator.generate(deployment_dir_path)
call_stack_deploy_create(deployment_context, extra_args)
call_stack_deploy_create(deployment_context, [network_dir, initial_peers, deployment_command_context])
# TODO: this code should be in the stack .py files but

View File

@ -510,6 +510,26 @@ class K8sDeployer(Deployer):
# We need to figure out how to do this -- check why we're being called first
pass
def run_job(self, job_name: str, helm_release: str = None):
    """Run a one-time job from a helm-based K8s deployment.

    Args:
        job_name: Name of the job; expects a <job_name>-job.yaml template
            in the deployment's chart.
        helm_release: Optional Helm release name; when None, run_helm_job
            falls back to the chart name from Chart.yaml.

    Raises:
        Exception: if the deployment has no chart/ directory (compose-based
            K8s deployments are not yet supported), or the job fails/times out.
    """
    if not opts.o.dry_run:
        from stack_orchestrator.deploy.k8s.helm.job_runner import run_helm_job
        # Check if this is a helm-based deployment
        chart_dir = self.deployment_dir / "chart"
        if not chart_dir.exists():
            # TODO: Implement job support for compose-based K8s deployments
            raise Exception(f"Job support is only available for helm-based deployments. Chart directory not found: {chart_dir}")
        # Run the job using the helm job runner
        run_helm_job(
            chart_dir=chart_dir,
            job_name=job_name,
            release=helm_release,
            namespace=self.k8s_namespace,
            timeout=600,  # seconds to wait for job completion
            verbose=opts.o.verbose
        )
def is_kind(self):
return self.type == "k8s-kind"

View File

@ -0,0 +1,14 @@
# Copyright © 2025 Vulcanize
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

View File

@ -0,0 +1,320 @@
# Copyright © 2025 Vulcanize
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pathlib import Path
from stack_orchestrator import constants
from stack_orchestrator.opts import opts
from stack_orchestrator.util import (
get_parsed_stack_config,
get_pod_list,
get_pod_file_path,
get_job_list,
get_job_file_path,
error_exit
)
from stack_orchestrator.deploy.k8s.helm.kompose_wrapper import (
check_kompose_available,
get_kompose_version,
convert_to_helm_chart
)
from stack_orchestrator.util import get_yaml
def _wrap_job_templates_with_conditionals(chart_dir: Path, jobs: list) -> None:
    """
    Wrap job templates with conditional checks so they are not created by default.
    Jobs will only be created when explicitly enabled via --set jobs.<name>.enabled=true

    Args:
        chart_dir: Root of the generated Helm chart (must contain templates/).
        jobs: Job names; each maps to templates/<name>-job.yaml.
    """
    templates_dir = chart_dir / "templates"
    if not templates_dir.exists():
        return
    for job_name in jobs:
        # Find job template file (kompose generates <service-name>-job.yaml)
        job_template_file = templates_dir / f"{job_name}-job.yaml"
        if not job_template_file.exists():
            if opts.o.debug:
                print(f"Warning: Job template not found: {job_template_file}")
            continue
        # Read the template content
        content = job_template_file.read_text()
        # Wrap with conditional (default false)
        # Use 'index' function to handle job names with dashes
        # Provide default dict for .Values.jobs to handle case where it doesn't exist
        # NOTE: the {{{{ / }}}} sequences below render as literal '{{' / '}}'
        # in the output, producing a Go-template conditional for Helm.
        condition = (
            f"{{{{- if (index (.Values.jobs | default dict) "
            f'"{job_name}" | default dict).enabled | default false }}}}'
        )
        wrapped_content = f"""{condition}
{content}{{{{- end }}}}
"""
        # Write back
        job_template_file.write_text(wrapped_content)
        if opts.o.debug:
            print(f"Wrapped job template with conditional: {job_template_file.name}")
def _post_process_chart(chart_dir: Path, chart_name: str, jobs: list) -> None:
    """
    Post-process Kompose-generated chart to fix common issues.

    Fixes:
    1. Chart.yaml name, description and keywords
    2. Add conditional wrappers to job templates (default: disabled)

    Args:
        chart_dir: Root of the generated Helm chart.
        chart_name: Name to stamp into Chart.yaml.
        jobs: Job names whose templates should be made opt-in.

    TODO:
    - Add defaultMode: 0755 to ConfigMap volumes containing scripts (.sh files)
    """
    yaml = get_yaml()
    # Fix Chart.yaml
    chart_yaml_path = chart_dir / "Chart.yaml"
    if chart_yaml_path.exists():
        # Use a context manager so the file handle is closed promptly
        # (the previous bare open() leaked the handle until GC).
        with open(chart_yaml_path, "r") as f:
            chart_yaml = yaml.load(f)
        # Fix name
        chart_yaml["name"] = chart_name
        # Fix description
        chart_yaml["description"] = f"Generated Helm chart for {chart_name} stack"
        # Fix keywords
        if "keywords" in chart_yaml and isinstance(chart_yaml["keywords"], list):
            chart_yaml["keywords"] = [chart_name]
        with open(chart_yaml_path, "w") as f:
            yaml.dump(chart_yaml, f)
    # Process job templates: wrap with conditionals (default disabled)
    if jobs:
        _wrap_job_templates_with_conditionals(chart_dir, jobs)
def generate_helm_chart(stack_path: str, spec_file: str, deployment_dir_path: Path) -> None:
    """
    Generate a self-sufficient Helm chart from stack compose files using Kompose.

    Args:
        stack_path: Path to the stack directory
        spec_file: Path to the deployment spec file (currently unused here;
            kept for interface compatibility — TODO confirm before removing)
        deployment_dir_path: Deployment directory path (already created with deployment.yml)

    Output structure:
        deployment-dir/
            deployment.yml   # Contains cluster-id
            spec.yml         # Reference
            stack.yml        # Reference
            chart/           # Self-sufficient Helm chart
                Chart.yaml
                README.md
                templates/
                    *.yaml

    TODO: Enhancements:
    - Convert Deployments to StatefulSets for stateful services (zenithd, postgres)
    - Add _helpers.tpl with common label/selector functions
    - Enhance Chart.yaml with proper metadata (version, description, etc.)
    """
    parsed_stack = get_parsed_stack_config(stack_path)
    stack_name = parsed_stack.get("name", stack_path)
    # 1. Check Kompose availability
    if not check_kompose_available():
        error_exit("kompose not found in PATH.\n")
    # 2. Read cluster-id from deployment.yml
    deployment_file = deployment_dir_path / constants.deployment_file_name
    if not deployment_file.exists():
        error_exit(f"Deployment file not found: {deployment_file}")
    yaml = get_yaml()
    # Context manager closes the handle promptly (previously leaked until GC)
    with open(deployment_file, "r") as f:
        deployment_config = yaml.load(f)
    cluster_id = deployment_config.get(constants.cluster_id_key)
    if not cluster_id:
        error_exit(f"cluster-id not found in {deployment_file}")
    # 3. Derive chart name from stack name + cluster-id suffix
    # Sanitize stack name for use in chart name
    sanitized_stack_name = stack_name.replace("_", "-").replace(" ", "-")
    # Extract hex suffix from cluster-id (after the prefix)
    # cluster-id format: "laconic-<hex>" -> extract the hex part
    cluster_id_suffix = cluster_id.split("-", 1)[1] if "-" in cluster_id else cluster_id
    # Combine to create human-readable + unique chart name
    chart_name = f"{sanitized_stack_name}-{cluster_id_suffix}"
    if opts.o.debug:
        print(f"Cluster ID: {cluster_id}")
        print(f"Chart name: {chart_name}")
    # 4. Get compose files from stack (pods + jobs)
    pods = get_pod_list(parsed_stack)
    if not pods:
        error_exit(f"No pods found in stack: {stack_path}")
    jobs = get_job_list(parsed_stack)
    if opts.o.debug:
        print(f"Found {len(pods)} pod(s) in stack: {pods}")
        if jobs:
            print(f"Found {len(jobs)} job(s) in stack: {jobs}")
    compose_files = []
    for pod in pods:
        pod_file = get_pod_file_path(stack_path, parsed_stack, pod)
        if not pod_file.exists():
            error_exit(f"Pod file not found: {pod_file}")
        compose_files.append(pod_file)
        if opts.o.debug:
            print(f"Found compose file: {pod_file.name}")
    # Add job compose files (removed unused job_files accumulator)
    for job in jobs:
        job_file = get_job_file_path(stack_path, parsed_stack, job)
        if not job_file.exists():
            error_exit(f"Job file not found: {job_file}")
        compose_files.append(job_file)
        if opts.o.debug:
            print(f"Found job compose file: {job_file.name}")
    try:
        version = get_kompose_version()
        print(f"Using kompose version: {version}")
    except Exception as e:
        error_exit(f"Failed to get kompose version: {e}")
    # 5. Create chart directory and invoke Kompose
    chart_dir = deployment_dir_path / "chart"
    print(f"Converting {len(compose_files)} compose file(s) to Helm chart using Kompose...")
    try:
        output = convert_to_helm_chart(
            compose_files=compose_files,
            output_dir=chart_dir,
            chart_name=chart_name
        )
        if opts.o.debug:
            print(f"Kompose output:\n{output}")
    except Exception as e:
        error_exit(f"Helm chart generation failed: {e}")
    # 6. Post-process generated chart
    _post_process_chart(chart_dir, chart_name, jobs)
    # 7. Generate README.md with basic installation instructions
    readme_content = f"""# {chart_name} Helm Chart
Generated by laconic-so from stack: `{stack_path}`
## Prerequisites
- Kubernetes cluster (v1.27+)
- Helm (v3.12+)
- kubectl configured to access your cluster
## Installation
```bash
# Install the chart
helm install {chart_name} {chart_dir}
# Alternatively, install with your own release name
# helm install <your-release-name> {chart_dir}
# Check deployment status
kubectl get pods
```
## Upgrade
To apply changes made to chart, perform upgrade:
```bash
helm upgrade {chart_name} {chart_dir}
```
## Uninstallation
```bash
helm uninstall {chart_name}
```
## Configuration
The chart was generated from Docker Compose files using Kompose.
### Customization
Edit the generated template files in `templates/` to customize:
- Image repositories and tags
- Resource limits (CPU, memory)
- Persistent volume sizes
- Replica counts
"""
    readme_path = chart_dir / "README.md"
    readme_path.write_text(readme_content)
    if opts.o.debug:
        print(f"Generated README: {readme_path}")
    # 8. Success message (was mis-numbered as a second "7.")
    print(f"\n{'=' * 60}")
    print("✓ Helm chart generated successfully!")
    print(f"{'=' * 60}")
    print("\nChart details:")
    print(f"  Name: {chart_name}")
    print(f"  Location: {chart_dir.absolute()}")
    print(f"  Stack: {stack_path}")
    # Count generated files
    template_files = list((chart_dir / "templates").glob("*.yaml")) if (chart_dir / "templates").exists() else []
    print(f"  Files: {len(template_files)} template(s) generated")
    print("\nDeployment directory structure:")
    print(f"  {deployment_dir_path}/")
    print("  ├── deployment.yml (cluster-id)")
    print("  ├── spec.yml (reference)")
    print("  ├── stack.yml (reference)")
    print("  └── chart/ (self-sufficient Helm chart)")
    print("\nNext steps:")
    print("  1. Review the chart:")
    print(f"     cd {chart_dir}")
    print("     cat Chart.yaml")
    print("")
    print("  2. Review generated templates:")
    print("     ls templates/")
    print("")
    print("  3. Install to Kubernetes:")
    print(f"     helm install {chart_name} {chart_dir}")
    print("")
    print("     # Or use your own release name")
    print(f"     helm install <your-release-name> {chart_dir}")
    print("")
    print("  4. Check deployment:")
    print("     kubectl get pods")
    print("")

View File

@ -0,0 +1,149 @@
# Copyright © 2025 Vulcanize
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import subprocess
import tempfile
import os
import json
from pathlib import Path
from stack_orchestrator.util import get_yaml
def get_release_name_from_chart(chart_dir: Path) -> str:
    """
    Read the chart name from Chart.yaml to use as the release name.

    Args:
        chart_dir: Path to the Helm chart directory

    Returns:
        Chart name from Chart.yaml

    Raises:
        Exception if Chart.yaml not found or name is missing
    """
    chart_yaml_path = chart_dir / "Chart.yaml"
    if not chart_yaml_path.exists():
        raise Exception(f"Chart.yaml not found: {chart_yaml_path}")
    yaml = get_yaml()
    # Close the file promptly instead of leaking the handle until GC
    with open(chart_yaml_path, "r") as f:
        chart_yaml = yaml.load(f)
    if "name" not in chart_yaml:
        raise Exception(f"Chart name not found in {chart_yaml_path}")
    return chart_yaml["name"]
def run_helm_job(
    chart_dir: Path,
    job_name: str,
    release: str = None,
    namespace: str = "default",
    timeout: int = 600,
    verbose: bool = False
) -> None:
    """
    Run a one-time job from a Helm chart.

    This function:
    1. Uses provided release name, or reads it from Chart.yaml if not provided
    2. Uses helm template to render the job manifest with the job enabled
    3. Applies the job manifest to the cluster
    4. Waits for the job to complete

    Args:
        chart_dir: Path to the Helm chart directory
        job_name: Name of the job to run (without -job suffix)
        release: Optional Helm release name (defaults to chart name from Chart.yaml)
        namespace: Kubernetes namespace
        timeout: Timeout in seconds for job completion (default: 600)
        verbose: Enable verbose output

    Raises:
        Exception if the job fails or times out
    """
    if not chart_dir.exists():
        raise Exception(f"Chart directory not found: {chart_dir}")
    # Use provided release name, or get it from Chart.yaml
    if release is None:
        release = get_release_name_from_chart(chart_dir)
        if verbose:
            print(f"Using release name from Chart.yaml: {release}")
    else:
        if verbose:
            print(f"Using provided release name: {release}")
    job_template_file = f"templates/{job_name}-job.yaml"
    if verbose:
        print(f"Running job '{job_name}' from helm chart: {chart_dir}")
    # Use helm template to render the job manifest
    # delete=False because kubectl reads the file by name after we flush it
    with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as tmp_file:
        try:
            # Render job template with job enabled
            # Use --set-json to properly handle job names with dashes
            jobs_dict = {job_name: {"enabled": True}}
            values_json = json.dumps(jobs_dict)
            helm_cmd = [
                "helm", "template", release, str(chart_dir),
                "--show-only", job_template_file,
                "--set-json", f"jobs={values_json}"
            ]
            if verbose:
                print(f"Running: {' '.join(helm_cmd)}")
            result = subprocess.run(helm_cmd, check=True, capture_output=True, text=True)
            tmp_file.write(result.stdout)
            tmp_file.flush()
            if verbose:
                print(f"Generated job manifest:\n{result.stdout}")
            # Parse the manifest to get the actual job name
            # (the rendered metadata.name may differ from job_name)
            yaml = get_yaml()
            manifest = yaml.load(result.stdout)
            actual_job_name = manifest.get("metadata", {}).get("name", job_name)
            # Apply the job manifest
            kubectl_apply_cmd = ["kubectl", "apply", "-f", tmp_file.name, "-n", namespace]
            subprocess.run(kubectl_apply_cmd, check=True, capture_output=True, text=True)
            if verbose:
                print(f"Job {actual_job_name} created, waiting for completion...")
            # Wait for job completion
            wait_cmd = [
                "kubectl", "wait", "--for=condition=complete",
                f"job/{actual_job_name}",
                f"--timeout={timeout}s",
                "-n", namespace
            ]
            subprocess.run(wait_cmd, check=True, capture_output=True, text=True)
            if verbose:
                print(f"Job {job_name} completed successfully")
        except subprocess.CalledProcessError as e:
            error_msg = e.stderr if e.stderr else str(e)
            raise Exception(f"Job failed: {error_msg}")
        finally:
            # Clean up temp file
            # NOTE(review): unlinking while the file is still open inside the
            # 'with' block works on POSIX but would fail on Windows — confirm
            # target platforms if that matters.
            if os.path.exists(tmp_file.name):
                os.unlink(tmp_file.name)

View File

@ -0,0 +1,109 @@
# Copyright © 2025 Vulcanize
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import subprocess
import shutil
from pathlib import Path
from typing import List
def check_kompose_available() -> bool:
    """Return True when a 'kompose' executable can be located on PATH."""
    located = shutil.which("kompose")
    return located is not None
def get_kompose_version() -> str:
    """
    Get the installed kompose version.

    Returns:
        Version string (e.g., "1.34.0"), or "unknown" if the output is empty.

    Raises:
        Exception if kompose is not available or the command fails.
    """
    # Same probe as check_kompose_available(), inlined here
    if shutil.which("kompose") is None:
        raise Exception("kompose not found in PATH")
    proc = subprocess.run(
        ["kompose", "version"],
        capture_output=True,
        text=True,
        timeout=10
    )
    if proc.returncode != 0:
        raise Exception(f"Failed to get kompose version: {proc.stderr}")
    # Output looks like "1.34.0 (HEAD)" or just "1.34.0" — keep the first token
    banner = proc.stdout.strip()
    if not banner:
        return "unknown"
    return banner.split()[0]
def convert_to_helm_chart(compose_files: List[Path], output_dir: Path, chart_name: str = None) -> str:
    """
    Invoke kompose to convert Docker Compose files to a Helm chart.

    Args:
        compose_files: List of paths to docker-compose.yml files
        output_dir: Directory where the Helm chart will be generated
        chart_name: Optional name for the chart (defaults to directory name)

    Returns:
        stdout from kompose command

    Raises:
        Exception if kompose conversion fails
    """
    if not check_kompose_available():
        raise Exception(
            "kompose not found in PATH. "
            "Install from: https://kompose.io/installation/"
        )
    # Make sure the target directory exists before kompose writes into it
    output_dir.mkdir(parents=True, exist_ok=True)
    # Assemble the invocation: one -f per compose file, then the chart flags
    command = ["kompose", "convert"]
    for source in compose_files:
        if not source.exists():
            raise Exception(f"Compose file not found: {source}")
        command += ["-f", str(source)]
    command += ["--chart", "-o", str(output_dir)]
    completed = subprocess.run(
        command,
        capture_output=True,
        text=True,
        timeout=60
    )
    if completed.returncode != 0:
        raise Exception(
            f"Kompose conversion failed:\n"
            f"Command: {' '.join(command)}\n"
            f"Error: {completed.stderr}"
        )
    return completed.stdout

View File

@ -91,7 +91,9 @@ def create_deployment(ctx, deployment_dir, image, url, kube_config, image_regist
deploy_command_context,
spec_file_name,
deployment_dir,
False,
None,
None
)
# Fix up the container tag inside the deployment compose file
_fixup_container_tag(deployment_dir, image)

View File

@ -78,6 +78,22 @@ def get_pod_list(parsed_stack):
return result
def get_job_list(parsed_stack):
    """Return the list of job names from a parsed stack config.

    Handles both forms that appear in stack files:
    - a plain list of strings: ["job-a", "job-b"]
    - a list of mappings with a "name" key: [{"name": "job-a"}, ...]
    Unlike the original (which only inspected the first element's type),
    entries are normalized one by one, so mixed lists also work.

    Args:
        parsed_stack: Parsed stack config mapping (the "jobs" key is optional).

    Returns:
        List of job name strings; empty list when no jobs are defined.
    """
    jobs = parsed_stack.get("jobs")
    if not jobs:
        return []
    return [job if isinstance(job, str) else job["name"] for job in jobs]
def get_plugin_code_paths(stack) -> List[Path]:
parsed_stack = get_parsed_stack_config(stack)
pods = parsed_stack["pods"]
@ -119,6 +135,21 @@ def resolve_compose_file(stack, pod_name: str):
return compose_base.joinpath(f"docker-compose-{pod_name}.yml")
# Find a job compose file in compose-jobs directory
def resolve_job_compose_file(stack, job_name: str):
    """Resolve the path to a job's compose file in the stack's compose-jobs dir.

    For external stacks, returns the file when it exists there; otherwise
    falls through and returns the same candidate path regardless (internal
    compose-jobs support is still a TODO).
    """
    candidate = Path(stack).parent.parent.joinpath("compose-jobs").joinpath(
        f"docker-compose-{job_name}.yml")
    if stack_is_external(stack):
        # First try looking in the external stack for the job compose file
        if candidate.exists():
            return candidate
        # If we don't find it fall through to the internal case
    # TODO: Add internal compose-jobs directory support if needed
    # For now, jobs are expected to be in external stacks only
    return candidate
def get_pod_file_path(stack, parsed_stack, pod_name: str):
pods = parsed_stack["pods"]
if type(pods[0]) is str:
@ -131,6 +162,18 @@ def get_pod_file_path(stack, parsed_stack, pod_name: str):
return result
def get_job_file_path(stack, parsed_stack, job_name: str):
    """Return the Path to a job's compose file, or None when no jobs exist.

    Both string-form and mapping-form job entries resolve identically today,
    so the original's redundant if/else on the first entry's type has been
    collapsed into a single call.

    Args:
        stack: Stack path/identifier passed through to the resolver.
        parsed_stack: Parsed stack config mapping.
        job_name: Name of the job whose compose file is wanted.
    """
    if "jobs" not in parsed_stack or not parsed_stack["jobs"]:
        return None
    # TODO: Support complex job definitions if needed
    return resolve_job_compose_file(stack, job_name)
def get_pod_script_paths(parsed_stack, pod_name: str):
pods = parsed_stack["pods"]
result = []

View File

@ -14,13 +14,8 @@ delete_cluster_exit () {
# Test basic stack-orchestrator deploy
echo "Running stack-orchestrator deploy test"
if [ "$1" == "from-path" ]; then
TEST_TARGET_SO="laconic-so"
else
TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
fi
# Bit of a hack, test the most recent package
TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
# Set a non-default repo dir
export CERC_REPO_BASE_DIR=~/stack-orchestrator-test/repo-base-dir
echo "Testing this package: $TEST_TARGET_SO"