Compare commits
1 Commits
main
...
pm-update-
| Author | SHA1 | Date | |
|---|---|---|---|
| 2fce823123 |
@ -1,23 +1,19 @@
|
|||||||
name: K8s Deployment Control Test
|
name: Fixturenet-Eth-Plugeth-Arm-Test
|
||||||
|
|
||||||
on:
|
on:
|
||||||
pull_request:
|
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
push:
|
push:
|
||||||
branches: '*'
|
branches: '*'
|
||||||
paths:
|
paths:
|
||||||
- '!**'
|
- '!**'
|
||||||
- '.gitea/workflows/triggers/test-k8s-deployment-control'
|
- '.gitea/workflows/triggers/fixturenet-eth-plugeth-arm-test'
|
||||||
- '.gitea/workflows/test-k8s-deployment-control.yml'
|
|
||||||
- 'tests/k8s-deployment-control/run-test.sh'
|
|
||||||
schedule: # Note: coordinate with other tests to not overload runners at the same time of day
|
schedule: # Note: coordinate with other tests to not overload runners at the same time of day
|
||||||
- cron: '3 30 * * *'
|
- cron: '2 14 * * *'
|
||||||
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
test:
|
test:
|
||||||
name: "Run deployment control suite on kind/k8s"
|
name: "Run an Ethereum plugeth fixturenet test"
|
||||||
runs-on: ubuntu-22.04
|
runs-on: ubuntu-latest-arm
|
||||||
steps:
|
steps:
|
||||||
- name: "Clone project repository"
|
- name: "Clone project repository"
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
@ -36,22 +32,13 @@ jobs:
|
|||||||
- name: "Print Python version"
|
- name: "Print Python version"
|
||||||
run: python3 --version
|
run: python3 --version
|
||||||
- name: "Install shiv"
|
- name: "Install shiv"
|
||||||
run: pip install shiv==1.0.6
|
run: pip install shiv
|
||||||
- name: "Generate build version file"
|
- name: "Generate build version file"
|
||||||
run: ./scripts/create_build_tag_file.sh
|
run: ./scripts/create_build_tag_file.sh
|
||||||
- name: "Build local shiv package"
|
- name: "Build local shiv package"
|
||||||
run: ./scripts/build_shiv_package.sh
|
run: ./scripts/build_shiv_package.sh
|
||||||
- name: "Check cgroups version"
|
- name: "Run fixturenet-eth tests"
|
||||||
run: mount | grep cgroup
|
run: ./tests/fixturenet-eth-plugeth/run-test.sh
|
||||||
- name: "Install kind"
|
|
||||||
run: ./tests/scripts/install-kind.sh
|
|
||||||
- name: "Install Kubectl"
|
|
||||||
run: ./tests/scripts/install-kubectl.sh
|
|
||||||
- name: "Run k8s deployment control test"
|
|
||||||
run: |
|
|
||||||
source /opt/bash-utils/cgroup-helper.sh
|
|
||||||
join_cgroup
|
|
||||||
./tests/k8s-deployment-control/run-test.sh
|
|
||||||
- name: Notify Vulcanize Slack on CI failure
|
- name: Notify Vulcanize Slack on CI failure
|
||||||
if: ${{ always() && github.ref_name == 'main' }}
|
if: ${{ always() && github.ref_name == 'main' }}
|
||||||
uses: ravsamhq/notify-slack-action@v2
|
uses: ravsamhq/notify-slack-action@v2
|
||||||
57
.gitea/workflows/fixturenet-eth-plugeth-test.yml
Normal file
57
.gitea/workflows/fixturenet-eth-plugeth-test.yml
Normal file
@ -0,0 +1,57 @@
|
|||||||
|
name: Fixturenet-Eth-Plugeth-Test
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: '*'
|
||||||
|
paths:
|
||||||
|
- '!**'
|
||||||
|
- '.gitea/workflows/triggers/fixturenet-eth-plugeth-test'
|
||||||
|
schedule: # Note: coordinate with other tests to not overload runners at the same time of day
|
||||||
|
- cron: '2 14 * * *'
|
||||||
|
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
test:
|
||||||
|
name: "Run an Ethereum plugeth fixturenet test"
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: "Clone project repository"
|
||||||
|
uses: actions/checkout@v3
|
||||||
|
# At present the stock setup-python action fails on Linux/aarch64
|
||||||
|
# Conditional steps below workaroud this by using deadsnakes for that case only
|
||||||
|
- name: "Install Python for ARM on Linux"
|
||||||
|
if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
|
||||||
|
uses: deadsnakes/action@v3.0.1
|
||||||
|
with:
|
||||||
|
python-version: '3.8'
|
||||||
|
- name: "Install Python cases other than ARM on Linux"
|
||||||
|
if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
|
||||||
|
uses: actions/setup-python@v4
|
||||||
|
with:
|
||||||
|
python-version: '3.8'
|
||||||
|
- name: "Print Python version"
|
||||||
|
run: python3 --version
|
||||||
|
- name: "Install shiv"
|
||||||
|
run: pip install shiv
|
||||||
|
- name: "Generate build version file"
|
||||||
|
run: ./scripts/create_build_tag_file.sh
|
||||||
|
- name: "Build local shiv package"
|
||||||
|
run: ./scripts/build_shiv_package.sh
|
||||||
|
- name: "Run fixturenet-eth tests"
|
||||||
|
run: ./tests/fixturenet-eth-plugeth/run-test.sh
|
||||||
|
- name: Notify Vulcanize Slack on CI failure
|
||||||
|
if: ${{ always() && github.ref_name == 'main' }}
|
||||||
|
uses: ravsamhq/notify-slack-action@v2
|
||||||
|
with:
|
||||||
|
status: ${{ job.status }}
|
||||||
|
notify_when: 'failure'
|
||||||
|
env:
|
||||||
|
SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
|
||||||
|
- name: Notify DeepStack Slack on CI failure
|
||||||
|
if: ${{ always() && github.ref_name == 'main' }}
|
||||||
|
uses: ravsamhq/notify-slack-action@v2
|
||||||
|
with:
|
||||||
|
status: ${{ job.status }}
|
||||||
|
notify_when: 'failure'
|
||||||
|
env:
|
||||||
|
SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
|
||||||
55
.gitea/workflows/fixturenet-eth-test.yml
Normal file
55
.gitea/workflows/fixturenet-eth-test.yml
Normal file
@ -0,0 +1,55 @@
|
|||||||
|
name: Fixturenet-Eth-Test
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: '*'
|
||||||
|
paths:
|
||||||
|
- '!**'
|
||||||
|
- '.gitea/workflows/triggers/fixturenet-eth-test'
|
||||||
|
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
test:
|
||||||
|
name: "Run an Ethereum fixturenet test"
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: "Clone project repository"
|
||||||
|
uses: actions/checkout@v3
|
||||||
|
# At present the stock setup-python action fails on Linux/aarch64
|
||||||
|
# Conditional steps below workaroud this by using deadsnakes for that case only
|
||||||
|
- name: "Install Python for ARM on Linux"
|
||||||
|
if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
|
||||||
|
uses: deadsnakes/action@v3.0.1
|
||||||
|
with:
|
||||||
|
python-version: '3.8'
|
||||||
|
- name: "Install Python cases other than ARM on Linux"
|
||||||
|
if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
|
||||||
|
uses: actions/setup-python@v4
|
||||||
|
with:
|
||||||
|
python-version: '3.8'
|
||||||
|
- name: "Print Python version"
|
||||||
|
run: python3 --version
|
||||||
|
- name: "Install shiv"
|
||||||
|
run: pip install shiv
|
||||||
|
- name: "Generate build version file"
|
||||||
|
run: ./scripts/create_build_tag_file.sh
|
||||||
|
- name: "Build local shiv package"
|
||||||
|
run: ./scripts/build_shiv_package.sh
|
||||||
|
- name: "Run fixturenet-eth tests"
|
||||||
|
run: ./tests/fixturenet-eth/run-test.sh
|
||||||
|
- name: Notify Vulcanize Slack on CI failure
|
||||||
|
if: ${{ always() && github.ref_name == 'main' }}
|
||||||
|
uses: ravsamhq/notify-slack-action@v2
|
||||||
|
with:
|
||||||
|
status: ${{ job.status }}
|
||||||
|
notify_when: 'failure'
|
||||||
|
env:
|
||||||
|
SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
|
||||||
|
- name: Notify DeepStack Slack on CI failure
|
||||||
|
if: ${{ always() && github.ref_name == 'main' }}
|
||||||
|
uses: ravsamhq/notify-slack-action@v2
|
||||||
|
with:
|
||||||
|
status: ${{ job.status }}
|
||||||
|
notify_when: 'failure'
|
||||||
|
env:
|
||||||
|
SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
|
||||||
@ -39,7 +39,7 @@ jobs:
|
|||||||
- name: "Print Python version"
|
- name: "Print Python version"
|
||||||
run: python3 --version
|
run: python3 --version
|
||||||
- name: "Install shiv"
|
- name: "Install shiv"
|
||||||
run: pip install shiv==1.0.6
|
run: pip install shiv
|
||||||
- name: "Generate build version file"
|
- name: "Generate build version file"
|
||||||
run: ./scripts/create_build_tag_file.sh
|
run: ./scripts/create_build_tag_file.sh
|
||||||
- name: "Build local shiv package"
|
- name: "Build local shiv package"
|
||||||
|
|||||||
@ -35,7 +35,7 @@ jobs:
|
|||||||
- name: "Print Python version"
|
- name: "Print Python version"
|
||||||
run: python3 --version
|
run: python3 --version
|
||||||
- name: "Install shiv"
|
- name: "Install shiv"
|
||||||
run: pip install shiv==1.0.6
|
run: pip install shiv
|
||||||
- name: "Build local shiv package"
|
- name: "Build local shiv package"
|
||||||
id: build
|
id: build
|
||||||
run: |
|
run: |
|
||||||
|
|||||||
@ -33,7 +33,7 @@ jobs:
|
|||||||
- name: "Print Python version"
|
- name: "Print Python version"
|
||||||
run: python3 --version
|
run: python3 --version
|
||||||
- name: "Install shiv"
|
- name: "Install shiv"
|
||||||
run: pip install shiv==1.0.6
|
run: pip install shiv
|
||||||
- name: "Generate build version file"
|
- name: "Generate build version file"
|
||||||
run: ./scripts/create_build_tag_file.sh
|
run: ./scripts/create_build_tag_file.sh
|
||||||
- name: "Build local shiv package"
|
- name: "Build local shiv package"
|
||||||
|
|||||||
@ -33,7 +33,7 @@ jobs:
|
|||||||
- name: "Print Python version"
|
- name: "Print Python version"
|
||||||
run: python3 --version
|
run: python3 --version
|
||||||
- name: "Install shiv"
|
- name: "Install shiv"
|
||||||
run: pip install shiv==1.0.6
|
run: pip install shiv
|
||||||
- name: "Generate build version file"
|
- name: "Generate build version file"
|
||||||
run: ./scripts/create_build_tag_file.sh
|
run: ./scripts/create_build_tag_file.sh
|
||||||
- name: "Build local shiv package"
|
- name: "Build local shiv package"
|
||||||
|
|||||||
@ -2,8 +2,7 @@ name: Deploy Test
|
|||||||
|
|
||||||
on:
|
on:
|
||||||
pull_request:
|
pull_request:
|
||||||
branches:
|
branches: '*'
|
||||||
- main
|
|
||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- main
|
- main
|
||||||
@ -34,7 +33,7 @@ jobs:
|
|||||||
- name: "Print Python version"
|
- name: "Print Python version"
|
||||||
run: python3 --version
|
run: python3 --version
|
||||||
- name: "Install shiv"
|
- name: "Install shiv"
|
||||||
run: pip install shiv==1.0.6
|
run: pip install shiv
|
||||||
- name: "Generate build version file"
|
- name: "Generate build version file"
|
||||||
run: ./scripts/create_build_tag_file.sh
|
run: ./scripts/create_build_tag_file.sh
|
||||||
- name: "Build local shiv package"
|
- name: "Build local shiv package"
|
||||||
|
|||||||
@ -33,7 +33,7 @@ jobs:
|
|||||||
- name: "Print Python version"
|
- name: "Print Python version"
|
||||||
run: python3 --version
|
run: python3 --version
|
||||||
- name: "Install shiv"
|
- name: "Install shiv"
|
||||||
run: pip install shiv==1.0.6
|
run: pip install shiv
|
||||||
- name: "Generate build version file"
|
- name: "Generate build version file"
|
||||||
run: ./scripts/create_build_tag_file.sh
|
run: ./scripts/create_build_tag_file.sh
|
||||||
- name: "Build local shiv package"
|
- name: "Build local shiv package"
|
||||||
|
|||||||
@ -2,8 +2,7 @@ name: K8s Deploy Test
|
|||||||
|
|
||||||
on:
|
on:
|
||||||
pull_request:
|
pull_request:
|
||||||
branches:
|
branches: '*'
|
||||||
- main
|
|
||||||
push:
|
push:
|
||||||
branches: '*'
|
branches: '*'
|
||||||
paths:
|
paths:
|
||||||
@ -36,7 +35,7 @@ jobs:
|
|||||||
- name: "Print Python version"
|
- name: "Print Python version"
|
||||||
run: python3 --version
|
run: python3 --version
|
||||||
- name: "Install shiv"
|
- name: "Install shiv"
|
||||||
run: pip install shiv==1.0.6
|
run: pip install shiv
|
||||||
- name: "Generate build version file"
|
- name: "Generate build version file"
|
||||||
run: ./scripts/create_build_tag_file.sh
|
run: ./scripts/create_build_tag_file.sh
|
||||||
- name: "Build local shiv package"
|
- name: "Build local shiv package"
|
||||||
|
|||||||
@ -2,8 +2,7 @@ name: Webapp Test
|
|||||||
|
|
||||||
on:
|
on:
|
||||||
pull_request:
|
pull_request:
|
||||||
branches:
|
branches: '*'
|
||||||
- main
|
|
||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- main
|
- main
|
||||||
@ -33,7 +32,7 @@ jobs:
|
|||||||
- name: "Print Python version"
|
- name: "Print Python version"
|
||||||
run: python3 --version
|
run: python3 --version
|
||||||
- name: "Install shiv"
|
- name: "Install shiv"
|
||||||
run: pip install shiv==1.0.6
|
run: pip install shiv
|
||||||
- name: "Generate build version file"
|
- name: "Generate build version file"
|
||||||
run: ./scripts/create_build_tag_file.sh
|
run: ./scripts/create_build_tag_file.sh
|
||||||
- name: "Build local shiv package"
|
- name: "Build local shiv package"
|
||||||
|
|||||||
@ -33,7 +33,7 @@ jobs:
|
|||||||
- name: "Print Python version"
|
- name: "Print Python version"
|
||||||
run: python3 --version
|
run: python3 --version
|
||||||
- name: "Install shiv"
|
- name: "Install shiv"
|
||||||
run: pip install shiv==1.0.6
|
run: pip install shiv
|
||||||
- name: "Generate build version file"
|
- name: "Generate build version file"
|
||||||
run: ./scripts/create_build_tag_file.sh
|
run: ./scripts/create_build_tag_file.sh
|
||||||
- name: "Build local shiv package"
|
- name: "Build local shiv package"
|
||||||
|
|||||||
@ -0,0 +1,2 @@
|
|||||||
|
Change this file to trigger running the fixturenet-eth-plugeth-arm-test CI job
|
||||||
|
|
||||||
3
.gitea/workflows/triggers/fixturenet-eth-plugeth-test
Normal file
3
.gitea/workflows/triggers/fixturenet-eth-plugeth-test
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
Change this file to trigger running the fixturenet-eth-plugeth-test CI job
|
||||||
|
trigger
|
||||||
|
trigger
|
||||||
2
.gitea/workflows/triggers/fixturenet-eth-test
Normal file
2
.gitea/workflows/triggers/fixturenet-eth-test
Normal file
@ -0,0 +1,2 @@
|
|||||||
|
Change this file to trigger running the fixturenet-eth-test CI job
|
||||||
|
|
||||||
@ -7,4 +7,3 @@ Trigger
|
|||||||
Trigger
|
Trigger
|
||||||
Trigger
|
Trigger
|
||||||
Trigger
|
Trigger
|
||||||
Trigger
|
|
||||||
|
|||||||
@ -1,3 +1 @@
|
|||||||
Change this file to trigger running the test-container-registry CI job
|
Change this file to trigger running the test-container-registry CI job
|
||||||
Triggered: 2026-01-21
|
|
||||||
Triggered: 2026-01-21 19:28:29
|
|
||||||
|
|||||||
@ -1 +1,2 @@
|
|||||||
Change this file to trigger running the fixturenet-eth-test CI job
|
Change this file to trigger running the fixturenet-eth-test CI job
|
||||||
|
|
||||||
|
|||||||
@ -1,34 +0,0 @@
|
|||||||
repos:
|
|
||||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
|
||||||
rev: v5.0.0
|
|
||||||
hooks:
|
|
||||||
- id: trailing-whitespace
|
|
||||||
- id: end-of-file-fixer
|
|
||||||
- id: check-yaml
|
|
||||||
args: ['--allow-multiple-documents']
|
|
||||||
- id: check-json
|
|
||||||
- id: check-merge-conflict
|
|
||||||
- id: check-added-large-files
|
|
||||||
|
|
||||||
- repo: https://github.com/psf/black
|
|
||||||
rev: 23.12.1
|
|
||||||
hooks:
|
|
||||||
- id: black
|
|
||||||
language_version: python3
|
|
||||||
|
|
||||||
- repo: https://github.com/PyCQA/flake8
|
|
||||||
rev: 7.1.1
|
|
||||||
hooks:
|
|
||||||
- id: flake8
|
|
||||||
args: ['--max-line-length=88', '--extend-ignore=E203,W503,E402']
|
|
||||||
|
|
||||||
- repo: https://github.com/RobertCraigie/pyright-python
|
|
||||||
rev: v1.1.345
|
|
||||||
hooks:
|
|
||||||
- id: pyright
|
|
||||||
|
|
||||||
- repo: https://github.com/adrienverge/yamllint
|
|
||||||
rev: v1.35.1
|
|
||||||
hooks:
|
|
||||||
- id: yamllint
|
|
||||||
args: [-d, relaxed]
|
|
||||||
@ -1,151 +0,0 @@
|
|||||||
# Plan: Make Stack-Orchestrator AI-Friendly
|
|
||||||
|
|
||||||
## Goal
|
|
||||||
|
|
||||||
Make the stack-orchestrator repository easier for AI tools (Claude Code, Cursor, Copilot) to understand and use for generating stacks, including adding a `create-stack` command.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Part 1: Documentation & Context Files
|
|
||||||
|
|
||||||
### 1.1 Add CLAUDE.md
|
|
||||||
|
|
||||||
Create a root-level context file for AI assistants.
|
|
||||||
|
|
||||||
**File:** `CLAUDE.md`
|
|
||||||
|
|
||||||
Contents:
|
|
||||||
- Project overview (what stack-orchestrator does)
|
|
||||||
- Stack creation workflow (step-by-step)
|
|
||||||
- File naming conventions
|
|
||||||
- Required vs optional fields in stack.yml
|
|
||||||
- Common patterns and anti-patterns
|
|
||||||
- Links to example stacks (simple, medium, complex)
|
|
||||||
|
|
||||||
### 1.2 Add JSON Schema for stack.yml
|
|
||||||
|
|
||||||
Create formal validation schema.
|
|
||||||
|
|
||||||
**File:** `schemas/stack-schema.json`
|
|
||||||
|
|
||||||
Benefits:
|
|
||||||
- AI tools can validate generated stacks
|
|
||||||
- IDEs provide autocomplete
|
|
||||||
- CI can catch errors early
|
|
||||||
|
|
||||||
### 1.3 Add Template Stack with Comments
|
|
||||||
|
|
||||||
Create an annotated template for reference.
|
|
||||||
|
|
||||||
**File:** `stack_orchestrator/data/stacks/_template/stack.yml`
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
# Stack definition template - copy this directory to create a new stack
|
|
||||||
version: "1.2" # Required: 1.0, 1.1, or 1.2
|
|
||||||
name: my-stack # Required: lowercase, hyphens only
|
|
||||||
description: "Human-readable description" # Optional
|
|
||||||
repos: # Git repositories to clone
|
|
||||||
- github.com/org/repo
|
|
||||||
containers: # Container images to build (must have matching container-build/)
|
|
||||||
- cerc/my-container
|
|
||||||
pods: # Deployment units (must have matching docker-compose-{pod}.yml)
|
|
||||||
- my-pod
|
|
||||||
```
|
|
||||||
|
|
||||||
### 1.4 Document Validation Rules
|
|
||||||
|
|
||||||
Create explicit documentation of constraints currently scattered in code.
|
|
||||||
|
|
||||||
**File:** `docs/stack-format.md`
|
|
||||||
|
|
||||||
Contents:
|
|
||||||
- Container names must start with `cerc/`
|
|
||||||
- Pod names must match compose file: `docker-compose-{pod}.yml`
|
|
||||||
- Repository format: `host/org/repo[@ref]`
|
|
||||||
- Stack directory name should match `name` field
|
|
||||||
- Version field options and differences
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Part 2: Add `create-stack` Command
|
|
||||||
|
|
||||||
### 2.1 Command Overview
|
|
||||||
|
|
||||||
```bash
|
|
||||||
laconic-so create-stack --repo github.com/org/my-app [--name my-app] [--type webapp]
|
|
||||||
```
|
|
||||||
|
|
||||||
**Behavior:**
|
|
||||||
1. Parse repo URL to extract app name (if --name not provided)
|
|
||||||
2. Create `stacks/{name}/stack.yml`
|
|
||||||
3. Create `container-build/cerc-{name}/Dockerfile` and `build.sh`
|
|
||||||
4. Create `compose/docker-compose-{name}.yml`
|
|
||||||
5. Update list files (repository-list.txt, container-image-list.txt, pod-list.txt)
|
|
||||||
|
|
||||||
### 2.2 Files to Create
|
|
||||||
|
|
||||||
| File | Purpose |
|
|
||||||
|------|---------|
|
|
||||||
| `stack_orchestrator/create/__init__.py` | Package init |
|
|
||||||
| `stack_orchestrator/create/create_stack.py` | Command implementation |
|
|
||||||
|
|
||||||
### 2.3 Files to Modify
|
|
||||||
|
|
||||||
| File | Change |
|
|
||||||
|------|--------|
|
|
||||||
| `stack_orchestrator/main.py` | Add import and `cli.add_command()` |
|
|
||||||
|
|
||||||
### 2.4 Command Options
|
|
||||||
|
|
||||||
| Option | Required | Description |
|
|
||||||
|--------|----------|-------------|
|
|
||||||
| `--repo` | Yes | Git repository URL (e.g., github.com/org/repo) |
|
|
||||||
| `--name` | No | Stack name (defaults to repo name) |
|
|
||||||
| `--type` | No | Template type: webapp, service, empty (default: webapp) |
|
|
||||||
| `--force` | No | Overwrite existing files |
|
|
||||||
|
|
||||||
### 2.5 Template Types
|
|
||||||
|
|
||||||
| Type | Base Image | Port | Use Case |
|
|
||||||
|------|------------|------|----------|
|
|
||||||
| webapp | node:20-bullseye-slim | 3000 | React/Vue/Next.js apps |
|
|
||||||
| service | python:3.11-slim | 8080 | Python backend services |
|
|
||||||
| empty | none | none | Custom from scratch |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Part 3: Implementation Summary
|
|
||||||
|
|
||||||
### New Files (6)
|
|
||||||
|
|
||||||
1. `CLAUDE.md` - AI assistant context
|
|
||||||
2. `schemas/stack-schema.json` - Validation schema
|
|
||||||
3. `stack_orchestrator/data/stacks/_template/stack.yml` - Annotated template
|
|
||||||
4. `docs/stack-format.md` - Stack format documentation
|
|
||||||
5. `stack_orchestrator/create/__init__.py` - Package init
|
|
||||||
6. `stack_orchestrator/create/create_stack.py` - Command implementation
|
|
||||||
|
|
||||||
### Modified Files (1)
|
|
||||||
|
|
||||||
1. `stack_orchestrator/main.py` - Register create-stack command
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Verification
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# 1. Command appears in help
|
|
||||||
laconic-so --help | grep create-stack
|
|
||||||
|
|
||||||
# 2. Dry run works
|
|
||||||
laconic-so --dry-run create-stack --repo github.com/org/test-app
|
|
||||||
|
|
||||||
# 3. Creates all expected files
|
|
||||||
laconic-so create-stack --repo github.com/org/test-app
|
|
||||||
ls stack_orchestrator/data/stacks/test-app/
|
|
||||||
ls stack_orchestrator/data/container-build/cerc-test-app/
|
|
||||||
ls stack_orchestrator/data/compose/docker-compose-test-app.yml
|
|
||||||
|
|
||||||
# 4. Build works with generated stack
|
|
||||||
laconic-so --stack test-app build-containers
|
|
||||||
```
|
|
||||||
121
CLAUDE.md
121
CLAUDE.md
@ -1,121 +0,0 @@
|
|||||||
# CLAUDE.md
|
|
||||||
|
|
||||||
This file provides guidance to Claude Code when working with the stack-orchestrator project.
|
|
||||||
|
|
||||||
## Some rules to follow
|
|
||||||
NEVER speculate about the cause of something
|
|
||||||
NEVER assume your hypotheses are true without evidence
|
|
||||||
|
|
||||||
ALWAYS clearly state when something is a hypothesis
|
|
||||||
ALWAYS use evidence from the systems your interacting with to support your claims and hypotheses
|
|
||||||
ALWAYS run `pre-commit run --all-files` before committing changes
|
|
||||||
|
|
||||||
## Key Principles
|
|
||||||
|
|
||||||
### Development Guidelines
|
|
||||||
- **Single responsibility** - Each component has one clear purpose
|
|
||||||
- **Fail fast** - Let errors propagate, don't hide failures
|
|
||||||
- **DRY/KISS** - Minimize duplication and complexity
|
|
||||||
|
|
||||||
## Development Philosophy: Conversational Literate Programming
|
|
||||||
|
|
||||||
### Approach
|
|
||||||
This project follows principles inspired by literate programming, where development happens through explanatory conversation rather than code-first implementation.
|
|
||||||
|
|
||||||
### Core Principles
|
|
||||||
- **Documentation-First**: All changes begin with discussion of intent and reasoning
|
|
||||||
- **Narrative-Driven**: Complex systems are explained through conversational exploration
|
|
||||||
- **Justification Required**: Every coding task must have a corresponding TODO.md item explaining the "why"
|
|
||||||
- **Iterative Understanding**: Architecture and implementation evolve through dialogue
|
|
||||||
|
|
||||||
### Working Method
|
|
||||||
1. **Explore and Understand**: Read existing code to understand current state
|
|
||||||
2. **Discuss Architecture**: Workshop complex design decisions through conversation
|
|
||||||
3. **Document Intent**: Update TODO.md with clear justification before coding
|
|
||||||
4. **Explain Changes**: Each modification includes reasoning and context
|
|
||||||
5. **Maintain Narrative**: Conversations serve as living documentation of design evolution
|
|
||||||
|
|
||||||
### Implementation Guidelines
|
|
||||||
- Treat conversations as primary documentation
|
|
||||||
- Explain architectural decisions before implementing
|
|
||||||
- Use TODO.md as the "literate document" that justifies all work
|
|
||||||
- Maintain clear narrative threads across sessions
|
|
||||||
- Workshop complex ideas before coding
|
|
||||||
|
|
||||||
This approach treats the human-AI collaboration as a form of **conversational literate programming** where understanding emerges through dialogue before code implementation.
|
|
||||||
|
|
||||||
## External Stacks Preferred
|
|
||||||
|
|
||||||
When creating new stacks for any reason, **use the external stack pattern** rather than adding stacks directly to this repository.
|
|
||||||
|
|
||||||
External stacks follow this structure:
|
|
||||||
|
|
||||||
```
|
|
||||||
my-stack/
|
|
||||||
└── stack-orchestrator/
|
|
||||||
├── stacks/
|
|
||||||
│ └── my-stack/
|
|
||||||
│ ├── stack.yml
|
|
||||||
│ └── README.md
|
|
||||||
├── compose/
|
|
||||||
│ └── docker-compose-my-stack.yml
|
|
||||||
└── config/
|
|
||||||
└── my-stack/
|
|
||||||
└── (config files)
|
|
||||||
```
|
|
||||||
|
|
||||||
### Usage
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Fetch external stack
|
|
||||||
laconic-so fetch-stack github.com/org/my-stack
|
|
||||||
|
|
||||||
# Use external stack
|
|
||||||
STACK_PATH=~/cerc/my-stack/stack-orchestrator/stacks/my-stack
|
|
||||||
laconic-so --stack $STACK_PATH deploy init --output spec.yml
|
|
||||||
laconic-so --stack $STACK_PATH deploy create --spec-file spec.yml --deployment-dir deployment
|
|
||||||
laconic-so deployment --dir deployment start
|
|
||||||
```
|
|
||||||
|
|
||||||
### Examples
|
|
||||||
|
|
||||||
- `zenith-karma-stack` - Karma watcher deployment
|
|
||||||
- `urbit-stack` - Fake Urbit ship for testing
|
|
||||||
- `zenith-desk-stack` - Desk deployment stack
|
|
||||||
|
|
||||||
## Architecture: k8s-kind Deployments
|
|
||||||
|
|
||||||
### One Cluster Per Host
|
|
||||||
One Kind cluster per host by design. Never request or expect separate clusters.
|
|
||||||
|
|
||||||
- `create_cluster()` in `helpers.py` reuses any existing cluster
|
|
||||||
- `cluster-id` in deployment.yml is an identifier, not a cluster request
|
|
||||||
- All deployments share: ingress controller, etcd, certificates
|
|
||||||
|
|
||||||
### Stack Resolution
|
|
||||||
- External stacks detected via `Path(stack).exists()` in `util.py`
|
|
||||||
- Config/compose resolution: external path first, then internal fallback
|
|
||||||
- External path structure: `stack_orchestrator/data/stacks/<name>/stack.yml`
|
|
||||||
|
|
||||||
### Secret Generation Implementation
|
|
||||||
- `GENERATE_TOKEN_PATTERN` in `deployment_create.py` matches `$generate:type:length$`
|
|
||||||
- `_generate_and_store_secrets()` creates K8s Secret
|
|
||||||
- `cluster_info.py` adds `envFrom` with `secretRef` to containers
|
|
||||||
- Non-secret config written to `config.env`
|
|
||||||
|
|
||||||
### Repository Cloning
|
|
||||||
`setup-repositories --git-ssh` clones repos defined in stack.yml's `repos:` field. Requires SSH agent.
|
|
||||||
|
|
||||||
### Key Files (for codebase navigation)
|
|
||||||
- `repos/setup_repositories.py`: `setup-repositories` command (git clone)
|
|
||||||
- `deployment_create.py`: `deploy create` command, secret generation
|
|
||||||
- `deployment.py`: `deployment start/stop/restart` commands
|
|
||||||
- `deploy_k8s.py`: K8s deployer, cluster management calls
|
|
||||||
- `helpers.py`: `create_cluster()`, etcd cleanup, kind operations
|
|
||||||
- `cluster_info.py`: K8s resource generation (Deployment, Service, Ingress)
|
|
||||||
|
|
||||||
## Insights and Observations
|
|
||||||
|
|
||||||
### Design Principles
|
|
||||||
- **When something times out that doesn't mean it needs a longer timeout it means something that was expected never happened, not that we need to wait longer for it.**
|
|
||||||
- **NEVER change a timeout because you believe something truncated, you don't understand timeouts, don't edit them unless told to explicitly by user.**
|
|
||||||
55
README.md
55
README.md
@ -71,59 +71,6 @@ The various [stacks](/stack_orchestrator/data/stacks) each contain instructions
|
|||||||
- [laconicd with console and CLI](stack_orchestrator/data/stacks/fixturenet-laconic-loaded)
|
- [laconicd with console and CLI](stack_orchestrator/data/stacks/fixturenet-laconic-loaded)
|
||||||
- [kubo (IPFS)](stack_orchestrator/data/stacks/kubo)
|
- [kubo (IPFS)](stack_orchestrator/data/stacks/kubo)
|
||||||
|
|
||||||
## Deployment Types
|
|
||||||
|
|
||||||
- **compose**: Docker Compose on local machine
|
|
||||||
- **k8s**: External Kubernetes cluster (requires kubeconfig)
|
|
||||||
- **k8s-kind**: Local Kubernetes via Kind - one cluster per host, shared by all deployments
|
|
||||||
|
|
||||||
## External Stacks
|
|
||||||
|
|
||||||
Stacks can live in external git repositories. Required structure:
|
|
||||||
|
|
||||||
```
|
|
||||||
<repo>/
|
|
||||||
stack_orchestrator/data/
|
|
||||||
stacks/<stack-name>/stack.yml
|
|
||||||
compose/docker-compose-<pod-name>.yml
|
|
||||||
deployment/spec.yml
|
|
||||||
```
|
|
||||||
|
|
||||||
## Deployment Commands
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Create deployment from spec
|
|
||||||
laconic-so --stack <path> deploy create --spec-file <spec.yml> --deployment-dir <dir>
|
|
||||||
|
|
||||||
# Start (creates cluster on first run)
|
|
||||||
laconic-so deployment --dir <dir> start
|
|
||||||
|
|
||||||
# GitOps restart (git pull + redeploy, preserves data)
|
|
||||||
laconic-so deployment --dir <dir> restart
|
|
||||||
|
|
||||||
# Stop
|
|
||||||
laconic-so deployment --dir <dir> stop
|
|
||||||
```
|
|
||||||
|
|
||||||
## spec.yml Reference
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
stack: stack-name-or-path
|
|
||||||
deploy-to: k8s-kind
|
|
||||||
network:
|
|
||||||
http-proxy:
|
|
||||||
- host-name: app.example.com
|
|
||||||
routes:
|
|
||||||
- path: /
|
|
||||||
proxy-to: service-name:port
|
|
||||||
acme-email: admin@example.com
|
|
||||||
config:
|
|
||||||
ENV_VAR: value
|
|
||||||
SECRET_VAR: $generate:hex:32$ # Auto-generated, stored in K8s Secret
|
|
||||||
volumes:
|
|
||||||
volume-name:
|
|
||||||
```
|
|
||||||
|
|
||||||
## Contributing
|
## Contributing
|
||||||
|
|
||||||
See the [CONTRIBUTING.md](/docs/CONTRIBUTING.md) for developer mode install.
|
See the [CONTRIBUTING.md](/docs/CONTRIBUTING.md) for developer mode install.
|
||||||
@ -131,3 +78,5 @@ See the [CONTRIBUTING.md](/docs/CONTRIBUTING.md) for developer mode install.
|
|||||||
## Platform Support
|
## Platform Support
|
||||||
|
|
||||||
Native aarm64 is _not_ currently supported. x64 emulation on ARM64 macos should work (not yet tested).
|
Native aarm64 is _not_ currently supported. x64 emulation on ARM64 macos should work (not yet tested).
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@ -1,413 +0,0 @@
|
|||||||
# Implementing `laconic-so create-stack` Command
|
|
||||||
|
|
||||||
A plan for adding a new CLI command to scaffold stack files automatically.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Overview
|
|
||||||
|
|
||||||
Add a `create-stack` command that generates all required files for a new stack:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
laconic-so create-stack --name my-stack --type webapp
|
|
||||||
```
|
|
||||||
|
|
||||||
**Output:**
|
|
||||||
```
|
|
||||||
stack_orchestrator/data/
|
|
||||||
├── stacks/my-stack/stack.yml
|
|
||||||
├── container-build/cerc-my-stack/
|
|
||||||
│ ├── Dockerfile
|
|
||||||
│ └── build.sh
|
|
||||||
└── compose/docker-compose-my-stack.yml
|
|
||||||
|
|
||||||
Updated: repository-list.txt, container-image-list.txt, pod-list.txt
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## CLI Architecture Summary
|
|
||||||
|
|
||||||
### Command Registration Pattern
|
|
||||||
|
|
||||||
Commands are Click functions registered in `main.py`:
|
|
||||||
|
|
||||||
```python
|
|
||||||
# main.py (line ~70)
|
|
||||||
from stack_orchestrator.create import create_stack
|
|
||||||
cli.add_command(create_stack.command, "create-stack")
|
|
||||||
```
|
|
||||||
|
|
||||||
### Global Options Access
|
|
||||||
|
|
||||||
```python
|
|
||||||
from stack_orchestrator.opts import opts
|
|
||||||
|
|
||||||
if not opts.o.quiet:
|
|
||||||
print("message")
|
|
||||||
if opts.o.dry_run:
|
|
||||||
print("(would create files)")
|
|
||||||
```
|
|
||||||
|
|
||||||
### Key Utilities
|
|
||||||
|
|
||||||
| Function | Location | Purpose |
|
|
||||||
|----------|----------|---------|
|
|
||||||
| `get_yaml()` | `util.py` | YAML parser (ruamel.yaml) |
|
|
||||||
| `get_stack_path(stack)` | `util.py` | Resolve stack directory path |
|
|
||||||
| `error_exit(msg)` | `util.py` | Print error and exit(1) |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Files to Create
|
|
||||||
|
|
||||||
### 1. Command Module
|
|
||||||
|
|
||||||
**`stack_orchestrator/create/__init__.py`**
|
|
||||||
```python
|
|
||||||
# Empty file to make this a package
|
|
||||||
```
|
|
||||||
|
|
||||||
**`stack_orchestrator/create/create_stack.py`**
|
|
||||||
```python
|
|
||||||
import click
|
|
||||||
import os
|
|
||||||
from pathlib import Path
|
|
||||||
from shutil import copy
|
|
||||||
from stack_orchestrator.opts import opts
|
|
||||||
from stack_orchestrator.util import error_exit, get_yaml
|
|
||||||
|
|
||||||
# Template types
|
|
||||||
STACK_TEMPLATES = {
|
|
||||||
"webapp": {
|
|
||||||
"description": "Web application with Node.js",
|
|
||||||
"base_image": "node:20-bullseye-slim",
|
|
||||||
"port": 3000,
|
|
||||||
},
|
|
||||||
"service": {
|
|
||||||
"description": "Backend service",
|
|
||||||
"base_image": "python:3.11-slim",
|
|
||||||
"port": 8080,
|
|
||||||
},
|
|
||||||
"empty": {
|
|
||||||
"description": "Minimal stack with no defaults",
|
|
||||||
"base_image": None,
|
|
||||||
"port": None,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
def get_data_dir() -> Path:
    """Return the absolute path of the bundled ``stack_orchestrator/data`` directory.

    The path is resolved relative to this module's own location, so the
    result is correct regardless of the caller's working directory.
    """
    module_path = Path(__file__).absolute()
    return module_path.parent.parent / "data"
|
|
||||||
|
|
||||||
|
|
||||||
def validate_stack_name(name: str) -> None:
    """Validate that *name* follows stack naming conventions.

    Rules:
      - lowercase alphanumeric plus hyphens, starting and ending with an
        alphanumeric character (the regex therefore requires length >= 2)
      - must not carry the ``cerc-`` prefix, which is reserved for the
        generated container image names

    Calls ``error_exit`` (which terminates the process) on violation;
    returns None when the name is acceptable.
    """
    import re
    # Bug fix: the check was previously ``not re.match(...) and len(name) > 2``,
    # which silently accepted invalid short names such as "-" or "A".
    # The regex alone already enforces the minimum length of 2.
    if not re.match(r'^[a-z0-9][a-z0-9-]*[a-z0-9]$', name):
        error_exit(f"Invalid stack name '{name}'. Use lowercase alphanumeric with hyphens.")
    if name.startswith("cerc-"):
        error_exit("Stack name should not start with 'cerc-' (container names will add this prefix)")
|
|
||||||
|
|
||||||
|
|
||||||
def create_stack_yml(stack_dir: Path, name: str, template: dict, repo_url: str) -> None:
    """Write ``stack.yml`` for the new stack into *stack_dir*.

    The directory is created if it does not exist.  A falsy *repo_url*
    yields an empty ``repos`` list.  The template's description is used
    when present, otherwise a generic one is synthesized from *name*.
    """
    stack_dir.mkdir(parents=True, exist_ok=True)

    description = template.get("description", f"Stack: {name}")
    repos = [repo_url] if repo_url else []
    stack_config = {
        "version": "1.2",
        "name": name,
        "description": description,
        "repos": repos,
        "containers": [f"cerc/{name}"],
        "pods": [name],
    }

    with open(stack_dir / "stack.yml", "w") as yml_file:
        get_yaml().dump(stack_config, yml_file)
|
|
||||||
|
|
||||||
|
|
||||||
def create_dockerfile(container_dir: Path, name: str, template: dict) -> None:
    """Write a two-stage Node.js ``Dockerfile`` into *container_dir*.

    The directory is created if needed.  *template* supplies
    ``base_image`` and ``port``; missing OR explicitly-None values fall
    back to the webapp defaults.

    NOTE(review): the generated file always uses npm commands even for
    non-Node templates (e.g. "service" with a Python base image) — the
    scaffold is a starting point the user is expected to customize.
    """
    # Bug fix: use ``or`` rather than a dict.get() default.  The "empty"
    # template stores base_image/port as None (key present), so the old
    # ``template.get("base_image", default)`` returned None and produced a
    # broken "FROM None" / "EXPOSE None" Dockerfile.
    base_image = template.get("base_image") or "node:20-bullseye-slim"
    port = template.get("port") or 3000

    dockerfile_content = f'''# Build stage
FROM {base_image} AS builder

WORKDIR /app
COPY package*.json ./
RUN npm ci
COPY . .
RUN npm run build

# Production stage
FROM {base_image}

WORKDIR /app
COPY package*.json ./
RUN npm ci --only=production
COPY --from=builder /app/dist ./dist

EXPOSE {port}
CMD ["npm", "run", "start"]
'''

    container_dir.mkdir(parents=True, exist_ok=True)
    with open(container_dir / "Dockerfile", "w") as f:
        f.write(dockerfile_content)
|
|
||||||
|
|
||||||
|
|
||||||
def create_build_script(container_dir: Path, name: str) -> None:
    """Write an executable ``build.sh`` into *container_dir*.

    The generated script sources the shared build-base helper and runs
    ``docker build`` tagging the image ``cerc/{name}:local``.  The file is
    chmod-ed to 0o755 so the build machinery can execute it directly.
    *container_dir* must already exist (create_dockerfile creates it).
    """
    script_text = f'''#!/usr/bin/env bash
# Build cerc/{name}

source ${{CERC_CONTAINER_BASE_DIR}}/build-base.sh

SCRIPT_DIR=$( cd -- "$( dirname -- "${{BASH_SOURCE[0]}}" )" &> /dev/null && pwd )

docker build -t cerc/{name}:local \\
-f ${{SCRIPT_DIR}}/Dockerfile \\
${{build_command_args}} \\
${{CERC_REPO_BASE_DIR}}/{name}
'''

    script_path = container_dir / "build.sh"
    with open(script_path, "w") as script_file:
        script_file.write(script_text)

    # Executable bit so the orchestrator can invoke it without "bash".
    os.chmod(script_path, 0o755)
|
|
||||||
|
|
||||||
|
|
||||||
def create_compose_file(compose_dir: Path, name: str, template: dict) -> None:
    """Write ``docker-compose-{name}.yml`` into *compose_dir*.

    Generates a single-service compose definition for the local image
    ``cerc/{name}:local`` with a HOST_PORT-overridable port mapping.
    *compose_dir* is expected to exist already (it is the shared
    data/compose directory).
    """
    service_port = template.get("port", 3000)

    service_def = {
        "image": f"cerc/{name}:local",
        "restart": "unless-stopped",
        "ports": [f"${{HOST_PORT:-{service_port}}}:{service_port}"],
        "environment": {
            "NODE_ENV": "${NODE_ENV:-production}",
        },
    }
    compose_content = {
        "version": "3.8",
        "services": {
            name: service_def,
        },
    }

    with open(compose_dir / f"docker-compose-{name}.yml", "w") as out_file:
        get_yaml().dump(compose_content, out_file)
|
|
||||||
|
|
||||||
|
|
||||||
def update_list_file(data_dir: Path, filename: str, entry: str) -> None:
    """Append *entry* to the list file *filename* under *data_dir*, idempotently.

    Blank lines and surrounding whitespace in the existing file are
    ignored when checking for duplicates.  A missing file is treated as
    empty and created on first append.
    """
    list_path = data_dir / filename

    # Collect the entries already recorded, one per line.
    current = set()
    if list_path.exists():
        with open(list_path, "r") as list_file:
            for raw_line in list_file:
                stripped = raw_line.strip()
                if stripped:
                    current.add(stripped)

    # Already present: nothing to do (keeps the file append-only and
    # duplicate-free).
    if entry in current:
        return

    with open(list_path, "a") as list_file:
        list_file.write(f"{entry}\n")
|
|
||||||
|
|
||||||
|
|
||||||
@click.command()
@click.option("--name", required=True, help="Name of the new stack (lowercase, hyphens)")
@click.option("--type", "stack_type", default="webapp",
              type=click.Choice(list(STACK_TEMPLATES.keys())),
              help="Stack template type")
@click.option("--repo", help="Git repository URL (e.g., github.com/org/repo)")
@click.option("--force", is_flag=True, help="Overwrite existing files")
@click.pass_context
def command(ctx, name: str, stack_type: str, repo: str, force: bool):
    """Create a new stack with all required files.

    Scaffolds stack.yml, a container-build directory (Dockerfile +
    build.sh), and a compose file under stack_orchestrator/data, then
    registers the new stack in the repository/container/pod list files.
    Respects the global --dry-run, --quiet and --verbose options.

    Examples:

        laconic-so create-stack --name my-app --type webapp

        laconic-so create-stack --name my-service --type service --repo github.com/org/repo
    """
    # Validate the name first; validate_stack_name exits the process on
    # violation, so everything below can assume a well-formed name.
    # NOTE(review): ctx is unused here — kept for the click pass_context
    # convention used by sibling commands.
    validate_stack_name(name)

    template = STACK_TEMPLATES[stack_type]
    data_dir = get_data_dir()

    # Define paths: stacks/<name>, container-build/cerc-<name>, and the
    # shared compose directory (not per-stack).
    stack_dir = data_dir / "stacks" / name
    container_dir = data_dir / "container-build" / f"cerc-{name}"
    compose_dir = data_dir / "compose"

    # Refuse to clobber an existing stack unless --force was given.
    if not force:
        if stack_dir.exists():
            error_exit(f"Stack already exists: {stack_dir}\nUse --force to overwrite")
        if container_dir.exists():
            error_exit(f"Container build dir exists: {container_dir}\nUse --force to overwrite")

    # Dry run: report what would be created and stop before any writes.
    if opts.o.dry_run:
        print(f"Would create stack '{name}' with template '{stack_type}':")
        print(f" - {stack_dir}/stack.yml")
        print(f" - {container_dir}/Dockerfile")
        print(f" - {container_dir}/build.sh")
        print(f" - {compose_dir}/docker-compose-{name}.yml")
        print(f" - Update repository-list.txt")
        print(f" - Update container-image-list.txt")
        print(f" - Update pod-list.txt")
        return

    # Create files.  Order matters: create_dockerfile creates
    # container_dir, which create_build_script then writes into.
    if not opts.o.quiet:
        print(f"Creating stack '{name}' with template '{stack_type}'...")

    create_stack_yml(stack_dir, name, template, repo)
    if opts.o.verbose:
        print(f" Created {stack_dir}/stack.yml")

    create_dockerfile(container_dir, name, template)
    if opts.o.verbose:
        print(f" Created {container_dir}/Dockerfile")

    create_build_script(container_dir, name)
    if opts.o.verbose:
        print(f" Created {container_dir}/build.sh")

    create_compose_file(compose_dir, name, template)
    if opts.o.verbose:
        print(f" Created {compose_dir}/docker-compose-{name}.yml")

    # Update list files so the orchestrator picks up the new stack.
    # repository-list.txt is only touched when a repo URL was supplied.
    if repo:
        update_list_file(data_dir, "repository-list.txt", repo)
        if opts.o.verbose:
            print(f" Added {repo} to repository-list.txt")

    update_list_file(data_dir, "container-image-list.txt", f"cerc/{name}")
    if opts.o.verbose:
        print(f" Added cerc/{name} to container-image-list.txt")

    update_list_file(data_dir, "pod-list.txt", name)
    if opts.o.verbose:
        print(f" Added {name} to pod-list.txt")

    # Summary with suggested follow-up commands.
    if not opts.o.quiet:
        print(f"\nStack '{name}' created successfully!")
        print(f"\nNext steps:")
        print(f" 1. Edit {stack_dir}/stack.yml")
        print(f" 2. Customize {container_dir}/Dockerfile")
        print(f" 3. Run: laconic-so --stack {name} build-containers")
        print(f" 4. Run: laconic-so --stack {name} deploy-system up")
```
|
|
||||||
|
|
||||||
### 2. Register Command in main.py
|
|
||||||
|
|
||||||
**Edit `stack_orchestrator/main.py`**
|
|
||||||
|
|
||||||
Add import:
|
|
||||||
```python
|
|
||||||
from stack_orchestrator.create import create_stack
|
|
||||||
```
|
|
||||||
|
|
||||||
Add command registration (after line ~78):
|
|
||||||
```python
|
|
||||||
cli.add_command(create_stack.command, "create-stack")
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Implementation Steps
|
|
||||||
|
|
||||||
### Step 1: Create module structure
|
|
||||||
```bash
|
|
||||||
mkdir -p stack_orchestrator/create
|
|
||||||
touch stack_orchestrator/create/__init__.py
|
|
||||||
```
|
|
||||||
|
|
||||||
### Step 2: Create the command file
|
|
||||||
Create `stack_orchestrator/create/create_stack.py` with the code above.
|
|
||||||
|
|
||||||
### Step 3: Register in main.py
|
|
||||||
Add the import and `cli.add_command()` line.
|
|
||||||
|
|
||||||
### Step 4: Test the command
|
|
||||||
```bash
|
|
||||||
# Show help
|
|
||||||
laconic-so create-stack --help
|
|
||||||
|
|
||||||
# Dry run
|
|
||||||
laconic-so --dry-run create-stack --name test-app --type webapp
|
|
||||||
|
|
||||||
# Create a stack
|
|
||||||
laconic-so create-stack --name test-app --type webapp --repo github.com/org/test-app
|
|
||||||
|
|
||||||
# Verify
|
|
||||||
ls -la stack_orchestrator/data/stacks/test-app/
|
|
||||||
cat stack_orchestrator/data/stacks/test-app/stack.yml
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Template Types
|
|
||||||
|
|
||||||
| Type | Base Image | Port | Use Case |
|
|
||||||
|------|------------|------|----------|
|
|
||||||
| `webapp` | node:20-bullseye-slim | 3000 | React/Vue/Next.js apps |
|
|
||||||
| `service` | python:3.11-slim | 8080 | Python backend services |
|
|
||||||
| `empty` | none | none | Custom from scratch |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Future Enhancements
|
|
||||||
|
|
||||||
1. **Interactive mode** - Prompt for values if not provided
|
|
||||||
2. **More templates** - Go, Rust, database stacks
|
|
||||||
3. **Template from existing** - `--from-stack existing-stack`
|
|
||||||
4. **External stack support** - Create in custom directory
|
|
||||||
5. **Validation command** - `laconic-so validate-stack --name my-stack`
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Files Modified
|
|
||||||
|
|
||||||
| File | Change |
|
|
||||||
|------|--------|
|
|
||||||
| `stack_orchestrator/create/__init__.py` | New (empty) |
|
|
||||||
| `stack_orchestrator/create/create_stack.py` | New (command implementation) |
|
|
||||||
| `stack_orchestrator/main.py` | Add import and `cli.add_command()` |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Verification
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# 1. Command appears in help
|
|
||||||
laconic-so --help | grep create-stack
|
|
||||||
|
|
||||||
# 2. Dry run works
|
|
||||||
laconic-so --dry-run create-stack --name verify-test --type webapp
|
|
||||||
|
|
||||||
# 3. Full creation works
|
|
||||||
laconic-so create-stack --name verify-test --type webapp
|
|
||||||
ls stack_orchestrator/data/stacks/verify-test/
|
|
||||||
ls stack_orchestrator/data/container-build/cerc-verify-test/
|
|
||||||
ls stack_orchestrator/data/compose/docker-compose-verify-test.yml
|
|
||||||
|
|
||||||
# 4. Build works
|
|
||||||
laconic-so --stack verify-test build-containers
|
|
||||||
|
|
||||||
# 5. Cleanup
|
|
||||||
rm -rf stack_orchestrator/data/stacks/verify-test
|
|
||||||
rm -rf stack_orchestrator/data/container-build/cerc-verify-test
|
|
||||||
rm stack_orchestrator/data/compose/docker-compose-verify-test.yml
|
|
||||||
```
|
|
||||||
35
TODO.md
35
TODO.md
@ -1,35 +0,0 @@
|
|||||||
# TODO
|
|
||||||
|
|
||||||
## Features Needed
|
|
||||||
|
|
||||||
### Update Stack Command
|
|
||||||
We need an "update stack" command in stack orchestrator and cleaner documentation regarding how to do continuous deployment with and without payments.
|
|
||||||
|
|
||||||
**Context**: Currently, `deploy init` generates a spec file and `deploy create` creates a deployment directory. The `deployment update` command (added by Thomas Lackey) only syncs env vars and restarts - it doesn't regenerate configurations. There's a gap in the workflow for updating stack configurations after initial deployment.
|
|
||||||
|
|
||||||
## Bugs
|
|
||||||
|
|
||||||
### `deploy create` doesn't auto-generate volume mappings for new pods
|
|
||||||
|
|
||||||
When a new pod is added to `stack.yml` (e.g. `monitoring`), `deploy create`
|
|
||||||
does not generate default host path mappings in spec.yml for the new pod's
|
|
||||||
volumes. The deployment then fails at scheduling because the PVCs don't exist.
|
|
||||||
|
|
||||||
**Expected**: `deploy create` enumerates all volumes from all compose files
|
|
||||||
in the stack and generates default host paths for any that aren't already
|
|
||||||
mapped in the spec.yml `volumes:` section.
|
|
||||||
|
|
||||||
**Actual**: Only volumes already in spec.yml get PVs. New volumes are silently
|
|
||||||
missing, causing `FailedScheduling: persistentvolumeclaim not found`.
|
|
||||||
|
|
||||||
**Workaround**: Manually add volume entries to spec.yml and create host dirs.
|
|
||||||
|
|
||||||
**Files**: `deployment_create.py` (`_write_config_file`, volume handling)
|
|
||||||
|
|
||||||
## Architecture Refactoring
|
|
||||||
|
|
||||||
### Separate Deployer from Stack Orchestrator CLI
|
|
||||||
The deployer logic should be decoupled from the CLI tool to allow independent development and reuse.
|
|
||||||
|
|
||||||
### Separate Stacks from Stack Orchestrator Repo
|
|
||||||
Stacks should live in their own repositories, not bundled with the orchestrator tool. This allows stacks to evolve independently and be maintained by different teams.
|
|
||||||
124
docs/cli.md
124
docs/cli.md
@ -65,127 +65,3 @@ Force full rebuild of packages:
|
|||||||
```
|
```
|
||||||
$ laconic-so build-npms --include <package-name> --force-rebuild
|
$ laconic-so build-npms --include <package-name> --force-rebuild
|
||||||
```
|
```
|
||||||
|
|
||||||
## deploy
|
|
||||||
|
|
||||||
The `deploy` command group manages persistent deployments. The general workflow is `deploy init` to generate a spec file, then `deploy create` to create a deployment directory from the spec, then runtime commands like `deployment start` and `deployment stop`.
|
|
||||||
|
|
||||||
### deploy init
|
|
||||||
|
|
||||||
Generate a deployment spec file from a stack definition:
|
|
||||||
```
|
|
||||||
$ laconic-so --stack <stack-name> deploy init --output <spec-file>
|
|
||||||
```
|
|
||||||
|
|
||||||
Options:
|
|
||||||
- `--output` (required): write spec file here
|
|
||||||
- `--config`: provide config variables for the deployment
|
|
||||||
- `--config-file`: provide config variables in a file
|
|
||||||
- `--kube-config`: provide a config file for a k8s deployment
|
|
||||||
- `--image-registry`: provide a container image registry url for this k8s cluster
|
|
||||||
- `--map-ports-to-host`: map ports to the host (`any-variable-random`, `localhost-same`, `any-same`, `localhost-fixed-random`, `any-fixed-random`)
|
|
||||||
|
|
||||||
### deploy create
|
|
||||||
|
|
||||||
Create a deployment directory from a spec file:
|
|
||||||
```
|
|
||||||
$ laconic-so --stack <stack-name> deploy create --spec-file <spec-file> --deployment-dir <dir>
|
|
||||||
```
|
|
||||||
|
|
||||||
Update an existing deployment in-place (preserving data volumes and env file):
|
|
||||||
```
|
|
||||||
$ laconic-so --stack <stack-name> deploy create --spec-file <spec-file> --deployment-dir <dir> --update
|
|
||||||
```
|
|
||||||
|
|
||||||
Options:
|
|
||||||
- `--spec-file` (required): spec file to use
|
|
||||||
- `--deployment-dir`: target directory for deployment files
|
|
||||||
- `--update`: update an existing deployment directory, preserving data volumes and env file. Changed files are backed up with a `.bak` suffix. The deployment's `config.env` and `deployment.yml` are also preserved.
|
|
||||||
- `--helm-chart`: generate Helm chart instead of deploying (k8s only)
|
|
||||||
- `--network-dir`: network configuration supplied in this directory
|
|
||||||
- `--initial-peers`: initial set of persistent peers
|
|
||||||
|
|
||||||
## deployment
|
|
||||||
|
|
||||||
Runtime commands for managing a created deployment. Use `--dir` to specify the deployment directory.
|
|
||||||
|
|
||||||
### deployment start
|
|
||||||
|
|
||||||
Start a deployment (`up` is a legacy alias):
|
|
||||||
```
|
|
||||||
$ laconic-so deployment --dir <deployment-dir> start
|
|
||||||
```
|
|
||||||
|
|
||||||
Options:
|
|
||||||
- `--stay-attached` / `--detatch-terminal`: attach to container stdout (default: detach)
|
|
||||||
- `--skip-cluster-management` / `--perform-cluster-management`: skip kind cluster creation/teardown (default: perform management). Only affects k8s-kind deployments. Use this when multiple stacks share a single cluster.
|
|
||||||
|
|
||||||
### deployment stop
|
|
||||||
|
|
||||||
Stop a deployment (`down` is a legacy alias):
|
|
||||||
```
|
|
||||||
$ laconic-so deployment --dir <deployment-dir> stop
|
|
||||||
```
|
|
||||||
|
|
||||||
Options:
|
|
||||||
- `--delete-volumes` / `--preserve-volumes`: delete data volumes on stop (default: preserve)
|
|
||||||
- `--skip-cluster-management` / `--perform-cluster-management`: skip kind cluster teardown (default: perform management). Use this to stop a single deployment without destroying a shared cluster.
|
|
||||||
|
|
||||||
### deployment restart
|
|
||||||
|
|
||||||
Restart a deployment with GitOps-aware workflow. Pulls latest stack code, syncs the deployment directory from the git-tracked spec, and restarts services:
|
|
||||||
```
|
|
||||||
$ laconic-so deployment --dir <deployment-dir> restart
|
|
||||||
```
|
|
||||||
|
|
||||||
See [deployment_patterns.md](deployment_patterns.md) for the recommended GitOps workflow.
|
|
||||||
|
|
||||||
### deployment ps
|
|
||||||
|
|
||||||
Show running services:
|
|
||||||
```
|
|
||||||
$ laconic-so deployment --dir <deployment-dir> ps
|
|
||||||
```
|
|
||||||
|
|
||||||
### deployment logs
|
|
||||||
|
|
||||||
View service logs:
|
|
||||||
```
|
|
||||||
$ laconic-so deployment --dir <deployment-dir> logs
|
|
||||||
```
|
|
||||||
Use `-f` to follow and `-n <count>` to tail.
|
|
||||||
|
|
||||||
### deployment exec
|
|
||||||
|
|
||||||
Execute a command in a running service container:
|
|
||||||
```
|
|
||||||
$ laconic-so deployment --dir <deployment-dir> exec <service-name> "<command>"
|
|
||||||
```
|
|
||||||
|
|
||||||
### deployment status
|
|
||||||
|
|
||||||
Show deployment status:
|
|
||||||
```
|
|
||||||
$ laconic-so deployment --dir <deployment-dir> status
|
|
||||||
```
|
|
||||||
|
|
||||||
### deployment port
|
|
||||||
|
|
||||||
Show mapped ports for a service:
|
|
||||||
```
|
|
||||||
$ laconic-so deployment --dir <deployment-dir> port <service-name> <port>
|
|
||||||
```
|
|
||||||
|
|
||||||
### deployment push-images
|
|
||||||
|
|
||||||
Push deployment images to a registry:
|
|
||||||
```
|
|
||||||
$ laconic-so deployment --dir <deployment-dir> push-images
|
|
||||||
```
|
|
||||||
|
|
||||||
### deployment run-job
|
|
||||||
|
|
||||||
Run a one-time job in the deployment:
|
|
||||||
```
|
|
||||||
$ laconic-so deployment --dir <deployment-dir> run-job <job-name>
|
|
||||||
```
|
|
||||||
|
|||||||
@ -1,202 +0,0 @@
|
|||||||
# Deployment Patterns
|
|
||||||
|
|
||||||
## GitOps Pattern
|
|
||||||
|
|
||||||
For production deployments, we recommend a GitOps approach where your deployment configuration is tracked in version control.
|
|
||||||
|
|
||||||
### Overview
|
|
||||||
|
|
||||||
- **spec.yml is your source of truth**: Maintain it in your operator repository
|
|
||||||
- **Don't regenerate on every restart**: Run `deploy init` once, then customize and commit
|
|
||||||
- **Use restart for updates**: The restart command respects your git-tracked spec.yml
|
|
||||||
|
|
||||||
### Workflow
|
|
||||||
|
|
||||||
1. **Initial setup**: Run `deploy init` once to generate a spec.yml template
|
|
||||||
2. **Customize and commit**: Edit spec.yml with your configuration (hostnames, resources, etc.) and commit to your operator repo
|
|
||||||
3. **Deploy from git**: Use the committed spec.yml for deployments
|
|
||||||
4. **Update via git**: Make changes in git, then restart to apply
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Initial setup (run once)
|
|
||||||
laconic-so --stack my-stack deploy init --output spec.yml
|
|
||||||
|
|
||||||
# Customize for your environment
|
|
||||||
vim spec.yml # Set hostname, resources, etc.
|
|
||||||
|
|
||||||
# Commit to your operator repository
|
|
||||||
git add spec.yml
|
|
||||||
git commit -m "Add my-stack deployment configuration"
|
|
||||||
git push
|
|
||||||
|
|
||||||
# On deployment server: deploy from git-tracked spec
|
|
||||||
laconic-so --stack my-stack deploy create \
|
|
||||||
--spec-file /path/to/operator-repo/spec.yml \
|
|
||||||
--deployment-dir my-deployment
|
|
||||||
|
|
||||||
laconic-so deployment --dir my-deployment start
|
|
||||||
```
|
|
||||||
|
|
||||||
### Updating Deployments
|
|
||||||
|
|
||||||
When you need to update a deployment:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# 1. Make changes in your operator repo
|
|
||||||
vim /path/to/operator-repo/spec.yml
|
|
||||||
git commit -am "Update configuration"
|
|
||||||
git push
|
|
||||||
|
|
||||||
# 2. On deployment server: pull and restart
|
|
||||||
cd /path/to/operator-repo && git pull
|
|
||||||
laconic-so deployment --dir my-deployment restart
|
|
||||||
```
|
|
||||||
|
|
||||||
The `restart` command:
|
|
||||||
- Pulls latest code from the stack repository
|
|
||||||
- Uses your git-tracked spec.yml (does NOT regenerate from defaults)
|
|
||||||
- Syncs the deployment directory
|
|
||||||
- Restarts services
|
|
||||||
|
|
||||||
### Anti-patterns
|
|
||||||
|
|
||||||
**Don't do this:**
|
|
||||||
```bash
|
|
||||||
# BAD: Regenerating spec on every deployment
|
|
||||||
laconic-so --stack my-stack deploy init --output spec.yml
|
|
||||||
laconic-so deploy create --spec-file spec.yml ...
|
|
||||||
```
|
|
||||||
|
|
||||||
This overwrites your customizations with defaults from the stack's `commands.py`.
|
|
||||||
|
|
||||||
**Do this instead:**
|
|
||||||
```bash
|
|
||||||
# GOOD: Use your git-tracked spec
|
|
||||||
git pull # Get latest spec.yml from your operator repo
|
|
||||||
laconic-so deployment --dir my-deployment restart
|
|
||||||
```
|
|
||||||
|
|
||||||
## Private Registry Authentication
|
|
||||||
|
|
||||||
For deployments using images from private container registries (e.g., GitHub Container Registry), configure authentication in your spec.yml:
|
|
||||||
|
|
||||||
### Configuration
|
|
||||||
|
|
||||||
Add a `registry-credentials` section to your spec.yml:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
registry-credentials:
|
|
||||||
server: ghcr.io
|
|
||||||
username: your-org-or-username
|
|
||||||
token-env: REGISTRY_TOKEN
|
|
||||||
```
|
|
||||||
|
|
||||||
**Fields:**
|
|
||||||
- `server`: The registry hostname (e.g., `ghcr.io`, `docker.io`, `gcr.io`)
|
|
||||||
- `username`: Registry username (for GHCR, use your GitHub username or org name)
|
|
||||||
- `token-env`: Name of the environment variable containing your API token/PAT
|
|
||||||
|
|
||||||
### Token Environment Variable
|
|
||||||
|
|
||||||
The `token-env` pattern keeps credentials out of version control. Set the environment variable when running `deployment start`:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
export REGISTRY_TOKEN="your-personal-access-token"
|
|
||||||
laconic-so deployment --dir my-deployment start
|
|
||||||
```
|
|
||||||
|
|
||||||
For GHCR, create a Personal Access Token (PAT) with `read:packages` scope.
|
|
||||||
|
|
||||||
### Ansible Integration
|
|
||||||
|
|
||||||
When using Ansible for deployments, pass the token from a credentials file:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
- name: Start deployment
|
|
||||||
ansible.builtin.command:
|
|
||||||
cmd: laconic-so deployment --dir {{ deployment_dir }} start
|
|
||||||
environment:
|
|
||||||
REGISTRY_TOKEN: "{{ lookup('file', '~/.credentials/ghcr_token') }}"
|
|
||||||
```
|
|
||||||
|
|
||||||
### How It Works
|
|
||||||
|
|
||||||
1. laconic-so reads the `registry-credentials` config from spec.yml
|
|
||||||
2. Creates a Kubernetes `docker-registry` secret named `{deployment}-registry`
|
|
||||||
3. The deployment's pods reference this secret for image pulls
|
|
||||||
|
|
||||||
## Cluster and Volume Management
|
|
||||||
|
|
||||||
### Stopping Deployments
|
|
||||||
|
|
||||||
The `deployment stop` command has two important flags:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Default: stops deployment, deletes cluster, PRESERVES volumes
|
|
||||||
laconic-so deployment --dir my-deployment stop
|
|
||||||
|
|
||||||
# Explicitly delete volumes (USE WITH CAUTION)
|
|
||||||
laconic-so deployment --dir my-deployment stop --delete-volumes
|
|
||||||
```
|
|
||||||
|
|
||||||
### Volume Persistence
|
|
||||||
|
|
||||||
Volumes persist across cluster deletion by design. This is important because:
|
|
||||||
- **Data survives cluster recreation**: Ledger data, databases, and other state are preserved
|
|
||||||
- **Faster recovery**: No need to re-sync or rebuild data after cluster issues
|
|
||||||
- **Safe cluster upgrades**: Delete and recreate cluster without data loss
|
|
||||||
|
|
||||||
**Only use `--delete-volumes` when:**
|
|
||||||
- You explicitly want to start fresh with no data
|
|
||||||
- The user specifically requests volume deletion
|
|
||||||
- You're cleaning up a test/dev environment completely
|
|
||||||
|
|
||||||
### Shared Cluster Architecture
|
|
||||||
|
|
||||||
In kind deployments, multiple stacks share a single cluster:
|
|
||||||
- First `deployment start` creates the cluster
|
|
||||||
- Subsequent deployments reuse the existing cluster
|
|
||||||
- `deployment stop` on ANY deployment deletes the shared cluster
|
|
||||||
- Other deployments will fail until cluster is recreated
|
|
||||||
|
|
||||||
To stop a single deployment without affecting the cluster:
|
|
||||||
```bash
|
|
||||||
laconic-so deployment --dir my-deployment stop --skip-cluster-management
|
|
||||||
```
|
|
||||||
|
|
||||||
## Volume Persistence in k8s-kind
|
|
||||||
|
|
||||||
k8s-kind has 3 storage layers:
|
|
||||||
|
|
||||||
- **Docker Host**: The physical server running Docker
|
|
||||||
- **Kind Node**: A Docker container simulating a k8s node
|
|
||||||
- **Pod Container**: Your workload
|
|
||||||
|
|
||||||
For k8s-kind, volumes with paths are mounted from Docker Host → Kind Node → Pod via extraMounts.
|
|
||||||
|
|
||||||
| spec.yml volume | Storage Location | Survives Pod Restart | Survives Cluster Restart |
|
|
||||||
|-----------------|------------------|---------------------|-------------------------|
|
|
||||||
| `vol:` (empty) | Kind Node PVC | ✅ | ❌ |
|
|
||||||
| `vol: ./data/x` | Docker Host | ✅ | ✅ |
|
|
||||||
| `vol: /abs/path`| Docker Host | ✅ | ✅ |
|
|
||||||
|
|
||||||
**Recommendation**: Always use paths for data you want to keep. Relative paths
|
|
||||||
(e.g., `./data/rpc-config`) resolve to `$DEPLOYMENT_DIR/data/rpc-config` on the
|
|
||||||
Docker Host.
|
|
||||||
|
|
||||||
### Example
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
# In spec.yml
|
|
||||||
volumes:
|
|
||||||
rpc-config: ./data/rpc-config # Persists to $DEPLOYMENT_DIR/data/rpc-config
|
|
||||||
chain-data: ./data/chain # Persists to $DEPLOYMENT_DIR/data/chain
|
|
||||||
temp-cache: # Empty = Kind Node PVC (lost on cluster delete)
|
|
||||||
```
|
|
||||||
|
|
||||||
### The Antipattern
|
|
||||||
|
|
||||||
Empty-path volumes appear persistent because they survive pod restarts (data lives
|
|
||||||
in Kind Node container). However, this data is lost when the kind cluster is
|
|
||||||
recreated. This "false persistence" has caused data loss when operators assumed
|
|
||||||
their data was safe.
|
|
||||||
@ -1,550 +0,0 @@
|
|||||||
# Docker Compose Deployment Guide
|
|
||||||
|
|
||||||
## Introduction
|
|
||||||
|
|
||||||
### What is a Deployer?
|
|
||||||
|
|
||||||
In stack-orchestrator, a **deployer** provides a uniform interface for orchestrating containerized applications. This guide focuses on Docker Compose deployments, which is the default and recommended deployment mode.
|
|
||||||
|
|
||||||
While stack-orchestrator also supports Kubernetes (`k8s`) and Kind (`k8s-kind`) deployments, those are out of scope for this guide. See the [Kubernetes Enhancements](./k8s-deployment-enhancements.md) documentation for advanced deployment options.
|
|
||||||
|
|
||||||
## Prerequisites
|
|
||||||
|
|
||||||
To deploy stacks using Docker Compose, you need:
|
|
||||||
|
|
||||||
- Docker Engine (20.10+)
|
|
||||||
- Docker Compose plugin (v2.0+)
|
|
||||||
- Python 3.8+
|
|
||||||
- stack-orchestrator installed (`laconic-so`)
|
|
||||||
|
|
||||||
**That's it!** No additional infrastructure is required. If you have Docker installed, you're ready to deploy.
|
|
||||||
|
|
||||||
## Deployment Workflow
|
|
||||||
|
|
||||||
The typical deployment workflow consists of four main steps:
|
|
||||||
|
|
||||||
1. **Setup repositories and build containers** (first time only)
|
|
||||||
2. **Initialize deployment specification**
|
|
||||||
3. **Create deployment directory**
|
|
||||||
4. **Start and manage services**
|
|
||||||
|
|
||||||
## Quick Start Example
|
|
||||||
|
|
||||||
Here's a complete example using the built-in `test` stack:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Step 1: Setup (first time only)
|
|
||||||
laconic-so --stack test setup-repositories
|
|
||||||
laconic-so --stack test build-containers
|
|
||||||
|
|
||||||
# Step 2: Initialize deployment spec
|
|
||||||
laconic-so --stack test deploy init --output test-spec.yml
|
|
||||||
|
|
||||||
# Step 3: Create deployment directory
|
|
||||||
laconic-so --stack test deploy create \
|
|
||||||
--spec-file test-spec.yml \
|
|
||||||
--deployment-dir test-deployment
|
|
||||||
|
|
||||||
# Step 4: Start services
|
|
||||||
laconic-so deployment --dir test-deployment start
|
|
||||||
|
|
||||||
# View running services
|
|
||||||
laconic-so deployment --dir test-deployment ps
|
|
||||||
|
|
||||||
# View logs
|
|
||||||
laconic-so deployment --dir test-deployment logs
|
|
||||||
|
|
||||||
# Stop services (preserves data)
|
|
||||||
laconic-so deployment --dir test-deployment stop
|
|
||||||
```
|
|
||||||
|
|
||||||
## Deployment Workflows
|
|
||||||
|
|
||||||
Stack-orchestrator supports two deployment workflows:
|
|
||||||
|
|
||||||
### 1. Deployment Directory Workflow (Recommended)
|
|
||||||
|
|
||||||
This workflow creates a persistent deployment directory that contains all configuration and data.
|
|
||||||
|
|
||||||
**When to use:**
|
|
||||||
- Production deployments
|
|
||||||
- When you need to preserve configuration
|
|
||||||
- When you want to manage multiple deployments
|
|
||||||
- When you need persistent volume data
|
|
||||||
|
|
||||||
**Example:**
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Initialize deployment spec
|
|
||||||
laconic-so --stack fixturenet-eth deploy init --output eth-spec.yml
|
|
||||||
|
|
||||||
# Optionally edit eth-spec.yml to customize configuration
|
|
||||||
|
|
||||||
# Create deployment directory
|
|
||||||
laconic-so --stack fixturenet-eth deploy create \
|
|
||||||
--spec-file eth-spec.yml \
|
|
||||||
--deployment-dir my-eth-deployment
|
|
||||||
|
|
||||||
# Start the deployment
|
|
||||||
laconic-so deployment --dir my-eth-deployment start
|
|
||||||
|
|
||||||
# Manage the deployment
|
|
||||||
laconic-so deployment --dir my-eth-deployment ps
|
|
||||||
laconic-so deployment --dir my-eth-deployment logs
|
|
||||||
laconic-so deployment --dir my-eth-deployment stop
|
|
||||||
```
|
|
||||||
|
|
||||||
### 2. Quick Deploy Workflow
|
|
||||||
|
|
||||||
This workflow deploys directly without creating a persistent deployment directory.
|
|
||||||
|
|
||||||
**When to use:**
|
|
||||||
- Quick testing
|
|
||||||
- Temporary deployments
|
|
||||||
- Simple stacks that don't require customization
|
|
||||||
|
|
||||||
**Example:**
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Start the stack directly
|
|
||||||
laconic-so --stack test deploy up
|
|
||||||
|
|
||||||
# Show the host port mapped to container port 80 on the 'test' service
|
|
||||||
laconic-so --stack test deploy port test 80
|
|
||||||
|
|
||||||
# View logs
|
|
||||||
laconic-so --stack test deploy logs
|
|
||||||
|
|
||||||
# Stop (preserves volumes)
|
|
||||||
laconic-so --stack test deploy down
|
|
||||||
|
|
||||||
# Stop and remove volumes
|
|
||||||
laconic-so --stack test deploy down --delete-volumes
|
|
||||||
```
|
|
||||||
|
|
||||||
## Real-World Example: Ethereum Fixturenet
|
|
||||||
|
|
||||||
Deploy a local Ethereum testnet with Geth and Lighthouse:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Setup (first time only)
|
|
||||||
laconic-so --stack fixturenet-eth setup-repositories
|
|
||||||
laconic-so --stack fixturenet-eth build-containers
|
|
||||||
|
|
||||||
# Initialize with default configuration
|
|
||||||
laconic-so --stack fixturenet-eth deploy init --output eth-spec.yml
|
|
||||||
|
|
||||||
# Create deployment
|
|
||||||
laconic-so --stack fixturenet-eth deploy create \
|
|
||||||
--spec-file eth-spec.yml \
|
|
||||||
--deployment-dir fixturenet-eth-deployment
|
|
||||||
|
|
||||||
# Start the network
|
|
||||||
laconic-so deployment --dir fixturenet-eth-deployment start
|
|
||||||
|
|
||||||
# Check status
|
|
||||||
laconic-so deployment --dir fixturenet-eth-deployment ps
|
|
||||||
|
|
||||||
# Access logs from specific service
|
|
||||||
laconic-so deployment --dir fixturenet-eth-deployment logs fixturenet-eth-geth-1
|
|
||||||
|
|
||||||
# Stop the network (preserves blockchain data)
|
|
||||||
laconic-so deployment --dir fixturenet-eth-deployment stop
|
|
||||||
|
|
||||||
# Start again - blockchain data is preserved
|
|
||||||
laconic-so deployment --dir fixturenet-eth-deployment start
|
|
||||||
|
|
||||||
# Clean up everything including data
|
|
||||||
laconic-so deployment --dir fixturenet-eth-deployment stop --delete-volumes
|
|
||||||
```
|
|
||||||
|
|
||||||
## Configuration
|
|
||||||
|
|
||||||
### Passing Configuration Parameters
|
|
||||||
|
|
||||||
Configuration can be passed in three ways:
|
|
||||||
|
|
||||||
**1. At init time via `--config` flag:**
|
|
||||||
|
|
||||||
```bash
|
|
||||||
laconic-so --stack test deploy init --output spec.yml \
|
|
||||||
--config PARAM1=value1,PARAM2=value2
|
|
||||||
```
|
|
||||||
|
|
||||||
**2. Edit the spec file after init:**
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Initialize
|
|
||||||
laconic-so --stack test deploy init --output spec.yml
|
|
||||||
|
|
||||||
# Edit spec.yml
|
|
||||||
vim spec.yml
|
|
||||||
```
|
|
||||||
|
|
||||||
Example spec.yml:
|
|
||||||
```yaml
|
|
||||||
stack: test
|
|
||||||
config:
|
|
||||||
PARAM1: value1
|
|
||||||
PARAM2: value2
|
|
||||||
```
|
|
||||||
|
|
||||||
**3. Docker Compose defaults:**
|
|
||||||
|
|
||||||
Environment variables defined in the stack's `docker-compose-*.yml` files are used as defaults. Configuration from the spec file overrides these defaults.
|
|
||||||
|
|
||||||
### Port Mapping
|
|
||||||
|
|
||||||
By default, services are accessible on randomly assigned host ports. To find the mapped port:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Find the host port for container port 80 on service 'webapp'
|
|
||||||
laconic-so deployment --dir my-deployment port webapp 80
|
|
||||||
|
|
||||||
# Output example: 0.0.0.0:32768
|
|
||||||
```
|
|
||||||
|
|
||||||
To configure fixed ports, edit the spec file before creating the deployment:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
network:
|
|
||||||
ports:
|
|
||||||
webapp:
|
|
||||||
- '8080:80' # Maps host port 8080 to container port 80
|
|
||||||
api:
|
|
||||||
- '3000:3000'
|
|
||||||
```
|
|
||||||
|
|
||||||
Then create the deployment:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
laconic-so --stack my-stack deploy create \
|
|
||||||
--spec-file spec.yml \
|
|
||||||
--deployment-dir my-deployment
|
|
||||||
```
|
|
||||||
|
|
||||||
### Volume Persistence
|
|
||||||
|
|
||||||
Volumes are preserved between stop/start cycles by default:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Stop but keep data
|
|
||||||
laconic-so deployment --dir my-deployment stop
|
|
||||||
|
|
||||||
# Start again - data is still there
|
|
||||||
laconic-so deployment --dir my-deployment start
|
|
||||||
```
|
|
||||||
|
|
||||||
To completely remove all data:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Stop and delete all volumes
|
|
||||||
laconic-so deployment --dir my-deployment stop --delete-volumes
|
|
||||||
```
|
|
||||||
|
|
||||||
Volume data is stored in `<deployment-dir>/data/`.
|
|
||||||
|
|
||||||
## Common Operations
|
|
||||||
|
|
||||||
### Viewing Logs
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# All services, continuous follow
|
|
||||||
laconic-so deployment --dir my-deployment logs --follow
|
|
||||||
|
|
||||||
# Last 100 lines from all services
|
|
||||||
laconic-so deployment --dir my-deployment logs --tail 100
|
|
||||||
|
|
||||||
# Specific service only
|
|
||||||
laconic-so deployment --dir my-deployment logs webapp
|
|
||||||
|
|
||||||
# Combine options
|
|
||||||
laconic-so deployment --dir my-deployment logs --tail 50 --follow webapp
|
|
||||||
```
|
|
||||||
|
|
||||||
### Executing Commands in Containers
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Execute a command in a running service
|
|
||||||
laconic-so deployment --dir my-deployment exec webapp ls -la
|
|
||||||
|
|
||||||
# Interactive shell
|
|
||||||
laconic-so deployment --dir my-deployment exec webapp /bin/bash
|
|
||||||
|
|
||||||
# Run command with specific environment variables
|
|
||||||
laconic-so deployment --dir my-deployment exec webapp env VAR=value command
|
|
||||||
```
|
|
||||||
|
|
||||||
### Checking Service Status
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# List all running services
|
|
||||||
laconic-so deployment --dir my-deployment ps
|
|
||||||
|
|
||||||
# Check using Docker directly
|
|
||||||
docker ps
|
|
||||||
```
|
|
||||||
|
|
||||||
### Updating a Running Deployment
|
|
||||||
|
|
||||||
If you need to change configuration after deployment:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# 1. Edit the spec file
|
|
||||||
vim my-deployment/spec.yml
|
|
||||||
|
|
||||||
# 2. Regenerate configuration
|
|
||||||
laconic-so deployment --dir my-deployment update
|
|
||||||
|
|
||||||
# 3. Restart services to apply changes
|
|
||||||
laconic-so deployment --dir my-deployment stop
|
|
||||||
laconic-so deployment --dir my-deployment start
|
|
||||||
```
|
|
||||||
|
|
||||||
## Multi-Service Deployments
|
|
||||||
|
|
||||||
Many stacks deploy multiple services that work together:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Deploy a stack with multiple services
|
|
||||||
laconic-so --stack laconicd-with-console deploy init --output spec.yml
|
|
||||||
laconic-so --stack laconicd-with-console deploy create \
|
|
||||||
--spec-file spec.yml \
|
|
||||||
--deployment-dir laconicd-deployment
|
|
||||||
|
|
||||||
laconic-so deployment --dir laconicd-deployment start
|
|
||||||
|
|
||||||
# View all services
|
|
||||||
laconic-so deployment --dir laconicd-deployment ps
|
|
||||||
|
|
||||||
# View logs from specific services
|
|
||||||
laconic-so deployment --dir laconicd-deployment logs laconicd
|
|
||||||
laconic-so deployment --dir laconicd-deployment logs console
|
|
||||||
```
|
|
||||||
|
|
||||||
## ConfigMaps
|
|
||||||
|
|
||||||
ConfigMaps allow you to mount configuration files into containers:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# 1. Create the config directory in your deployment
|
|
||||||
mkdir -p my-deployment/data/my-config
|
|
||||||
echo "database_url=postgres://localhost" > my-deployment/data/my-config/app.conf
|
|
||||||
|
|
||||||
# 2. Reference in spec file
|
|
||||||
vim my-deployment/spec.yml
|
|
||||||
```
|
|
||||||
|
|
||||||
Add to spec.yml:
|
|
||||||
```yaml
|
|
||||||
configmaps:
|
|
||||||
my-config: ./data/my-config
|
|
||||||
```
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# 3. Restart to apply
|
|
||||||
laconic-so deployment --dir my-deployment stop
|
|
||||||
laconic-so deployment --dir my-deployment start
|
|
||||||
```
|
|
||||||
|
|
||||||
The files will be mounted in the container at `/config/` (or as specified by the stack).
|
|
||||||
|
|
||||||
## Deployment Directory Structure
|
|
||||||
|
|
||||||
A typical deployment directory contains:
|
|
||||||
|
|
||||||
```
|
|
||||||
my-deployment/
|
|
||||||
├── compose/
|
|
||||||
│ └── docker-compose-*.yml # Generated compose files
|
|
||||||
├── config.env # Environment variables
|
|
||||||
├── deployment.yml # Deployment metadata
|
|
||||||
├── spec.yml # Deployment specification
|
|
||||||
└── data/ # Volume mounts and configs
|
|
||||||
├── service-data/ # Persistent service data
|
|
||||||
└── config-maps/ # ConfigMap files
|
|
||||||
```
|
|
||||||
|
|
||||||
## Troubleshooting
|
|
||||||
|
|
||||||
### Common Issues
|
|
||||||
|
|
||||||
**Problem: "Cannot connect to Docker daemon"**
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Ensure Docker is running
|
|
||||||
docker ps
|
|
||||||
|
|
||||||
# Start Docker if needed (macOS)
|
|
||||||
open -a Docker
|
|
||||||
|
|
||||||
# Start Docker (Linux)
|
|
||||||
sudo systemctl start docker
|
|
||||||
```
|
|
||||||
|
|
||||||
**Problem: "Port already in use"**
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Either stop the conflicting service or use different ports
|
|
||||||
# Edit spec.yml before creating deployment:
|
|
||||||
|
|
||||||
network:
|
|
||||||
ports:
|
|
||||||
webapp:
|
|
||||||
- '8081:80' # Use 8081 instead of 8080
|
|
||||||
```
|
|
||||||
|
|
||||||
**Problem: "Image not found"**
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Build containers first
|
|
||||||
laconic-so --stack your-stack build-containers
|
|
||||||
```
|
|
||||||
|
|
||||||
**Problem: Volumes not persisting**
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Check if you used --delete-volumes when stopping
|
|
||||||
# Volume data is in: <deployment-dir>/data/
|
|
||||||
|
|
||||||
# Don't use --delete-volumes if you want to keep data:
|
|
||||||
laconic-so deployment --dir my-deployment stop
|
|
||||||
|
|
||||||
# Only use --delete-volumes when you want to reset completely:
|
|
||||||
laconic-so deployment --dir my-deployment stop --delete-volumes
|
|
||||||
```
|
|
||||||
|
|
||||||
**Problem: Services not starting**
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Check logs for errors
|
|
||||||
laconic-so deployment --dir my-deployment logs
|
|
||||||
|
|
||||||
# Check Docker container status
|
|
||||||
docker ps -a
|
|
||||||
|
|
||||||
# Try stopping and starting again
|
|
||||||
laconic-so deployment --dir my-deployment stop
|
|
||||||
laconic-so deployment --dir my-deployment start
|
|
||||||
```
|
|
||||||
|
|
||||||
### Inspecting Deployment State
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Check deployment directory structure
|
|
||||||
ls -la my-deployment/
|
|
||||||
|
|
||||||
# Check running containers
|
|
||||||
docker ps
|
|
||||||
|
|
||||||
# Check container details
|
|
||||||
docker inspect <container-name>
|
|
||||||
|
|
||||||
# Check networks
|
|
||||||
docker network ls
|
|
||||||
|
|
||||||
# Check volumes
|
|
||||||
docker volume ls
|
|
||||||
```
|
|
||||||
|
|
||||||
## CLI Commands Reference
|
|
||||||
|
|
||||||
### Stack Operations
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Clone required repositories
|
|
||||||
laconic-so --stack <name> setup-repositories
|
|
||||||
|
|
||||||
# Build container images
|
|
||||||
laconic-so --stack <name> build-containers
|
|
||||||
```
|
|
||||||
|
|
||||||
### Deployment Initialization
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Initialize deployment spec with defaults
|
|
||||||
laconic-so --stack <name> deploy init --output <spec-file>
|
|
||||||
|
|
||||||
# Initialize with configuration
|
|
||||||
laconic-so --stack <name> deploy init --output <spec-file> \
|
|
||||||
--config PARAM1=value1,PARAM2=value2
|
|
||||||
```
|
|
||||||
|
|
||||||
### Deployment Creation
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Create deployment directory from spec
|
|
||||||
laconic-so --stack <name> deploy create \
|
|
||||||
--spec-file <spec-file> \
|
|
||||||
--deployment-dir <dir>
|
|
||||||
```
|
|
||||||
|
|
||||||
### Deployment Management
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Start all services
|
|
||||||
laconic-so deployment --dir <dir> start
|
|
||||||
|
|
||||||
# Stop services (preserves volumes)
|
|
||||||
laconic-so deployment --dir <dir> stop
|
|
||||||
|
|
||||||
# Stop and remove volumes
|
|
||||||
laconic-so deployment --dir <dir> stop --delete-volumes
|
|
||||||
|
|
||||||
# List running services
|
|
||||||
laconic-so deployment --dir <dir> ps
|
|
||||||
|
|
||||||
# View logs
|
|
||||||
laconic-so deployment --dir <dir> logs [--tail N] [--follow] [service]
|
|
||||||
|
|
||||||
# Show mapped port
|
|
||||||
laconic-so deployment --dir <dir> port <service> <private-port>
|
|
||||||
|
|
||||||
# Execute command in service
|
|
||||||
laconic-so deployment --dir <dir> exec <service> <command>
|
|
||||||
|
|
||||||
# Update configuration
|
|
||||||
laconic-so deployment --dir <dir> update
|
|
||||||
```
|
|
||||||
|
|
||||||
### Quick Deploy Commands
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Start stack directly
|
|
||||||
laconic-so --stack <name> deploy up
|
|
||||||
|
|
||||||
# Stop stack
|
|
||||||
laconic-so --stack <name> deploy down [--delete-volumes]
|
|
||||||
|
|
||||||
# View logs
|
|
||||||
laconic-so --stack <name> deploy logs
|
|
||||||
|
|
||||||
# Show port mapping
|
|
||||||
laconic-so --stack <name> deploy port <service> <port>
|
|
||||||
```
|
|
||||||
|
|
||||||
## Related Documentation
|
|
||||||
|
|
||||||
- [CLI Reference](./cli.md) - Complete CLI command documentation
|
|
||||||
- [Adding a New Stack](./adding-a-new-stack.md) - Creating custom stacks
|
|
||||||
- [Specification](./spec.md) - Internal structure and design
|
|
||||||
- [Kubernetes Enhancements](./k8s-deployment-enhancements.md) - Advanced K8s deployment options
|
|
||||||
- [Web App Deployment](./webapp.md) - Deploying web applications
|
|
||||||
|
|
||||||
## Examples
|
|
||||||
|
|
||||||
For more examples, see the test scripts:
|
|
||||||
- `scripts/quick-deploy-test.sh` - Quick deployment example
|
|
||||||
- `tests/deploy/run-deploy-test.sh` - Comprehensive test showing all features
|
|
||||||
|
|
||||||
## Summary
|
|
||||||
|
|
||||||
- Docker Compose is the default and recommended deployment mode
|
|
||||||
- Two workflows: deployment directory (recommended) or quick deploy
|
|
||||||
- The standard workflow is: setup → build → init → create → start
|
|
||||||
- Configuration is flexible with multiple override layers
|
|
||||||
- Volume persistence is automatic unless explicitly deleted
|
|
||||||
- All deployment state is contained in the deployment directory
|
|
||||||
- For Kubernetes deployments, see separate K8s documentation
|
|
||||||
|
|
||||||
You're now ready to deploy stacks using stack-orchestrator with Docker Compose!
|
|
||||||
@ -1,113 +0,0 @@
|
|||||||
# Helm Chart Generation
|
|
||||||
|
|
||||||
Generate Kubernetes Helm charts from stack compose files using Kompose.
|
|
||||||
|
|
||||||
## Prerequisites
|
|
||||||
|
|
||||||
Install Kompose:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Linux
|
|
||||||
curl -L https://github.com/kubernetes/kompose/releases/download/v1.34.0/kompose-linux-amd64 -o kompose
|
|
||||||
chmod +x kompose
|
|
||||||
sudo mv kompose /usr/local/bin/
|
|
||||||
|
|
||||||
# macOS
|
|
||||||
brew install kompose
|
|
||||||
|
|
||||||
# Verify
|
|
||||||
kompose version
|
|
||||||
```
|
|
||||||
|
|
||||||
## Usage
|
|
||||||
|
|
||||||
### 1. Create spec file
|
|
||||||
|
|
||||||
```bash
|
|
||||||
laconic-so --stack <stack-name> deploy --deploy-to k8s init \
|
|
||||||
--kube-config ~/.kube/config \
|
|
||||||
--output spec.yml
|
|
||||||
```
|
|
||||||
|
|
||||||
### 2. Generate Helm chart
|
|
||||||
|
|
||||||
```bash
|
|
||||||
laconic-so --stack <stack-name> deploy create \
|
|
||||||
--spec-file spec.yml \
|
|
||||||
--deployment-dir my-deployment \
|
|
||||||
--helm-chart
|
|
||||||
```
|
|
||||||
|
|
||||||
### 3. Deploy to Kubernetes
|
|
||||||
|
|
||||||
```bash
|
|
||||||
helm install my-release my-deployment/chart
|
|
||||||
kubectl get pods -n zenith
|
|
||||||
```
|
|
||||||
|
|
||||||
## Output Structure
|
|
||||||
|
|
||||||
```bash
|
|
||||||
my-deployment/
|
|
||||||
├── spec.yml # Reference
|
|
||||||
├── stack.yml # Reference
|
|
||||||
└── chart/ # Helm chart
|
|
||||||
├── Chart.yaml
|
|
||||||
├── README.md
|
|
||||||
└── templates/
|
|
||||||
└── *.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
## Example
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Generate chart for stage1-zenithd
|
|
||||||
laconic-so --stack stage1-zenithd deploy --deploy-to k8s init \
|
|
||||||
--kube-config ~/.kube/config \
|
|
||||||
--output stage1-spec.yml
|
|
||||||
|
|
||||||
laconic-so --stack stage1-zenithd deploy create \
|
|
||||||
--spec-file stage1-spec.yml \
|
|
||||||
--deployment-dir stage1-deployment \
|
|
||||||
--helm-chart
|
|
||||||
|
|
||||||
# Deploy
|
|
||||||
helm install stage1-zenithd stage1-deployment/chart
|
|
||||||
```
|
|
||||||
|
|
||||||
## Production Deployment (TODO)
|
|
||||||
|
|
||||||
### Local Development
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Access services using port-forward
|
|
||||||
kubectl port-forward service/zenithd 26657:26657
|
|
||||||
kubectl port-forward service/nginx-api-proxy 1317:80
|
|
||||||
kubectl port-forward service/cosmos-explorer 4173:4173
|
|
||||||
```
|
|
||||||
|
|
||||||
### Production Access Options
|
|
||||||
|
|
||||||
- Option 1: Ingress + cert-manager (Recommended)
|
|
||||||
- Install ingress-nginx + cert-manager
|
|
||||||
- Point DNS to cluster LoadBalancer IP
|
|
||||||
- Auto-provisions Let's Encrypt TLS certs
|
|
||||||
- Access: `https://api.zenith.example.com`
|
|
||||||
- Option 2: Cloud LoadBalancer
|
|
||||||
- Use cloud provider's LoadBalancer service type
|
|
||||||
- Point DNS to assigned external IP
|
|
||||||
- Manual TLS cert management
|
|
||||||
- Option 3: Bare Metal (MetalLB + Ingress)
|
|
||||||
- MetalLB provides LoadBalancer IPs from local network
|
|
||||||
- Same Ingress setup as cloud
|
|
||||||
- Option 4: NodePort + External Proxy
|
|
||||||
- Expose services on 30000-32767 range
|
|
||||||
- External nginx/Caddy proxies 80/443 → NodePort
|
|
||||||
- Manual cert management
|
|
||||||
|
|
||||||
### Changes Needed
|
|
||||||
|
|
||||||
- Add Ingress template to charts
|
|
||||||
- Add TLS configuration to values.yaml
|
|
||||||
- Document cert-manager setup
|
|
||||||
- Add production deployment guide
|
|
||||||
@ -1,26 +0,0 @@
|
|||||||
# K8S Deployment Enhancements
|
|
||||||
## Controlling pod placement
|
|
||||||
The placement of pods created as part of a stack deployment can be controlled to either avoid certain nodes, or require certain nodes.
|
|
||||||
### Pod/Node Affinity
|
|
||||||
Node affinity rules applied to pods target node labels. The effect is that a pod can only be placed on a node having the specified label value. Note that other pods that do not have any node affinity rules can also be placed on those same nodes. Thus node affinity for a pod controls where that pod can be placed, but does not control where other pods are placed.
|
|
||||||
|
|
||||||
Node affinity for stack pods is specified in the deployment's `spec.yml` file as follows:
|
|
||||||
```
|
|
||||||
node-affinities:
|
|
||||||
- label: nodetype
|
|
||||||
value: typeb
|
|
||||||
```
|
|
||||||
This example denotes that the stack's pods should only be placed on nodes that have the label `nodetype` with value `typeb`.
|
|
||||||
### Node Taint Toleration
|
|
||||||
K8s nodes can be given one or more "taints". These are special fields (distinct from labels) with a name (key) and optional value.
|
|
||||||
When placing pods, the k8s scheduler will only assign a pod to a tainted node if the pod possesses a corresponding "toleration".
|
|
||||||
This is metadata associated with the pod that specifies that the pod "tolerates" a given taint.
|
|
||||||
Therefore taint toleration provides a mechanism by which only certain pods can be placed on specific nodes, and provides a complementary mechanism to node affinity.
|
|
||||||
|
|
||||||
Taint toleration for stack pods is specified in the deployment's `spec.yml` file as follows:
|
|
||||||
```
|
|
||||||
node-tolerations:
|
|
||||||
- key: nodetype
|
|
||||||
value: typeb
|
|
||||||
```
|
|
||||||
This example denotes that the stack's pods will tolerate a taint: `nodetype=typeb`
|
|
||||||
@ -26,3 +26,4 @@ $ ./scripts/tag_new_release.sh 1 0 17
|
|||||||
$ ./scripts/build_shiv_package.sh
|
$ ./scripts/build_shiv_package.sh
|
||||||
$ ./scripts/publish_shiv_package_github.sh 1 0 17
|
$ ./scripts/publish_shiv_package_github.sh 1 0 17
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|||||||
@ -1,128 +0,0 @@
|
|||||||
# Deploying to the Laconic Network
|
|
||||||
|
|
||||||
## Overview
|
|
||||||
|
|
||||||
The Laconic network uses a **registry-based deployment model** where everything is published as blockchain records.
|
|
||||||
|
|
||||||
## Key Documentation in stack-orchestrator
|
|
||||||
|
|
||||||
- `docs/laconicd-with-console.md` - Setting up a laconicd network
|
|
||||||
- `docs/webapp.md` - Webapp building/running
|
|
||||||
- `stack_orchestrator/deploy/webapp/` - Implementation (14 modules)
|
|
||||||
|
|
||||||
## Core Concepts
|
|
||||||
|
|
||||||
### LRN (Laconic Resource Name)
|
|
||||||
Format: `lrn://laconic/[namespace]/[name]`
|
|
||||||
|
|
||||||
Examples:
|
|
||||||
- `lrn://laconic/deployers/my-deployer-name`
|
|
||||||
- `lrn://laconic/dns/example.com`
|
|
||||||
- `lrn://laconic/deployments/example.com`
|
|
||||||
|
|
||||||
### Registry Record Types
|
|
||||||
|
|
||||||
| Record Type | Purpose |
|
|
||||||
|-------------|---------|
|
|
||||||
| `ApplicationRecord` | Published app metadata |
|
|
||||||
| `WebappDeployer` | Deployment service offering |
|
|
||||||
| `ApplicationDeploymentRequest` | User's request to deploy |
|
|
||||||
| `ApplicationDeploymentAuction` | Optional bidding for deployers |
|
|
||||||
| `ApplicationDeploymentRecord` | Completed deployment result |
|
|
||||||
|
|
||||||
## Deployment Workflows
|
|
||||||
|
|
||||||
### 1. Direct Deployment
|
|
||||||
|
|
||||||
```
|
|
||||||
User publishes ApplicationDeploymentRequest
|
|
||||||
→ targets specific WebappDeployer (by LRN)
|
|
||||||
→ includes payment TX hash
|
|
||||||
→ Deployer picks up request, builds, deploys, publishes result
|
|
||||||
```
|
|
||||||
|
|
||||||
### 2. Auction-Based Deployment
|
|
||||||
|
|
||||||
```
|
|
||||||
User publishes ApplicationDeploymentAuction
|
|
||||||
→ Deployers bid (commit/reveal phases)
|
|
||||||
→ Winner selected
|
|
||||||
→ User publishes request targeting winner
|
|
||||||
```
|
|
||||||
|
|
||||||
## Key CLI Commands
|
|
||||||
|
|
||||||
### Publish a Deployer Service
|
|
||||||
```bash
|
|
||||||
laconic-so publish-webapp-deployer --laconic-config config.yml \
|
|
||||||
--api-url https://deployer-api.example.com \
|
|
||||||
--name my-deployer \
|
|
||||||
--payment-address laconic1... \
|
|
||||||
--minimum-payment 1000alnt
|
|
||||||
```
|
|
||||||
|
|
||||||
### Request Deployment (User Side)
|
|
||||||
```bash
|
|
||||||
laconic-so request-webapp-deployment --laconic-config config.yml \
|
|
||||||
--app lrn://laconic/apps/my-app \
|
|
||||||
--deployer lrn://laconic/deployers/xyz \
|
|
||||||
--make-payment auto
|
|
||||||
```
|
|
||||||
|
|
||||||
### Run Deployer Service (Deployer Side)
|
|
||||||
```bash
|
|
||||||
laconic-so deploy-webapp-from-registry --laconic-config config.yml --discover
|
|
||||||
```
|
|
||||||
|
|
||||||
## Laconic Config File
|
|
||||||
|
|
||||||
All tools require a laconic config file (`laconic.toml`):
|
|
||||||
|
|
||||||
```toml
|
|
||||||
[cosmos]
|
|
||||||
address_prefix = "laconic"
|
|
||||||
chain_id = "laconic_9000-1"
|
|
||||||
endpoint = "http://localhost:26657"
|
|
||||||
key = "<account-name>"
|
|
||||||
password = "<account-password>"
|
|
||||||
```
|
|
||||||
|
|
||||||
## Setting Up a Local Laconicd Network
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Clone and build
|
|
||||||
laconic-so --stack fixturenet-laconic-loaded setup-repositories
|
|
||||||
laconic-so --stack fixturenet-laconic-loaded build-containers
|
|
||||||
laconic-so --stack fixturenet-laconic-loaded deploy create
|
|
||||||
laconic-so deployment --dir laconic-loaded-deployment start
|
|
||||||
|
|
||||||
# Check status
|
|
||||||
laconic-so deployment --dir laconic-loaded-deployment exec cli "laconic registry status"
|
|
||||||
```
|
|
||||||
|
|
||||||
## Key Implementation Files
|
|
||||||
|
|
||||||
| File | Purpose |
|
|
||||||
|------|---------|
|
|
||||||
| `publish_webapp_deployer.py` | Register deployment service on network |
|
|
||||||
| `publish_deployment_auction.py` | Create auction for deployers to bid on |
|
|
||||||
| `handle_deployment_auction.py` | Monitor and bid on auctions (deployer-side) |
|
|
||||||
| `request_webapp_deployment.py` | Create deployment request (user-side) |
|
|
||||||
| `deploy_webapp_from_registry.py` | Process requests and deploy (deployer-side) |
|
|
||||||
| `request_webapp_undeployment.py` | Request app removal |
|
|
||||||
| `undeploy_webapp_from_registry.py` | Process removal requests |
|
|
||||||
| `util.py` | LaconicRegistryClient - all registry interactions |
|
|
||||||
|
|
||||||
## Payment System
|
|
||||||
|
|
||||||
- **Token Denom**: `alnt` (Laconic network tokens)
|
|
||||||
- **Payment Options**:
|
|
||||||
- `--make-payment`: Create new payment with amount (or "auto" for deployer's minimum)
|
|
||||||
- `--use-payment`: Reference existing payment TX
|
|
||||||
|
|
||||||
## What's NOT Well-Documented
|
|
||||||
|
|
||||||
1. No end-to-end tutorial for full deployment workflow
|
|
||||||
2. Stack publishing (vs webapp) process unclear
|
|
||||||
3. LRN naming conventions not formally specified
|
|
||||||
4. Payment economics and token mechanics
|
|
||||||
110
pyproject.toml
110
pyproject.toml
@ -1,110 +0,0 @@
|
|||||||
[build-system]
|
|
||||||
requires = ["setuptools>=61.0", "wheel"]
|
|
||||||
build-backend = "setuptools.build_meta"
|
|
||||||
|
|
||||||
[project]
|
|
||||||
name = "laconic-stack-orchestrator"
|
|
||||||
version = "1.1.0"
|
|
||||||
description = "Orchestrates deployment of the Laconic stack"
|
|
||||||
readme = "README.md"
|
|
||||||
license = {text = "GNU Affero General Public License"}
|
|
||||||
authors = [
|
|
||||||
{name = "Cerc", email = "info@cerc.io"}
|
|
||||||
]
|
|
||||||
requires-python = ">=3.8"
|
|
||||||
classifiers = [
|
|
||||||
"Programming Language :: Python :: 3.8",
|
|
||||||
"Operating System :: OS Independent",
|
|
||||||
]
|
|
||||||
dependencies = [
|
|
||||||
"python-decouple>=3.8",
|
|
||||||
"python-dotenv==1.0.0",
|
|
||||||
"GitPython>=3.1.32",
|
|
||||||
"tqdm>=4.65.0",
|
|
||||||
"python-on-whales>=0.64.0",
|
|
||||||
"click>=8.1.6",
|
|
||||||
"PyYAML>=6.0.1",
|
|
||||||
"ruamel.yaml>=0.17.32",
|
|
||||||
"pydantic==1.10.9",
|
|
||||||
"tomli==2.0.1",
|
|
||||||
"validators==0.22.0",
|
|
||||||
"kubernetes>=28.1.0",
|
|
||||||
"humanfriendly>=10.0",
|
|
||||||
"python-gnupg>=0.5.2",
|
|
||||||
"requests>=2.3.2",
|
|
||||||
]
|
|
||||||
|
|
||||||
[project.optional-dependencies]
|
|
||||||
dev = [
|
|
||||||
"pytest>=7.0.0",
|
|
||||||
"pytest-cov>=4.0.0",
|
|
||||||
"black>=22.0.0",
|
|
||||||
"flake8>=5.0.0",
|
|
||||||
"pyright>=1.1.0",
|
|
||||||
"yamllint>=1.28.0",
|
|
||||||
"pre-commit>=3.0.0",
|
|
||||||
]
|
|
||||||
|
|
||||||
[project.scripts]
|
|
||||||
laconic-so = "stack_orchestrator.main:cli"
|
|
||||||
|
|
||||||
[project.urls]
|
|
||||||
Homepage = "https://git.vdb.to/cerc-io/stack-orchestrator"
|
|
||||||
|
|
||||||
[tool.setuptools.packages.find]
|
|
||||||
where = ["."]
|
|
||||||
|
|
||||||
[tool.setuptools.package-data]
|
|
||||||
"*" = ["data/**"]
|
|
||||||
|
|
||||||
[tool.black]
|
|
||||||
line-length = 88
|
|
||||||
target-version = ['py38']
|
|
||||||
|
|
||||||
[tool.flake8]
|
|
||||||
max-line-length = 88
|
|
||||||
extend-ignore = ["E203", "W503", "E402"]
|
|
||||||
|
|
||||||
[tool.pyright]
|
|
||||||
pythonVersion = "3.9"
|
|
||||||
typeCheckingMode = "basic"
|
|
||||||
reportMissingImports = "none"
|
|
||||||
reportMissingModuleSource = "none"
|
|
||||||
reportUnusedImport = "error"
|
|
||||||
include = ["stack_orchestrator/**/*.py", "tests/**/*.py"]
|
|
||||||
exclude = ["**/build/**", "**/__pycache__/**"]
|
|
||||||
|
|
||||||
[tool.mypy]
|
|
||||||
python_version = "3.8"
|
|
||||||
warn_return_any = true
|
|
||||||
warn_unused_configs = true
|
|
||||||
disallow_untyped_defs = true
|
|
||||||
|
|
||||||
[tool.pytest.ini_options]
|
|
||||||
testpaths = ["tests"]
|
|
||||||
python_files = ["test_*.py"]
|
|
||||||
python_classes = ["Test*"]
|
|
||||||
python_functions = ["test_*"]
|
|
||||||
markers = [
|
|
||||||
"slow: marks tests as slow (deselect with '-m \"not slow\"')",
|
|
||||||
"e2e: marks tests as end-to-end (requires real infrastructure)",
|
|
||||||
]
|
|
||||||
addopts = [
|
|
||||||
"--cov",
|
|
||||||
"--cov-report=term-missing",
|
|
||||||
"--cov-report=html",
|
|
||||||
"--strict-markers",
|
|
||||||
]
|
|
||||||
asyncio_default_fixture_loop_scope = "function"
|
|
||||||
|
|
||||||
[tool.coverage.run]
|
|
||||||
source = ["stack_orchestrator"]
|
|
||||||
disable_warnings = ["couldnt-parse"]
|
|
||||||
|
|
||||||
[tool.coverage.report]
|
|
||||||
exclude_lines = [
|
|
||||||
"pragma: no cover",
|
|
||||||
"def __repr__",
|
|
||||||
"raise AssertionError",
|
|
||||||
"raise NotImplementedError",
|
|
||||||
]
|
|
||||||
@ -1,9 +0,0 @@
|
|||||||
{
|
|
||||||
"pythonVersion": "3.9",
|
|
||||||
"typeCheckingMode": "basic",
|
|
||||||
"reportMissingImports": "none",
|
|
||||||
"reportMissingModuleSource": "none",
|
|
||||||
"reportUnusedImport": "error",
|
|
||||||
"include": ["stack_orchestrator/**/*.py", "tests/**/*.py"],
|
|
||||||
"exclude": ["**/build/**", "**/__pycache__/**"]
|
|
||||||
}
|
|
||||||
@ -11,5 +11,3 @@ tomli==2.0.1
|
|||||||
validators==0.22.0
|
validators==0.22.0
|
||||||
kubernetes>=28.1.0
|
kubernetes>=28.1.0
|
||||||
humanfriendly>=10.0
|
humanfriendly>=10.0
|
||||||
python-gnupg>=0.5.2
|
|
||||||
requests>=2.3.2
|
|
||||||
|
|||||||
26
setup.py
26
setup.py
@ -1,7 +1,5 @@
|
|||||||
# See
|
# See https://medium.com/nerd-for-tech/how-to-build-and-distribute-a-cli-tool-with-python-537ae41d9d78
|
||||||
# https://medium.com/nerd-for-tech/how-to-build-and-distribute-a-cli-tool-with-python-537ae41d9d78
|
|
||||||
from setuptools import setup, find_packages
|
from setuptools import setup, find_packages
|
||||||
|
|
||||||
with open("README.md", "r", encoding="utf-8") as fh:
|
with open("README.md", "r", encoding="utf-8") as fh:
|
||||||
long_description = fh.read()
|
long_description = fh.read()
|
||||||
with open("requirements.txt", "r", encoding="utf-8") as fh:
|
with open("requirements.txt", "r", encoding="utf-8") as fh:
|
||||||
@ -9,26 +7,26 @@ with open("requirements.txt", "r", encoding="utf-8") as fh:
|
|||||||
with open("stack_orchestrator/data/version.txt", "r", encoding="utf-8") as fh:
|
with open("stack_orchestrator/data/version.txt", "r", encoding="utf-8") as fh:
|
||||||
version = fh.readlines()[-1].strip(" \n")
|
version = fh.readlines()[-1].strip(" \n")
|
||||||
setup(
|
setup(
|
||||||
name="laconic-stack-orchestrator",
|
name='laconic-stack-orchestrator',
|
||||||
version=version,
|
version=version,
|
||||||
author="Cerc",
|
author='Cerc',
|
||||||
author_email="info@cerc.io",
|
author_email='info@cerc.io',
|
||||||
license="GNU Affero General Public License",
|
license='GNU Affero General Public License',
|
||||||
description="Orchestrates deployment of the Laconic stack",
|
description='Orchestrates deployment of the Laconic stack',
|
||||||
long_description=long_description,
|
long_description=long_description,
|
||||||
long_description_content_type="text/markdown",
|
long_description_content_type="text/markdown",
|
||||||
url="https://git.vdb.to/cerc-io/stack-orchestrator",
|
url='https://git.vdb.to/cerc-io/stack-orchestrator',
|
||||||
py_modules=["stack_orchestrator"],
|
py_modules=['stack_orchestrator'],
|
||||||
packages=find_packages(),
|
packages=find_packages(),
|
||||||
install_requires=[requirements],
|
install_requires=[requirements],
|
||||||
python_requires=">=3.7",
|
python_requires='>=3.7',
|
||||||
include_package_data=True,
|
include_package_data=True,
|
||||||
package_data={"": ["data/**"]},
|
package_data={'': ['data/**']},
|
||||||
classifiers=[
|
classifiers=[
|
||||||
"Programming Language :: Python :: 3.8",
|
"Programming Language :: Python :: 3.8",
|
||||||
"Operating System :: OS Independent",
|
"Operating System :: OS Independent",
|
||||||
],
|
],
|
||||||
entry_points={
|
entry_points={
|
||||||
"console_scripts": ["laconic-so=stack_orchestrator.main:cli"],
|
'console_scripts': ['laconic-so=stack_orchestrator.main:cli'],
|
||||||
},
|
}
|
||||||
)
|
)
|
||||||
|
|||||||
@ -23,10 +23,11 @@ def get_stack(config, stack):
|
|||||||
if stack == "package-registry":
|
if stack == "package-registry":
|
||||||
return package_registry_stack(config, stack)
|
return package_registry_stack(config, stack)
|
||||||
else:
|
else:
|
||||||
return default_stack(config, stack)
|
return base_stack(config, stack)
|
||||||
|
|
||||||
|
|
||||||
class base_stack(ABC):
|
class base_stack(ABC):
|
||||||
|
|
||||||
def __init__(self, config, stack):
|
def __init__(self, config, stack):
|
||||||
self.config = config
|
self.config = config
|
||||||
self.stack = stack
|
self.stack = stack
|
||||||
@ -40,27 +41,15 @@ class base_stack(ABC):
|
|||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
class default_stack(base_stack):
|
|
||||||
"""Default stack implementation for stacks without specific handling."""
|
|
||||||
|
|
||||||
def ensure_available(self):
|
|
||||||
return True
|
|
||||||
|
|
||||||
def get_url(self):
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
class package_registry_stack(base_stack):
|
class package_registry_stack(base_stack):
|
||||||
|
|
||||||
def ensure_available(self):
|
def ensure_available(self):
|
||||||
self.url = "<no registry url set>"
|
self.url = "<no registry url set>"
|
||||||
# Check if we were given an external registry URL
|
# Check if we were given an external registry URL
|
||||||
url_from_environment = os.environ.get("CERC_NPM_REGISTRY_URL")
|
url_from_environment = os.environ.get("CERC_NPM_REGISTRY_URL")
|
||||||
if url_from_environment:
|
if url_from_environment:
|
||||||
if self.config.verbose:
|
if self.config.verbose:
|
||||||
print(
|
print(f"Using package registry url from CERC_NPM_REGISTRY_URL: {url_from_environment}")
|
||||||
f"Using package registry url from CERC_NPM_REGISTRY_URL: "
|
|
||||||
f"{url_from_environment}"
|
|
||||||
)
|
|
||||||
self.url = url_from_environment
|
self.url = url_from_environment
|
||||||
else:
|
else:
|
||||||
# Otherwise we expect to use the local package-registry stack
|
# Otherwise we expect to use the local package-registry stack
|
||||||
@ -73,16 +62,10 @@ class package_registry_stack(base_stack):
|
|||||||
# TODO: get url from deploy-stack
|
# TODO: get url from deploy-stack
|
||||||
self.url = "http://gitea.local:3000/api/packages/cerc-io/npm/"
|
self.url = "http://gitea.local:3000/api/packages/cerc-io/npm/"
|
||||||
else:
|
else:
|
||||||
# If not, print a message about how to start it and return fail to the
|
# If not, print a message about how to start it and return fail to the caller
|
||||||
# caller
|
print("ERROR: The package-registry stack is not running, and no external registry "
|
||||||
print(
|
"specified with CERC_NPM_REGISTRY_URL")
|
||||||
"ERROR: The package-registry stack is not running, "
|
print("ERROR: Start the local package registry with: laconic-so --stack package-registry deploy-system up")
|
||||||
"and no external registry specified with CERC_NPM_REGISTRY_URL"
|
|
||||||
)
|
|
||||||
print(
|
|
||||||
"ERROR: Start the local package registry with: "
|
|
||||||
"laconic-so --stack package-registry deploy-system up"
|
|
||||||
)
|
|
||||||
return False
|
return False
|
||||||
return True
|
return True
|
||||||
|
|
||||||
@ -93,9 +76,7 @@ class package_registry_stack(base_stack):
|
|||||||
def get_npm_registry_url():
|
def get_npm_registry_url():
|
||||||
# If an auth token is not defined, we assume the default should be the cerc registry
|
# If an auth token is not defined, we assume the default should be the cerc registry
|
||||||
# If an auth token is defined, we assume the local gitea should be used.
|
# If an auth token is defined, we assume the local gitea should be used.
|
||||||
default_npm_registry_url = (
|
default_npm_registry_url = "http://gitea.local:3000/api/packages/cerc-io/npm/" if config(
|
||||||
"http://gitea.local:3000/api/packages/cerc-io/npm/"
|
"CERC_NPM_AUTH_TOKEN", default=None
|
||||||
if config("CERC_NPM_AUTH_TOKEN", default=None)
|
) else "https://git.vdb.to/api/packages/cerc-io/npm/"
|
||||||
else "https://git.vdb.to/api/packages/cerc-io/npm/"
|
|
||||||
)
|
|
||||||
return config("CERC_NPM_REGISTRY_URL", default=default_npm_registry_url)
|
return config("CERC_NPM_REGISTRY_URL", default=default_npm_registry_url)
|
||||||
|
|||||||
@ -18,8 +18,7 @@
|
|||||||
# env vars:
|
# env vars:
|
||||||
# CERC_REPO_BASE_DIR defaults to ~/cerc
|
# CERC_REPO_BASE_DIR defaults to ~/cerc
|
||||||
|
|
||||||
# TODO: display the available list of containers;
|
# TODO: display the available list of containers; allow re-build of either all or specific containers
|
||||||
# allow re-build of either all or specific containers
|
|
||||||
|
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
@ -35,17 +34,14 @@ from stack_orchestrator.build.publish import publish_image
|
|||||||
from stack_orchestrator.build.build_util import get_containers_in_scope
|
from stack_orchestrator.build.build_util import get_containers_in_scope
|
||||||
|
|
||||||
# TODO: find a place for this
|
# TODO: find a place for this
|
||||||
# epilog="Config provided either in .env or settings.ini or env vars:
|
# epilog="Config provided either in .env or settings.ini or env vars: CERC_REPO_BASE_DIR (defaults to ~/cerc)"
|
||||||
# CERC_REPO_BASE_DIR (defaults to ~/cerc)"
|
|
||||||
|
|
||||||
|
|
||||||
def make_container_build_env(
|
def make_container_build_env(dev_root_path: str,
|
||||||
dev_root_path: str,
|
|
||||||
container_build_dir: str,
|
container_build_dir: str,
|
||||||
debug: bool,
|
debug: bool,
|
||||||
force_rebuild: bool,
|
force_rebuild: bool,
|
||||||
extra_build_args: str,
|
extra_build_args: str):
|
||||||
):
|
|
||||||
container_build_env = {
|
container_build_env = {
|
||||||
"CERC_NPM_REGISTRY_URL": get_npm_registry_url(),
|
"CERC_NPM_REGISTRY_URL": get_npm_registry_url(),
|
||||||
"CERC_GO_AUTH_TOKEN": config("CERC_GO_AUTH_TOKEN", default=""),
|
"CERC_GO_AUTH_TOKEN": config("CERC_GO_AUTH_TOKEN", default=""),
|
||||||
@ -54,15 +50,11 @@ def make_container_build_env(
|
|||||||
"CERC_CONTAINER_BASE_DIR": container_build_dir,
|
"CERC_CONTAINER_BASE_DIR": container_build_dir,
|
||||||
"CERC_HOST_UID": f"{os.getuid()}",
|
"CERC_HOST_UID": f"{os.getuid()}",
|
||||||
"CERC_HOST_GID": f"{os.getgid()}",
|
"CERC_HOST_GID": f"{os.getgid()}",
|
||||||
"DOCKER_BUILDKIT": config("DOCKER_BUILDKIT", default="0"),
|
"DOCKER_BUILDKIT": config("DOCKER_BUILDKIT", default="0")
|
||||||
}
|
}
|
||||||
container_build_env.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
|
container_build_env.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
|
||||||
container_build_env.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
|
container_build_env.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
|
||||||
container_build_env.update(
|
container_build_env.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {})
|
||||||
{"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args}
|
|
||||||
if extra_build_args
|
|
||||||
else {}
|
|
||||||
)
|
|
||||||
docker_host_env = os.getenv("DOCKER_HOST")
|
docker_host_env = os.getenv("DOCKER_HOST")
|
||||||
if docker_host_env:
|
if docker_host_env:
|
||||||
container_build_env.update({"DOCKER_HOST": docker_host_env})
|
container_build_env.update({"DOCKER_HOST": docker_host_env})
|
||||||
@ -75,18 +67,12 @@ def process_container(build_context: BuildContext) -> bool:
|
|||||||
print(f"Building: {build_context.container}")
|
print(f"Building: {build_context.container}")
|
||||||
|
|
||||||
default_container_tag = f"{build_context.container}:local"
|
default_container_tag = f"{build_context.container}:local"
|
||||||
build_context.container_build_env.update(
|
build_context.container_build_env.update({"CERC_DEFAULT_CONTAINER_IMAGE_TAG": default_container_tag})
|
||||||
{"CERC_DEFAULT_CONTAINER_IMAGE_TAG": default_container_tag}
|
|
||||||
)
|
|
||||||
|
|
||||||
# Check if this is in an external stack
|
# Check if this is in an external stack
|
||||||
if stack_is_external(build_context.stack):
|
if stack_is_external(build_context.stack):
|
||||||
container_parent_dir = Path(build_context.stack).parent.parent.joinpath(
|
container_parent_dir = Path(build_context.stack).parent.parent.joinpath("container-build")
|
||||||
"container-build"
|
temp_build_dir = container_parent_dir.joinpath(build_context.container.replace("/", "-"))
|
||||||
)
|
|
||||||
temp_build_dir = container_parent_dir.joinpath(
|
|
||||||
build_context.container.replace("/", "-")
|
|
||||||
)
|
|
||||||
temp_build_script_filename = temp_build_dir.joinpath("build.sh")
|
temp_build_script_filename = temp_build_dir.joinpath("build.sh")
|
||||||
# Now check if the container exists in the external stack.
|
# Now check if the container exists in the external stack.
|
||||||
if not temp_build_script_filename.exists():
|
if not temp_build_script_filename.exists():
|
||||||
@ -104,34 +90,21 @@ def process_container(build_context: BuildContext) -> bool:
|
|||||||
build_command = build_script_filename.as_posix()
|
build_command = build_script_filename.as_posix()
|
||||||
else:
|
else:
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(
|
print(f"No script file found: {build_script_filename}, using default build script")
|
||||||
f"No script file found: {build_script_filename}, "
|
repo_dir = build_context.container.split('/')[1]
|
||||||
"using default build script"
|
# TODO: make this less of a hack -- should be specified in some metadata somewhere
|
||||||
)
|
# Check if we have a repo for this container. If not, set the context dir to the container-build subdir
|
||||||
repo_dir = build_context.container.split("/")[1]
|
|
||||||
# TODO: make this less of a hack -- should be specified in
|
|
||||||
# some metadata somewhere. Check if we have a repo for this
|
|
||||||
# container. If not, set the context dir to container-build subdir
|
|
||||||
repo_full_path = os.path.join(build_context.dev_root_path, repo_dir)
|
repo_full_path = os.path.join(build_context.dev_root_path, repo_dir)
|
||||||
repo_dir_or_build_dir = (
|
repo_dir_or_build_dir = repo_full_path if os.path.exists(repo_full_path) else build_dir
|
||||||
repo_full_path if os.path.exists(repo_full_path) else build_dir
|
build_command = os.path.join(build_context.container_build_dir,
|
||||||
)
|
"default-build.sh") + f" {default_container_tag} {repo_dir_or_build_dir}"
|
||||||
build_command = (
|
|
||||||
os.path.join(build_context.container_build_dir, "default-build.sh")
|
|
||||||
+ f" {default_container_tag} {repo_dir_or_build_dir}"
|
|
||||||
)
|
|
||||||
if not opts.o.dry_run:
|
if not opts.o.dry_run:
|
||||||
# No PATH at all causes failures with podman.
|
# No PATH at all causes failures with podman.
|
||||||
if "PATH" not in build_context.container_build_env:
|
if "PATH" not in build_context.container_build_env:
|
||||||
build_context.container_build_env["PATH"] = os.environ["PATH"]
|
build_context.container_build_env["PATH"] = os.environ["PATH"]
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(
|
print(f"Executing: {build_command} with environment: {build_context.container_build_env}")
|
||||||
f"Executing: {build_command} with environment: "
|
build_result = subprocess.run(build_command, shell=True, env=build_context.container_build_env)
|
||||||
f"{build_context.container_build_env}"
|
|
||||||
)
|
|
||||||
build_result = subprocess.run(
|
|
||||||
build_command, shell=True, env=build_context.container_build_env
|
|
||||||
)
|
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f"Return code is: {build_result.returncode}")
|
print(f"Return code is: {build_result.returncode}")
|
||||||
if build_result.returncode != 0:
|
if build_result.returncode != 0:
|
||||||
@ -144,61 +117,33 @@ def process_container(build_context: BuildContext) -> bool:
|
|||||||
|
|
||||||
|
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.option("--include", help="only build these containers")
|
@click.option('--include', help="only build these containers")
|
||||||
@click.option("--exclude", help="don't build these containers")
|
@click.option('--exclude', help="don\'t build these containers")
|
||||||
@click.option(
|
@click.option("--force-rebuild", is_flag=True, default=False, help="Override dependency checking -- always rebuild")
|
||||||
"--force-rebuild",
|
|
||||||
is_flag=True,
|
|
||||||
default=False,
|
|
||||||
help="Override dependency checking -- always rebuild",
|
|
||||||
)
|
|
||||||
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
||||||
@click.option(
|
@click.option("--publish-images", is_flag=True, default=False, help="Publish the built images in the specified image registry")
|
||||||
"--publish-images",
|
@click.option("--image-registry", help="Specify the image registry for --publish-images")
|
||||||
is_flag=True,
|
|
||||||
default=False,
|
|
||||||
help="Publish the built images in the specified image registry",
|
|
||||||
)
|
|
||||||
@click.option(
|
|
||||||
"--image-registry", help="Specify the image registry for --publish-images"
|
|
||||||
)
|
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(
|
def command(ctx, include, exclude, force_rebuild, extra_build_args, publish_images, image_registry):
|
||||||
ctx,
|
'''build the set of containers required for a complete stack'''
|
||||||
include,
|
|
||||||
exclude,
|
|
||||||
force_rebuild,
|
|
||||||
extra_build_args,
|
|
||||||
publish_images,
|
|
||||||
image_registry,
|
|
||||||
):
|
|
||||||
"""build the set of containers required for a complete stack"""
|
|
||||||
|
|
||||||
local_stack = ctx.obj.local_stack
|
local_stack = ctx.obj.local_stack
|
||||||
stack = ctx.obj.stack
|
stack = ctx.obj.stack
|
||||||
|
|
||||||
# See: https://stackoverflow.com/questions/25389095/
|
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
|
||||||
# python-get-path-of-root-project-structure
|
container_build_dir = Path(__file__).absolute().parent.parent.joinpath("data", "container-build")
|
||||||
container_build_dir = (
|
|
||||||
Path(__file__).absolute().parent.parent.joinpath("data", "container-build")
|
|
||||||
)
|
|
||||||
|
|
||||||
if local_stack:
|
if local_stack:
|
||||||
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
|
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
|
||||||
print(
|
print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
|
||||||
f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
|
|
||||||
f"{dev_root_path}"
|
|
||||||
)
|
|
||||||
else:
|
else:
|
||||||
dev_root_path = os.path.expanduser(
|
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
|
||||||
config("CERC_REPO_BASE_DIR", default="~/cerc")
|
|
||||||
)
|
|
||||||
|
|
||||||
if not opts.o.quiet:
|
if not opts.o.quiet:
|
||||||
print(f"Dev Root is: {dev_root_path}")
|
print(f'Dev Root is: {dev_root_path}')
|
||||||
|
|
||||||
if not os.path.isdir(dev_root_path):
|
if not os.path.isdir(dev_root_path):
|
||||||
print("Dev root directory doesn't exist, creating")
|
print('Dev root directory doesn\'t exist, creating')
|
||||||
|
|
||||||
if publish_images:
|
if publish_images:
|
||||||
if not image_registry:
|
if not image_registry:
|
||||||
@ -206,22 +151,21 @@ def command(
|
|||||||
|
|
||||||
containers_in_scope = get_containers_in_scope(stack)
|
containers_in_scope = get_containers_in_scope(stack)
|
||||||
|
|
||||||
container_build_env = make_container_build_env(
|
container_build_env = make_container_build_env(dev_root_path,
|
||||||
dev_root_path,
|
|
||||||
container_build_dir,
|
container_build_dir,
|
||||||
opts.o.debug,
|
opts.o.debug,
|
||||||
force_rebuild,
|
force_rebuild,
|
||||||
extra_build_args,
|
extra_build_args)
|
||||||
)
|
|
||||||
|
|
||||||
for container in containers_in_scope:
|
for container in containers_in_scope:
|
||||||
if include_exclude_check(container, include, exclude):
|
if include_exclude_check(container, include, exclude):
|
||||||
|
|
||||||
build_context = BuildContext(
|
build_context = BuildContext(
|
||||||
stack,
|
stack,
|
||||||
container,
|
container,
|
||||||
container_build_dir,
|
container_build_dir,
|
||||||
container_build_env,
|
container_build_env,
|
||||||
dev_root_path,
|
dev_root_path
|
||||||
)
|
)
|
||||||
result = process_container(build_context)
|
result = process_container(build_context)
|
||||||
if result:
|
if result:
|
||||||
@ -230,16 +174,10 @@ def command(
|
|||||||
else:
|
else:
|
||||||
print(f"Error running build for {build_context.container}")
|
print(f"Error running build for {build_context.container}")
|
||||||
if not opts.o.continue_on_error:
|
if not opts.o.continue_on_error:
|
||||||
error_exit(
|
error_exit("container build failed and --continue-on-error not set, exiting")
|
||||||
"container build failed and --continue-on-error "
|
|
||||||
"not set, exiting"
|
|
||||||
)
|
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
else:
|
else:
|
||||||
print(
|
print("****** Container Build Error, continuing because --continue-on-error is set")
|
||||||
"****** Container Build Error, continuing because "
|
|
||||||
"--continue-on-error is set"
|
|
||||||
)
|
|
||||||
else:
|
else:
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f"Excluding: {container}")
|
print(f"Excluding: {container}")
|
||||||
|
|||||||
@ -32,18 +32,14 @@ builder_js_image_name = "cerc/builder-js:local"
|
|||||||
|
|
||||||
|
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.option("--include", help="only build these packages")
|
@click.option('--include', help="only build these packages")
|
||||||
@click.option("--exclude", help="don't build these packages")
|
@click.option('--exclude', help="don\'t build these packages")
|
||||||
@click.option(
|
@click.option("--force-rebuild", is_flag=True, default=False,
|
||||||
"--force-rebuild",
|
help="Override existing target package version check -- force rebuild")
|
||||||
is_flag=True,
|
|
||||||
default=False,
|
|
||||||
help="Override existing target package version check -- force rebuild",
|
|
||||||
)
|
|
||||||
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(ctx, include, exclude, force_rebuild, extra_build_args):
|
def command(ctx, include, exclude, force_rebuild, extra_build_args):
|
||||||
"""build the set of npm packages required for a complete stack"""
|
'''build the set of npm packages required for a complete stack'''
|
||||||
|
|
||||||
quiet = ctx.obj.quiet
|
quiet = ctx.obj.quiet
|
||||||
verbose = ctx.obj.verbose
|
verbose = ctx.obj.verbose
|
||||||
@ -70,53 +66,44 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
|
|||||||
|
|
||||||
if local_stack:
|
if local_stack:
|
||||||
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
|
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
|
||||||
print(
|
print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
|
||||||
f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
|
|
||||||
f"{dev_root_path}"
|
|
||||||
)
|
|
||||||
else:
|
else:
|
||||||
dev_root_path = os.path.expanduser(
|
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
|
||||||
config("CERC_REPO_BASE_DIR", default="~/cerc")
|
|
||||||
)
|
|
||||||
|
|
||||||
build_root_path = os.path.join(dev_root_path, "build-trees")
|
build_root_path = os.path.join(dev_root_path, "build-trees")
|
||||||
|
|
||||||
if verbose:
|
if verbose:
|
||||||
print(f"Dev Root is: {dev_root_path}")
|
print(f'Dev Root is: {dev_root_path}')
|
||||||
|
|
||||||
if not os.path.isdir(dev_root_path):
|
if not os.path.isdir(dev_root_path):
|
||||||
print("Dev root directory doesn't exist, creating")
|
print('Dev root directory doesn\'t exist, creating')
|
||||||
os.makedirs(dev_root_path)
|
os.makedirs(dev_root_path)
|
||||||
if not os.path.isdir(dev_root_path):
|
if not os.path.isdir(dev_root_path):
|
||||||
print("Build root directory doesn't exist, creating")
|
print('Build root directory doesn\'t exist, creating')
|
||||||
os.makedirs(build_root_path)
|
os.makedirs(build_root_path)
|
||||||
|
|
||||||
# See: https://stackoverflow.com/a/20885799/1701505
|
# See: https://stackoverflow.com/a/20885799/1701505
|
||||||
from stack_orchestrator import data
|
from stack_orchestrator import data
|
||||||
|
with importlib.resources.open_text(data, "npm-package-list.txt") as package_list_file:
|
||||||
with importlib.resources.open_text(
|
|
||||||
data, "npm-package-list.txt"
|
|
||||||
) as package_list_file:
|
|
||||||
all_packages = package_list_file.read().splitlines()
|
all_packages = package_list_file.read().splitlines()
|
||||||
|
|
||||||
packages_in_scope = []
|
packages_in_scope = []
|
||||||
if stack:
|
if stack:
|
||||||
stack_config = get_parsed_stack_config(stack)
|
stack_config = get_parsed_stack_config(stack)
|
||||||
# TODO: syntax check the input here
|
# TODO: syntax check the input here
|
||||||
packages_in_scope = stack_config["npms"]
|
packages_in_scope = stack_config['npms']
|
||||||
else:
|
else:
|
||||||
packages_in_scope = all_packages
|
packages_in_scope = all_packages
|
||||||
|
|
||||||
if verbose:
|
if verbose:
|
||||||
print(f"Packages: {packages_in_scope}")
|
print(f'Packages: {packages_in_scope}')
|
||||||
|
|
||||||
def build_package(package):
|
def build_package(package):
|
||||||
if not quiet:
|
if not quiet:
|
||||||
print(f"Building npm package: {package}")
|
print(f"Building npm package: {package}")
|
||||||
repo_dir = package
|
repo_dir = package
|
||||||
repo_full_path = os.path.join(dev_root_path, repo_dir)
|
repo_full_path = os.path.join(dev_root_path, repo_dir)
|
||||||
# Copy the repo and build that to avoid propagating
|
# Copy the repo and build that to avoid propagating JS tooling file changes back into the cloned repo
|
||||||
# JS tooling file changes back into the cloned repo
|
|
||||||
repo_copy_path = os.path.join(build_root_path, repo_dir)
|
repo_copy_path = os.path.join(build_root_path, repo_dir)
|
||||||
# First delete any old build tree
|
# First delete any old build tree
|
||||||
if os.path.isdir(repo_copy_path):
|
if os.path.isdir(repo_copy_path):
|
||||||
@ -129,63 +116,41 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
|
|||||||
print(f"Copying build tree from: {repo_full_path} to: {repo_copy_path}")
|
print(f"Copying build tree from: {repo_full_path} to: {repo_copy_path}")
|
||||||
if not dry_run:
|
if not dry_run:
|
||||||
copytree(repo_full_path, repo_copy_path)
|
copytree(repo_full_path, repo_copy_path)
|
||||||
build_command = [
|
build_command = ["sh", "-c", f"cd /workspace && build-npm-package-local-dependencies.sh {npm_registry_url}"]
|
||||||
"sh",
|
|
||||||
"-c",
|
|
||||||
"cd /workspace && "
|
|
||||||
f"build-npm-package-local-dependencies.sh {npm_registry_url}",
|
|
||||||
]
|
|
||||||
if not dry_run:
|
if not dry_run:
|
||||||
if verbose:
|
if verbose:
|
||||||
print(f"Executing: {build_command}")
|
print(f"Executing: {build_command}")
|
||||||
# Originally we used the PEP 584 merge operator:
|
# Originally we used the PEP 584 merge operator:
|
||||||
# envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token} |
|
# envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token} | ({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
|
||||||
# ({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
|
# but that isn't available in Python 3.8 (default in Ubuntu 20) so for now we use dict.update:
|
||||||
# but that isn't available in Python 3.8 (default in Ubuntu 20)
|
envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token,
|
||||||
# so for now we use dict.update:
|
"LACONIC_HOSTED_CONFIG_FILE": "config-hosted.yml" # Convention used by our web app packages
|
||||||
envs = {
|
|
||||||
"CERC_NPM_AUTH_TOKEN": npm_registry_url_token,
|
|
||||||
# Convention used by our web app packages
|
|
||||||
"LACONIC_HOSTED_CONFIG_FILE": "config-hosted.yml",
|
|
||||||
}
|
}
|
||||||
envs.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
|
envs.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
|
||||||
envs.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
|
envs.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
|
||||||
envs.update(
|
envs.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {})
|
||||||
{"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args}
|
|
||||||
if extra_build_args
|
|
||||||
else {}
|
|
||||||
)
|
|
||||||
try:
|
try:
|
||||||
docker.run(
|
docker.run(builder_js_image_name,
|
||||||
builder_js_image_name,
|
|
||||||
remove=True,
|
remove=True,
|
||||||
interactive=True,
|
interactive=True,
|
||||||
tty=True,
|
tty=True,
|
||||||
user=f"{os.getuid()}:{os.getgid()}",
|
user=f"{os.getuid()}:{os.getgid()}",
|
||||||
envs=envs,
|
envs=envs,
|
||||||
# TODO: detect this host name in npm_registry_url
|
# TODO: detect this host name in npm_registry_url rather than hard-wiring it
|
||||||
# rather than hard-wiring it
|
|
||||||
add_hosts=[("gitea.local", "host-gateway")],
|
add_hosts=[("gitea.local", "host-gateway")],
|
||||||
volumes=[(repo_copy_path, "/workspace")],
|
volumes=[(repo_copy_path, "/workspace")],
|
||||||
command=build_command,
|
command=build_command
|
||||||
)
|
)
|
||||||
# Note that although the docs say that build_result should
|
# Note that although the docs say that build_result should contain
|
||||||
# contain the command output as a string, in reality it is
|
# the command output as a string, in reality it is always the empty string.
|
||||||
# always the empty string. Since we detect errors via catching
|
# Since we detect errors via catching exceptions below, we can safely ignore it here.
|
||||||
# exceptions below, we can safely ignore it here.
|
|
||||||
except DockerException as e:
|
except DockerException as e:
|
||||||
print(f"Error executing build for {package} in container:\n {e}")
|
print(f"Error executing build for {package} in container:\n {e}")
|
||||||
if not continue_on_error:
|
if not continue_on_error:
|
||||||
print(
|
print("FATAL Error: build failed and --continue-on-error not set, exiting")
|
||||||
"FATAL Error: build failed and --continue-on-error "
|
|
||||||
"not set, exiting"
|
|
||||||
)
|
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
else:
|
else:
|
||||||
print(
|
print("****** Build Error, continuing because --continue-on-error is set")
|
||||||
"****** Build Error, continuing because "
|
|
||||||
"--continue-on-error is set"
|
|
||||||
)
|
|
||||||
|
|
||||||
else:
|
else:
|
||||||
print("Skipped")
|
print("Skipped")
|
||||||
@ -203,12 +168,6 @@ def _ensure_prerequisites():
|
|||||||
# Tell the user how to build it if not
|
# Tell the user how to build it if not
|
||||||
images = docker.image.list(builder_js_image_name)
|
images = docker.image.list(builder_js_image_name)
|
||||||
if len(images) == 0:
|
if len(images) == 0:
|
||||||
print(
|
print(f"FATAL: builder image: {builder_js_image_name} is required but was not found")
|
||||||
f"FATAL: builder image: {builder_js_image_name} is required "
|
print("Please run this command to create it: laconic-so --stack build-support build-containers")
|
||||||
"but was not found"
|
|
||||||
)
|
|
||||||
print(
|
|
||||||
"Please run this command to create it: "
|
|
||||||
"laconic-so --stack build-support build-containers"
|
|
||||||
)
|
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|||||||
@ -26,3 +26,4 @@ class BuildContext:
|
|||||||
container_build_dir: Path
|
container_build_dir: Path
|
||||||
container_build_env: Mapping[str,str]
|
container_build_env: Mapping[str,str]
|
||||||
dev_root_path: str
|
dev_root_path: str
|
||||||
|
|
||||||
|
|||||||
@ -20,23 +20,21 @@ from stack_orchestrator.util import get_parsed_stack_config, warn_exit
|
|||||||
|
|
||||||
|
|
||||||
def get_containers_in_scope(stack: str):
|
def get_containers_in_scope(stack: str):
|
||||||
|
|
||||||
containers_in_scope = []
|
containers_in_scope = []
|
||||||
if stack:
|
if stack:
|
||||||
stack_config = get_parsed_stack_config(stack)
|
stack_config = get_parsed_stack_config(stack)
|
||||||
if "containers" not in stack_config or stack_config["containers"] is None:
|
if "containers" not in stack_config or stack_config["containers"] is None:
|
||||||
warn_exit(f"stack {stack} does not define any containers")
|
warn_exit(f"stack {stack} does not define any containers")
|
||||||
containers_in_scope = stack_config["containers"]
|
containers_in_scope = stack_config['containers']
|
||||||
else:
|
else:
|
||||||
# See: https://stackoverflow.com/a/20885799/1701505
|
# See: https://stackoverflow.com/a/20885799/1701505
|
||||||
from stack_orchestrator import data
|
from stack_orchestrator import data
|
||||||
|
with importlib.resources.open_text(data, "container-image-list.txt") as container_list_file:
|
||||||
with importlib.resources.open_text(
|
|
||||||
data, "container-image-list.txt"
|
|
||||||
) as container_list_file:
|
|
||||||
containers_in_scope = container_list_file.read().splitlines()
|
containers_in_scope = container_list_file.read().splitlines()
|
||||||
|
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f"Containers: {containers_in_scope}")
|
print(f'Containers: {containers_in_scope}')
|
||||||
if stack:
|
if stack:
|
||||||
print(f"Stack: {stack}")
|
print(f"Stack: {stack}")
|
||||||
|
|
||||||
|
|||||||
@ -18,8 +18,7 @@
|
|||||||
# env vars:
|
# env vars:
|
||||||
# CERC_REPO_BASE_DIR defaults to ~/cerc
|
# CERC_REPO_BASE_DIR defaults to ~/cerc
|
||||||
|
|
||||||
# TODO: display the available list of containers;
|
# TODO: display the available list of containers; allow re-build of either all or specific containers
|
||||||
# allow re-build of either all or specific containers
|
|
||||||
|
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
@ -33,55 +32,40 @@ from stack_orchestrator.build.build_types import BuildContext
|
|||||||
|
|
||||||
|
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.option("--base-container")
|
@click.option('--base-container')
|
||||||
@click.option(
|
@click.option('--source-repo', help="directory containing the webapp to build", required=True)
|
||||||
"--source-repo", help="directory containing the webapp to build", required=True
|
@click.option("--force-rebuild", is_flag=True, default=False, help="Override dependency checking -- always rebuild")
|
||||||
)
|
|
||||||
@click.option(
|
|
||||||
"--force-rebuild",
|
|
||||||
is_flag=True,
|
|
||||||
default=False,
|
|
||||||
help="Override dependency checking -- always rebuild",
|
|
||||||
)
|
|
||||||
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
||||||
@click.option("--tag", help="Container tag (default: cerc/<app_name>:local)")
|
@click.option("--tag", help="Container tag (default: cerc/<app_name>:local)")
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, tag):
|
def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, tag):
|
||||||
"""build the specified webapp container"""
|
'''build the specified webapp container'''
|
||||||
logger = TimedLogger()
|
logger = TimedLogger()
|
||||||
|
|
||||||
|
quiet = ctx.obj.quiet
|
||||||
debug = ctx.obj.debug
|
debug = ctx.obj.debug
|
||||||
verbose = ctx.obj.verbose
|
verbose = ctx.obj.verbose
|
||||||
local_stack = ctx.obj.local_stack
|
local_stack = ctx.obj.local_stack
|
||||||
stack = ctx.obj.stack
|
stack = ctx.obj.stack
|
||||||
|
|
||||||
# See: https://stackoverflow.com/questions/25389095/
|
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
|
||||||
# python-get-path-of-root-project-structure
|
container_build_dir = Path(__file__).absolute().parent.parent.joinpath("data", "container-build")
|
||||||
container_build_dir = (
|
|
||||||
Path(__file__).absolute().parent.parent.joinpath("data", "container-build")
|
|
||||||
)
|
|
||||||
|
|
||||||
if local_stack:
|
if local_stack:
|
||||||
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
|
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
|
||||||
logger.log(
|
logger.log(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
|
||||||
f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
|
|
||||||
f"{dev_root_path}"
|
|
||||||
)
|
|
||||||
else:
|
else:
|
||||||
dev_root_path = os.path.expanduser(
|
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
|
||||||
config("CERC_REPO_BASE_DIR", default="~/cerc")
|
|
||||||
)
|
|
||||||
|
|
||||||
if verbose:
|
if verbose:
|
||||||
logger.log(f"Dev Root is: {dev_root_path}")
|
logger.log(f'Dev Root is: {dev_root_path}')
|
||||||
|
|
||||||
if not base_container:
|
if not base_container:
|
||||||
base_container = determine_base_container(source_repo)
|
base_container = determine_base_container(source_repo)
|
||||||
|
|
||||||
# First build the base container.
|
# First build the base container.
|
||||||
container_build_env = build_containers.make_container_build_env(
|
container_build_env = build_containers.make_container_build_env(dev_root_path, container_build_dir, debug,
|
||||||
dev_root_path, container_build_dir, debug, force_rebuild, extra_build_args
|
force_rebuild, extra_build_args)
|
||||||
)
|
|
||||||
|
|
||||||
if verbose:
|
if verbose:
|
||||||
logger.log(f"Building base container: {base_container}")
|
logger.log(f"Building base container: {base_container}")
|
||||||
@ -101,13 +85,12 @@ def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, t
|
|||||||
if verbose:
|
if verbose:
|
||||||
logger.log(f"Base container {base_container} build finished.")
|
logger.log(f"Base container {base_container} build finished.")
|
||||||
|
|
||||||
# Now build the target webapp. We use the same build script,
|
# Now build the target webapp. We use the same build script, but with a different Dockerfile and work dir.
|
||||||
# but with a different Dockerfile and work dir.
|
|
||||||
container_build_env["CERC_WEBAPP_BUILD_RUNNING"] = "true"
|
container_build_env["CERC_WEBAPP_BUILD_RUNNING"] = "true"
|
||||||
container_build_env["CERC_CONTAINER_BUILD_WORK_DIR"] = os.path.abspath(source_repo)
|
container_build_env["CERC_CONTAINER_BUILD_WORK_DIR"] = os.path.abspath(source_repo)
|
||||||
container_build_env["CERC_CONTAINER_BUILD_DOCKERFILE"] = os.path.join(
|
container_build_env["CERC_CONTAINER_BUILD_DOCKERFILE"] = os.path.join(container_build_dir,
|
||||||
container_build_dir, base_container.replace("/", "-"), "Dockerfile.webapp"
|
base_container.replace("/", "-"),
|
||||||
)
|
"Dockerfile.webapp")
|
||||||
if not tag:
|
if not tag:
|
||||||
webapp_name = os.path.abspath(source_repo).split(os.path.sep)[-1]
|
webapp_name = os.path.abspath(source_repo).split(os.path.sep)[-1]
|
||||||
tag = f"cerc/{webapp_name}:local"
|
tag = f"cerc/{webapp_name}:local"
|
||||||
|
|||||||
@ -52,8 +52,7 @@ def _local_tag_for(container: str):
|
|||||||
|
|
||||||
# See: https://docker-docs.uclv.cu/registry/spec/api/
|
# See: https://docker-docs.uclv.cu/registry/spec/api/
|
||||||
# Emulate this:
|
# Emulate this:
|
||||||
# $ curl -u "my-username:my-token" -X GET \
|
# $ curl -u "my-username:my-token" -X GET "https://<container-registry-hostname>/v2/cerc-io/cerc/test-container/tags/list"
|
||||||
# "https://<container-registry-hostname>/v2/cerc-io/cerc/test-container/tags/list"
|
|
||||||
# {"name":"cerc-io/cerc/test-container","tags":["202402232130","202402232208"]}
|
# {"name":"cerc-io/cerc/test-container","tags":["202402232130","202402232208"]}
|
||||||
def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List[str]:
|
def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List[str]:
|
||||||
# registry looks like: git.vdb.to/cerc-io
|
# registry looks like: git.vdb.to/cerc-io
|
||||||
@ -61,9 +60,7 @@ def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List
|
|||||||
url = f"https://{registry_parts[0]}/v2/{registry_parts[1]}/{container}/tags/list"
|
url = f"https://{registry_parts[0]}/v2/{registry_parts[1]}/{container}/tags/list"
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"Fetching tags from: {url}")
|
print(f"Fetching tags from: {url}")
|
||||||
response = requests.get(
|
response = requests.get(url, auth=(registry_info.registry_username, registry_info.registry_token))
|
||||||
url, auth=(registry_info.registry_username, registry_info.registry_token)
|
|
||||||
)
|
|
||||||
if response.status_code == 200:
|
if response.status_code == 200:
|
||||||
tag_info = response.json()
|
tag_info = response.json()
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
@ -71,10 +68,7 @@ def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List
|
|||||||
tags_array = tag_info["tags"]
|
tags_array = tag_info["tags"]
|
||||||
return tags_array
|
return tags_array
|
||||||
else:
|
else:
|
||||||
error_exit(
|
error_exit(f"failed to fetch tags from image registry, status code: {response.status_code}")
|
||||||
f"failed to fetch tags from image registry, "
|
|
||||||
f"status code: {response.status_code}"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def _find_latest(candidate_tags: List[str]):
|
def _find_latest(candidate_tags: List[str]):
|
||||||
@ -85,9 +79,9 @@ def _find_latest(candidate_tags: List[str]):
|
|||||||
return sorted_candidates[-1]
|
return sorted_candidates[-1]
|
||||||
|
|
||||||
|
|
||||||
def _filter_for_platform(
|
def _filter_for_platform(container: str,
|
||||||
container: str, registry_info: RegistryInfo, tag_list: List[str]
|
registry_info: RegistryInfo,
|
||||||
) -> List[str]:
|
tag_list: List[str]) -> List[str] :
|
||||||
filtered_tags = []
|
filtered_tags = []
|
||||||
this_machine = platform.machine()
|
this_machine = platform.machine()
|
||||||
# Translate between Python and docker platform names
|
# Translate between Python and docker platform names
|
||||||
@ -143,44 +137,21 @@ def _add_local_tag(remote_tag: str, registry: str, local_tag: str):
|
|||||||
|
|
||||||
|
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.option("--include", help="only fetch these containers")
|
@click.option('--include', help="only fetch these containers")
|
||||||
@click.option("--exclude", help="don't fetch these containers")
|
@click.option('--exclude', help="don\'t fetch these containers")
|
||||||
@click.option(
|
@click.option("--force-local-overwrite", is_flag=True, default=False, help="Overwrite a locally built image, if present")
|
||||||
"--force-local-overwrite",
|
@click.option("--image-registry", required=True, help="Specify the image registry to fetch from")
|
||||||
is_flag=True,
|
@click.option("--registry-username", required=True, help="Specify the image registry username")
|
||||||
default=False,
|
@click.option("--registry-token", required=True, help="Specify the image registry access token")
|
||||||
help="Overwrite a locally built image, if present",
|
|
||||||
)
|
|
||||||
@click.option(
|
|
||||||
"--image-registry", required=True, help="Specify the image registry to fetch from"
|
|
||||||
)
|
|
||||||
@click.option(
|
|
||||||
"--registry-username", required=True, help="Specify the image registry username"
|
|
||||||
)
|
|
||||||
@click.option(
|
|
||||||
"--registry-token", required=True, help="Specify the image registry access token"
|
|
||||||
)
|
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(
|
def command(ctx, include, exclude, force_local_overwrite, image_registry, registry_username, registry_token):
|
||||||
ctx,
|
'''EXPERIMENTAL: fetch the images for a stack from remote registry'''
|
||||||
include,
|
|
||||||
exclude,
|
|
||||||
force_local_overwrite,
|
|
||||||
image_registry,
|
|
||||||
registry_username,
|
|
||||||
registry_token,
|
|
||||||
):
|
|
||||||
"""EXPERIMENTAL: fetch the images for a stack from remote registry"""
|
|
||||||
|
|
||||||
registry_info = RegistryInfo(image_registry, registry_username, registry_token)
|
registry_info = RegistryInfo(image_registry, registry_username, registry_token)
|
||||||
docker = DockerClient()
|
docker = DockerClient()
|
||||||
if not opts.o.quiet:
|
if not opts.o.quiet:
|
||||||
print("Logging into container registry:")
|
print("Logging into container registry:")
|
||||||
docker.login(
|
docker.login(registry_info.registry, registry_info.registry_username, registry_info.registry_token)
|
||||||
registry_info.registry,
|
|
||||||
registry_info.registry_username,
|
|
||||||
registry_info.registry_token,
|
|
||||||
)
|
|
||||||
# Generate list of target containers
|
# Generate list of target containers
|
||||||
stack = ctx.obj.stack
|
stack = ctx.obj.stack
|
||||||
containers_in_scope = get_containers_in_scope(stack)
|
containers_in_scope = get_containers_in_scope(stack)
|
||||||
@ -201,24 +172,19 @@ def command(
|
|||||||
print(f"Fetching: {image_to_fetch}")
|
print(f"Fetching: {image_to_fetch}")
|
||||||
_fetch_image(image_to_fetch, registry_info)
|
_fetch_image(image_to_fetch, registry_info)
|
||||||
# Now check if the target container already exists exists locally already
|
# Now check if the target container already exists exists locally already
|
||||||
if _exists_locally(container):
|
if (_exists_locally(container)):
|
||||||
if not opts.o.quiet:
|
if not opts.o.quiet:
|
||||||
print(f"Container image {container} already exists locally")
|
print(f"Container image {container} already exists locally")
|
||||||
# if so, fail unless the user specified force-local-overwrite
|
# if so, fail unless the user specified force-local-overwrite
|
||||||
if force_local_overwrite:
|
if (force_local_overwrite):
|
||||||
# In that case remove the existing :local tag
|
# In that case remove the existing :local tag
|
||||||
if not opts.o.quiet:
|
if not opts.o.quiet:
|
||||||
print(
|
print(f"Warning: overwriting local tag from this image: {container} because "
|
||||||
f"Warning: overwriting local tag from this image: "
|
"--force-local-overwrite was specified")
|
||||||
f"{container} because --force-local-overwrite was specified"
|
|
||||||
)
|
|
||||||
else:
|
else:
|
||||||
if not opts.o.quiet:
|
if not opts.o.quiet:
|
||||||
print(
|
print(f"Skipping local tagging for this image: {container} because that would "
|
||||||
f"Skipping local tagging for this image: {container} "
|
"overwrite an existing :local tagged image, use --force-local-overwrite to do so.")
|
||||||
"because that would overwrite an existing :local tagged "
|
|
||||||
"image, use --force-local-overwrite to do so."
|
|
||||||
)
|
|
||||||
continue
|
continue
|
||||||
# Tag the fetched image with the :local tag
|
# Tag the fetched image with the :local tag
|
||||||
_add_local_tag(image_to_fetch, image_registry, local_tag)
|
_add_local_tag(image_to_fetch, image_registry, local_tag)
|
||||||
@ -226,7 +192,4 @@ def command(
|
|||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f"Excluding: {container}")
|
print(f"Excluding: {container}")
|
||||||
if not all_containers_found:
|
if not all_containers_found:
|
||||||
print(
|
print("Warning: couldn't find usable images for one or more containers, this stack will not deploy")
|
||||||
"Warning: couldn't find usable images for one or more containers, "
|
|
||||||
"this stack will not deploy"
|
|
||||||
)
|
|
||||||
|
|||||||
@ -29,20 +29,10 @@ network_key = "network"
|
|||||||
http_proxy_key = "http-proxy"
|
http_proxy_key = "http-proxy"
|
||||||
image_registry_key = "image-registry"
|
image_registry_key = "image-registry"
|
||||||
configmaps_key = "configmaps"
|
configmaps_key = "configmaps"
|
||||||
secrets_key = "secrets"
|
|
||||||
resources_key = "resources"
|
resources_key = "resources"
|
||||||
volumes_key = "volumes"
|
volumes_key = "volumes"
|
||||||
security_key = "security"
|
security_key = "security"
|
||||||
annotations_key = "annotations"
|
annotations_key = "annotations"
|
||||||
labels_key = "labels"
|
labels_key = "labels"
|
||||||
replicas_key = "replicas"
|
|
||||||
node_affinities_key = "node-affinities"
|
|
||||||
node_tolerations_key = "node-tolerations"
|
|
||||||
kind_config_filename = "kind-config.yml"
|
kind_config_filename = "kind-config.yml"
|
||||||
kube_config_filename = "kubeconfig.yml"
|
kube_config_filename = "kubeconfig.yml"
|
||||||
cri_base_filename = "cri-base.json"
|
|
||||||
unlimited_memlock_key = "unlimited-memlock"
|
|
||||||
runtime_class_key = "runtime-class"
|
|
||||||
high_memlock_runtime = "high-memlock"
|
|
||||||
high_memlock_spec_filename = "high-memlock-spec.json"
|
|
||||||
acme_email_key = "acme-email"
|
|
||||||
|
|||||||
@ -1,5 +0,0 @@
|
|||||||
services:
|
|
||||||
test-job:
|
|
||||||
image: cerc/test-container:local
|
|
||||||
entrypoint: /bin/sh
|
|
||||||
command: ["-c", "echo 'Job completed successfully'"]
|
|
||||||
@ -14,3 +14,4 @@ services:
|
|||||||
- "9090"
|
- "9090"
|
||||||
- "9091"
|
- "9091"
|
||||||
- "1317"
|
- "1317"
|
||||||
|
|
||||||
|
|||||||
@ -17,3 +17,4 @@ services:
|
|||||||
- URL_NEUTRON_TEST_REST=https://rest-palvus.pion-1.ntrn.tech
|
- URL_NEUTRON_TEST_REST=https://rest-palvus.pion-1.ntrn.tech
|
||||||
- URL_NEUTRON_TEST_RPC=https://rpc-palvus.pion-1.ntrn.tech
|
- URL_NEUTRON_TEST_RPC=https://rpc-palvus.pion-1.ntrn.tech
|
||||||
- WALLET_CONNECT_ID=0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x
|
- WALLET_CONNECT_ID=0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x
|
||||||
|
|
||||||
|
|||||||
@ -4,5 +4,9 @@ services:
|
|||||||
ping-pub:
|
ping-pub:
|
||||||
image: cerc/ping-pub:local
|
image: cerc/ping-pub:local
|
||||||
restart: always
|
restart: always
|
||||||
|
environment:
|
||||||
|
LACONIC_LACONICD_CHAIN_ID: ${LACONIC_LACONICD_CHAIN_ID:-laconic_9000-1}
|
||||||
|
LACONIC_LACONICD_RPC_URL: ${LACONIC_LACONICD_RPC_URL:-http://localhost:26657}
|
||||||
|
LACONIC_LACONICD_API_URL: ${LACONIC_LACONICD_API_URL:-http://localhost:1317}
|
||||||
ports:
|
ports:
|
||||||
- "5173:5173"
|
- 5173
|
||||||
|
|||||||
@ -8,8 +8,6 @@ services:
|
|||||||
CERC_TEST_PARAM_2: "CERC_TEST_PARAM_2_VALUE"
|
CERC_TEST_PARAM_2: "CERC_TEST_PARAM_2_VALUE"
|
||||||
CERC_TEST_PARAM_3: ${CERC_TEST_PARAM_3:-FAILED}
|
CERC_TEST_PARAM_3: ${CERC_TEST_PARAM_3:-FAILED}
|
||||||
volumes:
|
volumes:
|
||||||
- ../config/test/script.sh:/opt/run.sh
|
|
||||||
- ../config/test/settings.env:/opt/settings.env
|
|
||||||
- test-data-bind:/data
|
- test-data-bind:/data
|
||||||
- test-data-auto:/data2
|
- test-data-auto:/data2
|
||||||
- test-config:/config:ro
|
- test-config:/config:ro
|
||||||
|
|||||||
@ -10,7 +10,6 @@ MONIKER="localtestnet"
|
|||||||
KEYRING="test"
|
KEYRING="test"
|
||||||
KEYALGO="secp256k1"
|
KEYALGO="secp256k1"
|
||||||
LOGLEVEL="${LOGLEVEL:-info}"
|
LOGLEVEL="${LOGLEVEL:-info}"
|
||||||
DENOM="alnt"
|
|
||||||
|
|
||||||
|
|
||||||
if [ "$1" == "clean" ] || [ ! -d "$HOME/.laconicd/data/blockstore.db" ]; then
|
if [ "$1" == "clean" ] || [ ! -d "$HOME/.laconicd/data/blockstore.db" ]; then
|
||||||
@ -34,7 +33,7 @@ if [ "$1" == "clean" ] || [ ! -d "$HOME/.laconicd/data/blockstore.db" ]; then
|
|||||||
laconicd keys add $KEY --keyring-backend $KEYRING --algo $KEYALGO
|
laconicd keys add $KEY --keyring-backend $KEYRING --algo $KEYALGO
|
||||||
|
|
||||||
# Set moniker and chain-id for Ethermint (Moniker can be anything, chain-id must be an integer)
|
# Set moniker and chain-id for Ethermint (Moniker can be anything, chain-id must be an integer)
|
||||||
laconicd init $MONIKER --chain-id $CHAINID --default-denom $DENOM
|
laconicd init $MONIKER --chain-id $CHAINID --default-denom photon
|
||||||
|
|
||||||
update_genesis() {
|
update_genesis() {
|
||||||
jq "$1" $HOME/.laconicd/config/genesis.json > $HOME/.laconicd/config/tmp_genesis.json &&
|
jq "$1" $HOME/.laconicd/config/genesis.json > $HOME/.laconicd/config/tmp_genesis.json &&
|
||||||
@ -89,13 +88,15 @@ if [ "$1" == "clean" ] || [ ! -d "$HOME/.laconicd/data/blockstore.db" ]; then
|
|||||||
sed -i 's/prometheus = false/prometheus = true/g' $HOME/.laconicd/config/config.toml
|
sed -i 's/prometheus = false/prometheus = true/g' $HOME/.laconicd/config/config.toml
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# Run this to allow requests from any origin
|
||||||
|
sed -i 's/cors_allowed_origins.*$/cors_allowed_origins = ["*"]/' $HOME/.laconicd/config/config.toml
|
||||||
|
sed -i 's/enabled-unsafe-cors.*$/enabled-unsafe-cors = true/' $HOME/.laconicd/config/app.toml
|
||||||
|
|
||||||
# Allocate genesis accounts (cosmos formatted addresses)
|
# Allocate genesis accounts (cosmos formatted addresses)
|
||||||
# 10^30 alnt | 10^12 lnt
|
laconicd genesis add-genesis-account $KEY 100000000000000000000000000photon --keyring-backend $KEYRING
|
||||||
laconicd genesis add-genesis-account $KEY 1000000000000000000000000000000$DENOM --keyring-backend $KEYRING
|
|
||||||
|
|
||||||
# Sign genesis transaction
|
# Sign genesis transaction
|
||||||
# 10^24 alnt | 10^6 lnt
|
laconicd genesis gentx $KEY 1000000000000000000000photon --keyring-backend $KEYRING --chain-id $CHAINID
|
||||||
laconicd genesis gentx $KEY 1000000000000000000000000$DENOM --keyring-backend $KEYRING --chain-id $CHAINID
|
|
||||||
|
|
||||||
# Collect genesis tx
|
# Collect genesis tx
|
||||||
laconicd genesis collect-gentxs
|
laconicd genesis collect-gentxs
|
||||||
@ -110,7 +111,7 @@ fi
|
|||||||
laconicd start \
|
laconicd start \
|
||||||
--pruning=nothing \
|
--pruning=nothing \
|
||||||
--log_level $LOGLEVEL \
|
--log_level $LOGLEVEL \
|
||||||
--minimum-gas-prices=1$DENOM \
|
--minimum-gas-prices=0.0001photon \
|
||||||
--api.enable \
|
--api.enable \
|
||||||
--rpc.laddr="tcp://0.0.0.0:26657" \
|
--rpc.laddr="tcp://0.0.0.0:26657" \
|
||||||
--gql-server --gql-playground
|
--gql-server --gql-playground
|
||||||
|
|||||||
@ -6,4 +6,4 @@ services:
|
|||||||
bondId:
|
bondId:
|
||||||
chainId: laconic_9000-1
|
chainId: laconic_9000-1
|
||||||
gas: 350000
|
gas: 350000
|
||||||
fees: 2000000alnt
|
fees: 200000photon
|
||||||
|
|||||||
@ -29,3 +29,4 @@
|
|||||||
"l1_system_config_address": "0x5531dcff39ec1ec727c4c5d2fc49835368f805a9",
|
"l1_system_config_address": "0x5531dcff39ec1ec727c4c5d2fc49835368f805a9",
|
||||||
"protocol_versions_address": "0x0000000000000000000000000000000000000000"
|
"protocol_versions_address": "0x0000000000000000000000000000000000000000"
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -12,10 +12,7 @@ from fabric import Connection
|
|||||||
|
|
||||||
|
|
||||||
def dump_src_db_to_file(db_host, db_port, db_user, db_password, db_name, file_name):
|
def dump_src_db_to_file(db_host, db_port, db_user, db_password, db_name, file_name):
|
||||||
command = (
|
command = f"pg_dump -h {db_host} -p {db_port} -U {db_user} -d {db_name} -c --inserts -f {file_name}"
|
||||||
f"pg_dump -h {db_host} -p {db_port} -U {db_user} "
|
|
||||||
f"-d {db_name} -c --inserts -f {file_name}"
|
|
||||||
)
|
|
||||||
my_env = os.environ.copy()
|
my_env = os.environ.copy()
|
||||||
my_env["PGPASSWORD"] = db_password
|
my_env["PGPASSWORD"] = db_password
|
||||||
print(f"Exporting from {db_host}:{db_port}/{db_name} to {file_name}... ", end="")
|
print(f"Exporting from {db_host}:{db_port}/{db_name} to {file_name}... ", end="")
|
||||||
|
|||||||
@ -6,4 +6,4 @@ services:
|
|||||||
bondId:
|
bondId:
|
||||||
chainId: laconic_9000-1
|
chainId: laconic_9000-1
|
||||||
gas: 250000
|
gas: 250000
|
||||||
fees: 2000000alnt
|
fees: 200000photon
|
||||||
|
|||||||
@ -1,5 +1,5 @@
|
|||||||
#!/bin/sh
|
#!/bin/sh
|
||||||
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
|
if [[ -n "$CERC_SCRIPT_DEBUG" ]]; then
|
||||||
set -x
|
set -x
|
||||||
fi
|
fi
|
||||||
|
|
||||||
@ -9,7 +9,7 @@ LOGLEVEL="info"
|
|||||||
laconicd start \
|
laconicd start \
|
||||||
--pruning=nothing \
|
--pruning=nothing \
|
||||||
--log_level $LOGLEVEL \
|
--log_level $LOGLEVEL \
|
||||||
--minimum-gas-prices=1alnt \
|
--minimum-gas-prices=0.0001photon \
|
||||||
--api.enable \
|
--api.enable \
|
||||||
--gql-server \
|
--gql-server \
|
||||||
--gql-playground
|
--gql-playground
|
||||||
|
|||||||
@ -1,3 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
|
|
||||||
echo "Hello"
|
|
||||||
@ -1 +0,0 @@
|
|||||||
ANSWER=42
|
|
||||||
@ -940,3 +940,4 @@ ALTER TABLE ONLY public.state
|
|||||||
--
|
--
|
||||||
-- PostgreSQL database dump complete
|
-- PostgreSQL database dump complete
|
||||||
--
|
--
|
||||||
|
|
||||||
|
|||||||
@ -18,3 +18,4 @@ root@7c4124bb09e3:/src#
|
|||||||
```
|
```
|
||||||
|
|
||||||
Now gerbil commands can be run.
|
Now gerbil commands can be run.
|
||||||
|
|
||||||
|
|||||||
@ -11,8 +11,6 @@ if len(sys.argv) > 1:
|
|||||||
with open(testnet_config_path) as stream:
|
with open(testnet_config_path) as stream:
|
||||||
data = yaml.safe_load(stream)
|
data = yaml.safe_load(stream)
|
||||||
|
|
||||||
for key, value in data["el_premine"].items():
|
for key, value in data['el_premine'].items():
|
||||||
acct = w3.eth.account.from_mnemonic(
|
acct = w3.eth.account.from_mnemonic(data['mnemonic'], account_path=key, passphrase='')
|
||||||
data["mnemonic"], account_path=key, passphrase=""
|
|
||||||
)
|
|
||||||
print("%s,%s,%s" % (key, acct.address, acct.key.hex()))
|
print("%s,%s,%s" % (key, acct.address, acct.key.hex()))
|
||||||
|
|||||||
@ -14,7 +14,7 @@ funds_balance=$(echo ${funds_response} | jq -r ".[0].balance[0].quantity")
|
|||||||
echo "Balance is: ${funds_balance}"
|
echo "Balance is: ${funds_balance}"
|
||||||
|
|
||||||
# Create a bond
|
# Create a bond
|
||||||
bond_create_result=$(${registry_command} bond create --type alnt --quantity 1000000000)
|
bond_create_result=$(${registry_command} bond create --type photon --quantity 1000000000)
|
||||||
bond_id=$(echo ${bond_create_result} | jq -r .bondId)
|
bond_id=$(echo ${bond_create_result} | jq -r .bondId)
|
||||||
echo "Created bond with id: ${bond_id}"
|
echo "Created bond with id: ${bond_id}"
|
||||||
|
|
||||||
|
|||||||
@ -26,14 +26,8 @@ fi
|
|||||||
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
|
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
|
||||||
WORK_DIR="${1:-/app}"
|
WORK_DIR="${1:-/app}"
|
||||||
|
|
||||||
if [ -f "${WORK_DIR}/build-webapp.sh" ]; then
|
|
||||||
echo "Building webapp with ${WORK_DIR}/build-webapp.sh ..."
|
|
||||||
cd "${WORK_DIR}" || exit 1
|
cd "${WORK_DIR}" || exit 1
|
||||||
|
|
||||||
./build-webapp.sh || exit 1
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -f "next.config.mjs" ]; then
|
if [ -f "next.config.mjs" ]; then
|
||||||
NEXT_CONFIG_JS="next.config.mjs"
|
NEXT_CONFIG_JS="next.config.mjs"
|
||||||
IMPORT_OR_REQUIRE="import"
|
IMPORT_OR_REQUIRE="import"
|
||||||
|
|||||||
@ -30,13 +30,6 @@ fi
|
|||||||
CERC_WEBAPP_FILES_DIR="${CERC_WEBAPP_FILES_DIR:-/app}"
|
CERC_WEBAPP_FILES_DIR="${CERC_WEBAPP_FILES_DIR:-/app}"
|
||||||
cd "$CERC_WEBAPP_FILES_DIR"
|
cd "$CERC_WEBAPP_FILES_DIR"
|
||||||
|
|
||||||
if [ -f "./run-webapp.sh" ]; then
|
|
||||||
echo "Running webapp with run-webapp.sh ..."
|
|
||||||
cd "${WORK_DIR}" || exit 1
|
|
||||||
./run-webapp.sh &
|
|
||||||
tpid=$!
|
|
||||||
wait $tpid
|
|
||||||
else
|
|
||||||
"$SCRIPT_DIR/apply-runtime-env.sh" "`pwd`" .next .next-r
|
"$SCRIPT_DIR/apply-runtime-env.sh" "`pwd`" .next .next-r
|
||||||
mv .next .next.old
|
mv .next .next.old
|
||||||
mv .next-r/.next .
|
mv .next-r/.next .
|
||||||
@ -70,4 +63,3 @@ else
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
$CERC_BUILD_TOOL start . -- -p ${CERC_LISTEN_PORT:-80}
|
$CERC_BUILD_TOOL start . -- -p ${CERC_LISTEN_PORT:-80}
|
||||||
fi
|
|
||||||
|
|||||||
@ -5,3 +5,4 @@ WORKDIR /app
|
|||||||
COPY . .
|
COPY . .
|
||||||
|
|
||||||
RUN yarn
|
RUN yarn
|
||||||
|
|
||||||
|
|||||||
@ -4,9 +4,5 @@ source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
|
|||||||
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
|
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
|
||||||
|
|
||||||
# Two-stage build is to allow us to pick up both the upstream repo's files, and local files here for config
|
# Two-stage build is to allow us to pick up both the upstream repo's files, and local files here for config
|
||||||
docker build -t cerc/ping-pub-base:local ${build_command_args} -f $SCRIPT_DIR/Dockerfile.base $CERC_REPO_BASE_DIR/cosmos-explorer
|
docker build -t cerc/ping-pub-base:local ${build_command_args} -f $SCRIPT_DIR/Dockerfile.base $CERC_REPO_BASE_DIR/explorer
|
||||||
if [[ $? -ne 0 ]]; then
|
|
||||||
echo "FATAL: Base container build failed, exiting"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
docker build -t cerc/ping-pub:local ${build_command_args} -f $SCRIPT_DIR/Dockerfile $SCRIPT_DIR
|
docker build -t cerc/ping-pub:local ${build_command_args} -f $SCRIPT_DIR/Dockerfile $SCRIPT_DIR
|
||||||
|
|||||||
@ -7,15 +7,15 @@
|
|||||||
"rpc": [
|
"rpc": [
|
||||||
{"provider": "LX-tendermint-rpc", "address": "LACONIC_LACONICD_RPC_URL"}
|
{"provider": "LX-tendermint-rpc", "address": "LACONIC_LACONICD_RPC_URL"}
|
||||||
],
|
],
|
||||||
"sdk_version": "0.50.3",
|
"sdk_version": "0.45.1",
|
||||||
"coin_type": "118",
|
"coin_type": "118",
|
||||||
"min_tx_fee": "800",
|
"min_tx_fee": "800",
|
||||||
"addr_prefix": "laconic",
|
"addr_prefix": "ethm",
|
||||||
"logo": "/logos/cosmos.svg",
|
"logo": "/logos/cosmos.svg",
|
||||||
"assets": [{
|
"assets": [{
|
||||||
"base": "alnt",
|
"base": "photon",
|
||||||
"symbol": "LNT",
|
"symbol": "LNT",
|
||||||
"exponent": "18",
|
"exponent": "6",
|
||||||
"coingecko_id": "cosmos",
|
"coingecko_id": "cosmos",
|
||||||
"logo": "/logos/cosmos.svg"
|
"logo": "/logos/cosmos.svg"
|
||||||
}]
|
}]
|
||||||
|
|||||||
@ -26,6 +26,11 @@ fi
|
|||||||
# subvert this lunacy.
|
# subvert this lunacy.
|
||||||
explorer_mainnet_config_dir=/app/chains/mainnet
|
explorer_mainnet_config_dir=/app/chains/mainnet
|
||||||
explorer_testnet_config_dir=/app/chains/testnet
|
explorer_testnet_config_dir=/app/chains/testnet
|
||||||
|
|
||||||
|
# Create required directories
|
||||||
|
mkdir -p $explorer_mainnet_config_dir
|
||||||
|
mkdir -p $explorer_testnet_config_dir
|
||||||
|
|
||||||
config_template_file=/config/chains/laconic-chaindata-template.json
|
config_template_file=/config/chains/laconic-chaindata-template.json
|
||||||
chain_config_name=laconic.json
|
chain_config_name=laconic.json
|
||||||
mainnet_config_file=${explorer_mainnet_config_dir}/${chain_config_name}
|
mainnet_config_file=${explorer_mainnet_config_dir}/${chain_config_name}
|
||||||
|
|||||||
@ -1,6 +1,9 @@
|
|||||||
FROM alpine:latest
|
FROM ubuntu:latest
|
||||||
|
|
||||||
RUN apk add --no-cache nginx
|
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive && export DEBCONF_NOWARNINGS="yes" && \
|
||||||
|
apt-get install -y software-properties-common && \
|
||||||
|
apt-get install -y nginx && \
|
||||||
|
apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
|
||||||
|
|
||||||
EXPOSE 80
|
EXPOSE 80
|
||||||
|
|
||||||
|
|||||||
@ -1,4 +1,4 @@
|
|||||||
#!/usr/bin/env sh
|
#!/usr/bin/env bash
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
|
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
|
||||||
@ -8,14 +8,14 @@ fi
|
|||||||
echo "Test container starting"
|
echo "Test container starting"
|
||||||
|
|
||||||
DATA_DEVICE=$(df | grep "/data$" | awk '{ print $1 }')
|
DATA_DEVICE=$(df | grep "/data$" | awk '{ print $1 }')
|
||||||
if [ -n "$DATA_DEVICE" ]; then
|
if [[ -n "$DATA_DEVICE" ]]; then
|
||||||
echo "/data: MOUNTED dev=${DATA_DEVICE}"
|
echo "/data: MOUNTED dev=${DATA_DEVICE}"
|
||||||
else
|
else
|
||||||
echo "/data: not mounted"
|
echo "/data: not mounted"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
DATA2_DEVICE=$(df | grep "/data2$" | awk '{ print $1 }')
|
DATA2_DEVICE=$(df | grep "/data2$" | awk '{ print $1 }')
|
||||||
if [ -n "$DATA_DEVICE" ]; then
|
if [[ -n "$DATA_DEVICE" ]]; then
|
||||||
echo "/data2: MOUNTED dev=${DATA2_DEVICE}"
|
echo "/data2: MOUNTED dev=${DATA2_DEVICE}"
|
||||||
else
|
else
|
||||||
echo "/data2: not mounted"
|
echo "/data2: not mounted"
|
||||||
@ -23,7 +23,7 @@ fi
|
|||||||
|
|
||||||
# Test if the container's filesystem is old (run previously) or new
|
# Test if the container's filesystem is old (run previously) or new
|
||||||
for d in /data /data2; do
|
for d in /data /data2; do
|
||||||
if [ -f "$d/exists" ];
|
if [[ -f "$d/exists" ]];
|
||||||
then
|
then
|
||||||
TIMESTAMP=`cat $d/exists`
|
TIMESTAMP=`cat $d/exists`
|
||||||
echo "$d filesystem is old, created: $TIMESTAMP"
|
echo "$d filesystem is old, created: $TIMESTAMP"
|
||||||
@ -52,7 +52,7 @@ fi
|
|||||||
if [ -d "/config" ]; then
|
if [ -d "/config" ]; then
|
||||||
echo "/config: EXISTS"
|
echo "/config: EXISTS"
|
||||||
for f in /config/*; do
|
for f in /config/*; do
|
||||||
if [ -f "$f" ] || [ -L "$f" ]; then
|
if [[ -f "$f" ]] || [[ -L "$f" ]]; then
|
||||||
echo "$f:"
|
echo "$f:"
|
||||||
cat "$f"
|
cat "$f"
|
||||||
echo ""
|
echo ""
|
||||||
@ -64,4 +64,4 @@ else
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
# Run nginx which will block here forever
|
# Run nginx which will block here forever
|
||||||
nginx -g "daemon off;"
|
/usr/sbin/nginx -g "daemon off;"
|
||||||
|
|||||||
@ -1,261 +0,0 @@
|
|||||||
# Caddy Ingress Controller for kind
|
|
||||||
# Based on: https://github.com/caddyserver/ingress
|
|
||||||
# Provides automatic HTTPS with Let's Encrypt
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Namespace
|
|
||||||
metadata:
|
|
||||||
name: caddy-system
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: caddy-ingress-controller
|
|
||||||
app.kubernetes.io/instance: caddy-ingress
|
|
||||||
---
|
|
||||||
apiVersion: v1
|
|
||||||
kind: ServiceAccount
|
|
||||||
metadata:
|
|
||||||
name: caddy-ingress-controller
|
|
||||||
namespace: caddy-system
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: caddy-ingress-controller
|
|
||||||
app.kubernetes.io/instance: caddy-ingress
|
|
||||||
---
|
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
kind: ClusterRole
|
|
||||||
metadata:
|
|
||||||
name: caddy-ingress-controller
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: caddy-ingress-controller
|
|
||||||
app.kubernetes.io/instance: caddy-ingress
|
|
||||||
rules:
|
|
||||||
- apiGroups:
|
|
||||||
- ""
|
|
||||||
resources:
|
|
||||||
- configmaps
|
|
||||||
- endpoints
|
|
||||||
- nodes
|
|
||||||
- pods
|
|
||||||
- namespaces
|
|
||||||
- services
|
|
||||||
verbs:
|
|
||||||
- list
|
|
||||||
- watch
|
|
||||||
- get
|
|
||||||
- apiGroups:
|
|
||||||
- ""
|
|
||||||
resources:
|
|
||||||
- secrets
|
|
||||||
verbs:
|
|
||||||
- list
|
|
||||||
- watch
|
|
||||||
- get
|
|
||||||
- create
|
|
||||||
- update
|
|
||||||
- delete
|
|
||||||
- apiGroups:
|
|
||||||
- ""
|
|
||||||
resources:
|
|
||||||
- nodes
|
|
||||||
verbs:
|
|
||||||
- get
|
|
||||||
- apiGroups:
|
|
||||||
- ""
|
|
||||||
resources:
|
|
||||||
- events
|
|
||||||
verbs:
|
|
||||||
- create
|
|
||||||
- patch
|
|
||||||
- apiGroups:
|
|
||||||
- networking.k8s.io
|
|
||||||
resources:
|
|
||||||
- ingresses
|
|
||||||
verbs:
|
|
||||||
- get
|
|
||||||
- list
|
|
||||||
- watch
|
|
||||||
- apiGroups:
|
|
||||||
- networking.k8s.io
|
|
||||||
resources:
|
|
||||||
- ingresses/status
|
|
||||||
verbs:
|
|
||||||
- update
|
|
||||||
- apiGroups:
|
|
||||||
- networking.k8s.io
|
|
||||||
resources:
|
|
||||||
- ingressclasses
|
|
||||||
verbs:
|
|
||||||
- get
|
|
||||||
- list
|
|
||||||
- watch
|
|
||||||
- apiGroups:
|
|
||||||
- coordination.k8s.io
|
|
||||||
resources:
|
|
||||||
- leases
|
|
||||||
verbs:
|
|
||||||
- get
|
|
||||||
- create
|
|
||||||
- update
|
|
||||||
- delete
|
|
||||||
---
|
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
kind: ClusterRoleBinding
|
|
||||||
metadata:
|
|
||||||
name: caddy-ingress-controller
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: caddy-ingress-controller
|
|
||||||
app.kubernetes.io/instance: caddy-ingress
|
|
||||||
roleRef:
|
|
||||||
apiGroup: rbac.authorization.k8s.io
|
|
||||||
kind: ClusterRole
|
|
||||||
name: caddy-ingress-controller
|
|
||||||
subjects:
|
|
||||||
- kind: ServiceAccount
|
|
||||||
name: caddy-ingress-controller
|
|
||||||
namespace: caddy-system
|
|
||||||
---
|
|
||||||
apiVersion: v1
|
|
||||||
kind: ConfigMap
|
|
||||||
metadata:
|
|
||||||
name: caddy-ingress-controller-configmap
|
|
||||||
namespace: caddy-system
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: caddy-ingress-controller
|
|
||||||
app.kubernetes.io/instance: caddy-ingress
|
|
||||||
data:
|
|
||||||
# Caddy global options
|
|
||||||
acmeCA: "https://acme-v02.api.letsencrypt.org/directory"
|
|
||||||
email: ""
|
|
||||||
---
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Service
|
|
||||||
metadata:
|
|
||||||
name: caddy-ingress-controller
|
|
||||||
namespace: caddy-system
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: caddy-ingress-controller
|
|
||||||
app.kubernetes.io/instance: caddy-ingress
|
|
||||||
app.kubernetes.io/component: controller
|
|
||||||
spec:
|
|
||||||
type: NodePort
|
|
||||||
ports:
|
|
||||||
- name: http
|
|
||||||
port: 80
|
|
||||||
targetPort: http
|
|
||||||
protocol: TCP
|
|
||||||
- name: https
|
|
||||||
port: 443
|
|
||||||
targetPort: https
|
|
||||||
protocol: TCP
|
|
||||||
selector:
|
|
||||||
app.kubernetes.io/name: caddy-ingress-controller
|
|
||||||
app.kubernetes.io/instance: caddy-ingress
|
|
||||||
app.kubernetes.io/component: controller
|
|
||||||
---
|
|
||||||
apiVersion: apps/v1
|
|
||||||
kind: Deployment
|
|
||||||
metadata:
|
|
||||||
name: caddy-ingress-controller
|
|
||||||
namespace: caddy-system
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: caddy-ingress-controller
|
|
||||||
app.kubernetes.io/instance: caddy-ingress
|
|
||||||
app.kubernetes.io/component: controller
|
|
||||||
spec:
|
|
||||||
replicas: 1
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: caddy-ingress-controller
|
|
||||||
app.kubernetes.io/instance: caddy-ingress
|
|
||||||
app.kubernetes.io/component: controller
|
|
||||||
template:
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: caddy-ingress-controller
|
|
||||||
app.kubernetes.io/instance: caddy-ingress
|
|
||||||
app.kubernetes.io/component: controller
|
|
||||||
spec:
|
|
||||||
serviceAccountName: caddy-ingress-controller
|
|
||||||
terminationGracePeriodSeconds: 60
|
|
||||||
nodeSelector:
|
|
||||||
ingress-ready: "true"
|
|
||||||
kubernetes.io/os: linux
|
|
||||||
tolerations:
|
|
||||||
- effect: NoSchedule
|
|
||||||
key: node-role.kubernetes.io/master
|
|
||||||
operator: Equal
|
|
||||||
- effect: NoSchedule
|
|
||||||
key: node-role.kubernetes.io/control-plane
|
|
||||||
operator: Equal
|
|
||||||
containers:
|
|
||||||
- name: caddy-ingress-controller
|
|
||||||
image: caddy/ingress:latest
|
|
||||||
imagePullPolicy: IfNotPresent
|
|
||||||
ports:
|
|
||||||
- name: http
|
|
||||||
containerPort: 80
|
|
||||||
hostPort: 80
|
|
||||||
protocol: TCP
|
|
||||||
- name: https
|
|
||||||
containerPort: 443
|
|
||||||
hostPort: 443
|
|
||||||
protocol: TCP
|
|
||||||
env:
|
|
||||||
- name: POD_NAME
|
|
||||||
valueFrom:
|
|
||||||
fieldRef:
|
|
||||||
fieldPath: metadata.name
|
|
||||||
- name: POD_NAMESPACE
|
|
||||||
valueFrom:
|
|
||||||
fieldRef:
|
|
||||||
fieldPath: metadata.namespace
|
|
||||||
args:
|
|
||||||
- -config-map=caddy-system/caddy-ingress-controller-configmap
|
|
||||||
- -class-name=caddy
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
cpu: 100m
|
|
||||||
memory: 128Mi
|
|
||||||
limits:
|
|
||||||
cpu: 1000m
|
|
||||||
memory: 512Mi
|
|
||||||
readinessProbe:
|
|
||||||
httpGet:
|
|
||||||
path: /healthz
|
|
||||||
port: 9765
|
|
||||||
initialDelaySeconds: 3
|
|
||||||
periodSeconds: 10
|
|
||||||
livenessProbe:
|
|
||||||
httpGet:
|
|
||||||
path: /healthz
|
|
||||||
port: 9765
|
|
||||||
initialDelaySeconds: 3
|
|
||||||
periodSeconds: 10
|
|
||||||
securityContext:
|
|
||||||
allowPrivilegeEscalation: true
|
|
||||||
capabilities:
|
|
||||||
add:
|
|
||||||
- NET_BIND_SERVICE
|
|
||||||
drop:
|
|
||||||
- ALL
|
|
||||||
runAsUser: 0
|
|
||||||
runAsGroup: 0
|
|
||||||
volumeMounts:
|
|
||||||
- name: caddy-data
|
|
||||||
mountPath: /data
|
|
||||||
- name: caddy-config
|
|
||||||
mountPath: /config
|
|
||||||
volumes:
|
|
||||||
- name: caddy-data
|
|
||||||
emptyDir: {}
|
|
||||||
- name: caddy-config
|
|
||||||
emptyDir: {}
|
|
||||||
---
|
|
||||||
apiVersion: networking.k8s.io/v1
|
|
||||||
kind: IngressClass
|
|
||||||
metadata:
|
|
||||||
name: caddy
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: caddy-ingress-controller
|
|
||||||
app.kubernetes.io/instance: caddy-ingress
|
|
||||||
annotations:
|
|
||||||
ingressclass.kubernetes.io/is-default-class: "true"
|
|
||||||
spec:
|
|
||||||
controller: caddy.io/ingress-controller
|
|
||||||
@ -14,3 +14,4 @@ containers:
|
|||||||
pods:
|
pods:
|
||||||
- fixturenet-blast
|
- fixturenet-blast
|
||||||
- foundry
|
- foundry
|
||||||
|
|
||||||
@ -3,3 +3,4 @@
|
|||||||
A "loaded" version of fixturenet-eth, with all the bells and whistles enabled.
|
A "loaded" version of fixturenet-eth, with all the bells and whistles enabled.
|
||||||
|
|
||||||
TODO: write me
|
TODO: write me
|
||||||
|
|
||||||
|
|||||||
@ -64,6 +64,5 @@ $ laconic-so --stack fixturenet-laconic-loaded deploy exec cli ./scripts/create-
|
|||||||
Balance is: 99998999999999998999600000
|
Balance is: 99998999999999998999600000
|
||||||
Created bond with id: dd88e8d6f9567b32b28e70552aea4419c5dd3307ebae85a284d1fe38904e301a
|
Created bond with id: dd88e8d6f9567b32b28e70552aea4419c5dd3307ebae85a284d1fe38904e301a
|
||||||
Published demo-record-1.yml with id: bafyreierh3xnfivexlscdwubvczmddsnf46uytyfvrbdhkjzztvsz6ruly
|
Published demo-record-1.yml with id: bafyreierh3xnfivexlscdwubvczmddsnf46uytyfvrbdhkjzztvsz6ruly
|
||||||
...
|
|
||||||
```
|
```
|
||||||
The published records should be visible in the console.
|
The published record should be visible in the console.
|
||||||
|
|||||||
@ -30,3 +30,4 @@ config:
|
|||||||
cli:
|
cli:
|
||||||
key: laconicd.mykey
|
key: laconicd.mykey
|
||||||
address: laconicd.myaddress
|
address: laconicd.myaddress
|
||||||
|
|
||||||
|
|||||||
@ -14,25 +14,26 @@
|
|||||||
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
||||||
|
|
||||||
from stack_orchestrator.deploy.deployment_context import DeploymentContext
|
from stack_orchestrator.deploy.deployment_context import DeploymentContext
|
||||||
|
from ruamel.yaml import YAML
|
||||||
|
|
||||||
|
|
||||||
def create(context: DeploymentContext, extra_args):
|
def create(context: DeploymentContext, extra_args):
|
||||||
# Slightly modify the base fixturenet-eth compose file to replace the
|
# Slightly modify the base fixturenet-eth compose file to replace the startup script for fixturenet-eth-geth-1
|
||||||
# startup script for fixturenet-eth-geth-1
|
# We need to start geth with the flag to allow non eip-155 compliant transactions in order to publish the
|
||||||
# We need to start geth with the flag to allow non eip-155 compliant
|
# deterministic-deployment-proxy contract, which itself is a prereq for Optimism contract deployment
|
||||||
# transactions in order to publish the
|
fixturenet_eth_compose_file = context.deployment_dir.joinpath('compose', 'docker-compose-fixturenet-eth.yml')
|
||||||
# deterministic-deployment-proxy contract, which itself is a prereq for
|
|
||||||
# Optimism contract deployment
|
|
||||||
fixturenet_eth_compose_file = context.deployment_dir.joinpath(
|
|
||||||
"compose", "docker-compose-fixturenet-eth.yml"
|
|
||||||
)
|
|
||||||
|
|
||||||
new_script = "../config/fixturenet-optimism/run-geth.sh:/opt/testnet/run.sh"
|
with open(fixturenet_eth_compose_file, 'r') as yaml_file:
|
||||||
|
yaml = YAML()
|
||||||
|
yaml_data = yaml.load(yaml_file)
|
||||||
|
|
||||||
def add_geth_volume(yaml_data):
|
new_script = '../config/fixturenet-optimism/run-geth.sh:/opt/testnet/run.sh'
|
||||||
if new_script not in yaml_data["services"]["fixturenet-eth-geth-1"]["volumes"]:
|
|
||||||
yaml_data["services"]["fixturenet-eth-geth-1"]["volumes"].append(new_script)
|
|
||||||
|
|
||||||
context.modify_yaml(fixturenet_eth_compose_file, add_geth_volume)
|
if new_script not in yaml_data['services']['fixturenet-eth-geth-1']['volumes']:
|
||||||
|
yaml_data['services']['fixturenet-eth-geth-1']['volumes'].append(new_script)
|
||||||
|
|
||||||
|
with open(fixturenet_eth_compose_file, 'w') as yaml_file:
|
||||||
|
yaml = YAML()
|
||||||
|
yaml.dump(yaml_data, yaml_file)
|
||||||
|
|
||||||
return None
|
return None
|
||||||
|
|||||||
@ -22,24 +22,18 @@ import yaml
|
|||||||
def create(context, extra_args):
|
def create(context, extra_args):
|
||||||
# Our goal here is just to copy the json files for blast
|
# Our goal here is just to copy the json files for blast
|
||||||
yml_path = context.deployment_dir.joinpath("spec.yml")
|
yml_path = context.deployment_dir.joinpath("spec.yml")
|
||||||
with open(yml_path, "r") as file:
|
with open(yml_path, 'r') as file:
|
||||||
data = yaml.safe_load(file)
|
data = yaml.safe_load(file)
|
||||||
|
|
||||||
mount_point = data["volumes"]["blast-data"]
|
mount_point = data['volumes']['blast-data']
|
||||||
if mount_point[0] == "/":
|
if mount_point[0] == "/":
|
||||||
deploy_dir = Path(mount_point)
|
deploy_dir = Path(mount_point)
|
||||||
else:
|
else:
|
||||||
deploy_dir = context.deployment_dir.joinpath(mount_point)
|
deploy_dir = context.deployment_dir.joinpath(mount_point)
|
||||||
|
|
||||||
command_context = extra_args[2]
|
command_context = extra_args[2]
|
||||||
compose_file = [
|
compose_file = [f for f in command_context.cluster_context.compose_files if "mainnet-blast" in f][0]
|
||||||
f for f in command_context.cluster_context.compose_files if "mainnet-blast" in f
|
source_config_file = Path(compose_file).parent.parent.joinpath("config", "mainnet-blast", "genesis.json")
|
||||||
][0]
|
|
||||||
source_config_file = Path(compose_file).parent.parent.joinpath(
|
|
||||||
"config", "mainnet-blast", "genesis.json"
|
|
||||||
)
|
|
||||||
copy(source_config_file, deploy_dir)
|
copy(source_config_file, deploy_dir)
|
||||||
source_config_file = Path(compose_file).parent.parent.joinpath(
|
source_config_file = Path(compose_file).parent.parent.joinpath("config", "mainnet-blast", "rollup.json")
|
||||||
"config", "mainnet-blast", "rollup.json"
|
|
||||||
)
|
|
||||||
copy(source_config_file, deploy_dir)
|
copy(source_config_file, deploy_dir)
|
||||||
|
|||||||
@ -27,8 +27,6 @@ def setup(ctx):
|
|||||||
def create(ctx, extra_args):
|
def create(ctx, extra_args):
|
||||||
# Generate the JWT secret and save to its config file
|
# Generate the JWT secret and save to its config file
|
||||||
secret = token_hex(32)
|
secret = token_hex(32)
|
||||||
jwt_file_path = ctx.deployment_dir.joinpath(
|
jwt_file_path = ctx.deployment_dir.joinpath("data", "mainnet_eth_plugeth_config_data", "jwtsecret")
|
||||||
"data", "mainnet_eth_plugeth_config_data", "jwtsecret"
|
with open(jwt_file_path, 'w+') as jwt_file:
|
||||||
)
|
|
||||||
with open(jwt_file_path, "w+") as jwt_file:
|
|
||||||
jwt_file.write(secret)
|
jwt_file.write(secret)
|
||||||
|
|||||||
@ -27,8 +27,6 @@ def setup(ctx):
|
|||||||
def create(ctx, extra_args):
|
def create(ctx, extra_args):
|
||||||
# Generate the JWT secret and save to its config file
|
# Generate the JWT secret and save to its config file
|
||||||
secret = token_hex(32)
|
secret = token_hex(32)
|
||||||
jwt_file_path = ctx.deployment_dir.joinpath(
|
jwt_file_path = ctx.deployment_dir.joinpath("data", "mainnet_eth_config_data", "jwtsecret")
|
||||||
"data", "mainnet_eth_config_data", "jwtsecret"
|
with open(jwt_file_path, 'w+') as jwt_file:
|
||||||
)
|
|
||||||
with open(jwt_file_path, "w+") as jwt_file:
|
|
||||||
jwt_file.write(secret)
|
jwt_file.write(secret)
|
||||||
|
|||||||
@ -1 +1,2 @@
|
|||||||
# Laconic Mainnet Deployment (experimental)
|
# Laconic Mainnet Deployment (experimental)
|
||||||
|
|
||||||
|
|||||||
@ -14,10 +14,7 @@
|
|||||||
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
||||||
|
|
||||||
from stack_orchestrator.util import get_yaml
|
from stack_orchestrator.util import get_yaml
|
||||||
from stack_orchestrator.deploy.deploy_types import (
|
from stack_orchestrator.deploy.deploy_types import DeployCommandContext, LaconicStackSetupCommand
|
||||||
DeployCommandContext,
|
|
||||||
LaconicStackSetupCommand,
|
|
||||||
)
|
|
||||||
from stack_orchestrator.deploy.deployment_context import DeploymentContext
|
from stack_orchestrator.deploy.deployment_context import DeploymentContext
|
||||||
from stack_orchestrator.deploy.stack_state import State
|
from stack_orchestrator.deploy.stack_state import State
|
||||||
from stack_orchestrator.deploy.deploy_util import VolumeMapping, run_container_command
|
from stack_orchestrator.deploy.deploy_util import VolumeMapping, run_container_command
|
||||||
@ -25,6 +22,7 @@ from stack_orchestrator.opts import opts
|
|||||||
from enum import Enum
|
from enum import Enum
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from shutil import copyfile, copytree
|
from shutil import copyfile, copytree
|
||||||
|
import json
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
import tomli
|
import tomli
|
||||||
@ -36,9 +34,8 @@ default_spec_file_content = ""
|
|||||||
class SetupPhase(Enum):
|
class SetupPhase(Enum):
|
||||||
INITIALIZE = 1
|
INITIALIZE = 1
|
||||||
JOIN = 2
|
JOIN = 2
|
||||||
CONNECT = 3
|
CREATE = 3
|
||||||
CREATE = 4
|
ILLEGAL = 3
|
||||||
ILLEGAL = 5
|
|
||||||
|
|
||||||
|
|
||||||
def _client_toml_path(network_dir: Path):
|
def _client_toml_path(network_dir: Path):
|
||||||
@ -65,25 +62,36 @@ def _get_node_moniker_from_config(network_dir: Path):
|
|||||||
return moniker
|
return moniker
|
||||||
|
|
||||||
|
|
||||||
|
def _get_node_key_from_gentx(gentx_file_name: str):
|
||||||
|
gentx_file_path = Path(gentx_file_name)
|
||||||
|
if gentx_file_path.exists():
|
||||||
|
with open(Path(gentx_file_name), "rb") as f:
|
||||||
|
parsed_json = json.load(f)
|
||||||
|
return parsed_json['body']['messages'][0]['delegator_address']
|
||||||
|
else:
|
||||||
|
print(f"Error: gentx file: {gentx_file_name} does not exist")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
def _comma_delimited_to_list(list_str: str):
|
def _comma_delimited_to_list(list_str: str):
|
||||||
return list_str.split(",") if list_str else []
|
return list_str.split(",") if list_str else []
|
||||||
|
|
||||||
|
|
||||||
def _get_node_keys_from_gentx_files(gentx_address_list: str):
|
def _get_node_keys_from_gentx_files(gentx_file_list: str):
|
||||||
gentx_addresses = _comma_delimited_to_list(gentx_address_list)
|
node_keys = []
|
||||||
return gentx_addresses
|
gentx_files = _comma_delimited_to_list(gentx_file_list)
|
||||||
|
for gentx_file in gentx_files:
|
||||||
|
node_key = _get_node_key_from_gentx(gentx_file)
|
||||||
|
if node_key:
|
||||||
|
node_keys.append(node_key)
|
||||||
|
return node_keys
|
||||||
|
|
||||||
|
|
||||||
def _copy_gentx_files(network_dir: Path, gentx_file_list: str):
|
def _copy_gentx_files(network_dir: Path, gentx_file_list: str):
|
||||||
gentx_files = _comma_delimited_to_list(gentx_file_list)
|
gentx_files = _comma_delimited_to_list(gentx_file_list)
|
||||||
for gentx_file in gentx_files:
|
for gentx_file in gentx_files:
|
||||||
gentx_file_path = Path(gentx_file)
|
gentx_file_path = Path(gentx_file)
|
||||||
copyfile(
|
copyfile(gentx_file_path, os.path.join(network_dir, "config", "gentx", os.path.basename(gentx_file_path)))
|
||||||
gentx_file_path,
|
|
||||||
os.path.join(
|
|
||||||
network_dir, "config", "gentx", os.path.basename(gentx_file_path)
|
|
||||||
),
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def _remove_persistent_peers(network_dir: Path):
|
def _remove_persistent_peers(network_dir: Path):
|
||||||
@ -94,13 +102,8 @@ def _remove_persistent_peers(network_dir: Path):
|
|||||||
with open(config_file_path, "r") as input_file:
|
with open(config_file_path, "r") as input_file:
|
||||||
config_file_content = input_file.read()
|
config_file_content = input_file.read()
|
||||||
persistent_peers_pattern = '^persistent_peers = "(.+?)"'
|
persistent_peers_pattern = '^persistent_peers = "(.+?)"'
|
||||||
replace_with = 'persistent_peers = ""'
|
replace_with = "persistent_peers = \"\""
|
||||||
config_file_content = re.sub(
|
config_file_content = re.sub(persistent_peers_pattern, replace_with, config_file_content, flags=re.MULTILINE)
|
||||||
persistent_peers_pattern,
|
|
||||||
replace_with,
|
|
||||||
config_file_content,
|
|
||||||
flags=re.MULTILINE,
|
|
||||||
)
|
|
||||||
with open(config_file_path, "w") as output_file:
|
with open(config_file_path, "w") as output_file:
|
||||||
output_file.write(config_file_content)
|
output_file.write(config_file_content)
|
||||||
|
|
||||||
@ -113,13 +116,8 @@ def _insert_persistent_peers(config_dir: Path, new_persistent_peers: str):
|
|||||||
with open(config_file_path, "r") as input_file:
|
with open(config_file_path, "r") as input_file:
|
||||||
config_file_content = input_file.read()
|
config_file_content = input_file.read()
|
||||||
persistent_peers_pattern = r'^persistent_peers = ""'
|
persistent_peers_pattern = r'^persistent_peers = ""'
|
||||||
replace_with = f'persistent_peers = "{new_persistent_peers}"'
|
replace_with = f"persistent_peers = \"{new_persistent_peers}\""
|
||||||
config_file_content = re.sub(
|
config_file_content = re.sub(persistent_peers_pattern, replace_with, config_file_content, flags=re.MULTILINE)
|
||||||
persistent_peers_pattern,
|
|
||||||
replace_with,
|
|
||||||
config_file_content,
|
|
||||||
flags=re.MULTILINE,
|
|
||||||
)
|
|
||||||
with open(config_file_path, "w") as output_file:
|
with open(config_file_path, "w") as output_file:
|
||||||
output_file.write(config_file_content)
|
output_file.write(config_file_content)
|
||||||
|
|
||||||
@ -131,11 +129,9 @@ def _enable_cors(config_dir: Path):
|
|||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
with open(config_file_path, "r") as input_file:
|
with open(config_file_path, "r") as input_file:
|
||||||
config_file_content = input_file.read()
|
config_file_content = input_file.read()
|
||||||
cors_pattern = r"^cors_allowed_origins = \[]"
|
cors_pattern = r'^cors_allowed_origins = \[]'
|
||||||
replace_with = 'cors_allowed_origins = ["*"]'
|
replace_with = 'cors_allowed_origins = ["*"]'
|
||||||
config_file_content = re.sub(
|
config_file_content = re.sub(cors_pattern, replace_with, config_file_content, flags=re.MULTILINE)
|
||||||
cors_pattern, replace_with, config_file_content, flags=re.MULTILINE
|
|
||||||
)
|
|
||||||
with open(config_file_path, "w") as output_file:
|
with open(config_file_path, "w") as output_file:
|
||||||
output_file.write(config_file_content)
|
output_file.write(config_file_content)
|
||||||
app_file_path = config_dir.joinpath("app.toml")
|
app_file_path = config_dir.joinpath("app.toml")
|
||||||
@ -144,46 +140,9 @@ def _enable_cors(config_dir: Path):
|
|||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
with open(app_file_path, "r") as input_file:
|
with open(app_file_path, "r") as input_file:
|
||||||
app_file_content = input_file.read()
|
app_file_content = input_file.read()
|
||||||
cors_pattern = r"^enabled-unsafe-cors = false"
|
cors_pattern = r'^enabled-unsafe-cors = false'
|
||||||
replace_with = "enabled-unsafe-cors = true"
|
replace_with = "enabled-unsafe-cors = true"
|
||||||
app_file_content = re.sub(
|
app_file_content = re.sub(cors_pattern, replace_with, app_file_content, flags=re.MULTILINE)
|
||||||
cors_pattern, replace_with, app_file_content, flags=re.MULTILINE
|
|
||||||
)
|
|
||||||
with open(app_file_path, "w") as output_file:
|
|
||||||
output_file.write(app_file_content)
|
|
||||||
|
|
||||||
|
|
||||||
def _set_listen_address(config_dir: Path):
|
|
||||||
config_file_path = config_dir.joinpath("config.toml")
|
|
||||||
if not config_file_path.exists():
|
|
||||||
print("Error: config.toml not found")
|
|
||||||
sys.exit(1)
|
|
||||||
with open(config_file_path, "r") as input_file:
|
|
||||||
config_file_content = input_file.read()
|
|
||||||
existing_pattern = r'^laddr = "tcp://127.0.0.1:26657"'
|
|
||||||
replace_with = 'laddr = "tcp://0.0.0.0:26657"'
|
|
||||||
print(f"Replacing in: {config_file_path}")
|
|
||||||
config_file_content = re.sub(
|
|
||||||
existing_pattern, replace_with, config_file_content, flags=re.MULTILINE
|
|
||||||
)
|
|
||||||
with open(config_file_path, "w") as output_file:
|
|
||||||
output_file.write(config_file_content)
|
|
||||||
app_file_path = config_dir.joinpath("app.toml")
|
|
||||||
if not app_file_path.exists():
|
|
||||||
print("Error: app.toml not found")
|
|
||||||
sys.exit(1)
|
|
||||||
with open(app_file_path, "r") as input_file:
|
|
||||||
app_file_content = input_file.read()
|
|
||||||
existing_pattern1 = r'^address = "tcp://localhost:1317"'
|
|
||||||
replace_with1 = 'address = "tcp://0.0.0.0:1317"'
|
|
||||||
app_file_content = re.sub(
|
|
||||||
existing_pattern1, replace_with1, app_file_content, flags=re.MULTILINE
|
|
||||||
)
|
|
||||||
existing_pattern2 = r'^address = "localhost:9090"'
|
|
||||||
replace_with2 = 'address = "0.0.0.0:9090"'
|
|
||||||
app_file_content = re.sub(
|
|
||||||
existing_pattern2, replace_with2, app_file_content, flags=re.MULTILINE
|
|
||||||
)
|
|
||||||
with open(app_file_path, "w") as output_file:
|
with open(app_file_path, "w") as output_file:
|
||||||
output_file.write(app_file_content)
|
output_file.write(app_file_content)
|
||||||
|
|
||||||
@ -192,10 +151,7 @@ def _phase_from_params(parameters):
|
|||||||
phase = SetupPhase.ILLEGAL
|
phase = SetupPhase.ILLEGAL
|
||||||
if parameters.initialize_network:
|
if parameters.initialize_network:
|
||||||
if parameters.join_network or parameters.create_network:
|
if parameters.join_network or parameters.create_network:
|
||||||
print(
|
print("Can't supply --join-network or --create-network with --initialize-network")
|
||||||
"Can't supply --join-network or --create-network "
|
|
||||||
"with --initialize-network"
|
|
||||||
)
|
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
if not parameters.chain_id:
|
if not parameters.chain_id:
|
||||||
print("--chain-id is required")
|
print("--chain-id is required")
|
||||||
@ -207,39 +163,22 @@ def _phase_from_params(parameters):
|
|||||||
phase = SetupPhase.INITIALIZE
|
phase = SetupPhase.INITIALIZE
|
||||||
elif parameters.join_network:
|
elif parameters.join_network:
|
||||||
if parameters.initialize_network or parameters.create_network:
|
if parameters.initialize_network or parameters.create_network:
|
||||||
print(
|
print("Can't supply --initialize-network or --create-network with --join-network")
|
||||||
"Can't supply --initialize-network or --create-network "
|
|
||||||
"with --join-network"
|
|
||||||
)
|
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
phase = SetupPhase.JOIN
|
phase = SetupPhase.JOIN
|
||||||
elif parameters.create_network:
|
elif parameters.create_network:
|
||||||
if parameters.initialize_network or parameters.join_network:
|
if parameters.initialize_network or parameters.join_network:
|
||||||
print(
|
print("Can't supply --initialize-network or --join-network with --create-network")
|
||||||
"Can't supply --initialize-network or --join-network "
|
|
||||||
"with --create-network"
|
|
||||||
)
|
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
phase = SetupPhase.CREATE
|
phase = SetupPhase.CREATE
|
||||||
elif parameters.connect_network:
|
|
||||||
if parameters.initialize_network or parameters.join_network:
|
|
||||||
print(
|
|
||||||
"Can't supply --initialize-network or --join-network "
|
|
||||||
"with --connect-network"
|
|
||||||
)
|
|
||||||
sys.exit(1)
|
|
||||||
phase = SetupPhase.CONNECT
|
|
||||||
return phase
|
return phase
|
||||||
|
|
||||||
|
|
||||||
def setup(
|
def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCommand, extra_args):
|
||||||
command_context: DeployCommandContext,
|
|
||||||
parameters: LaconicStackSetupCommand,
|
|
||||||
extra_args,
|
|
||||||
):
|
|
||||||
options = opts.o
|
options = opts.o
|
||||||
|
|
||||||
currency = "alnt" # Does this need to be a parameter?
|
currency = "stake" # Does this need to be a parameter?
|
||||||
|
|
||||||
if options.debug:
|
if options.debug:
|
||||||
print(f"parameters: {parameters}")
|
print(f"parameters: {parameters}")
|
||||||
@ -248,9 +187,12 @@ def setup(
|
|||||||
|
|
||||||
network_dir = Path(parameters.network_dir).absolute()
|
network_dir = Path(parameters.network_dir).absolute()
|
||||||
laconicd_home_path_in_container = "/laconicd-home"
|
laconicd_home_path_in_container = "/laconicd-home"
|
||||||
mounts = [VolumeMapping(str(network_dir), laconicd_home_path_in_container)]
|
mounts = [
|
||||||
|
VolumeMapping(network_dir, laconicd_home_path_in_container)
|
||||||
|
]
|
||||||
|
|
||||||
if phase == SetupPhase.INITIALIZE:
|
if phase == SetupPhase.INITIALIZE:
|
||||||
|
|
||||||
# We want to create the directory so if it exists that's an error
|
# We want to create the directory so if it exists that's an error
|
||||||
if os.path.exists(network_dir):
|
if os.path.exists(network_dir):
|
||||||
print(f"Error: network directory {network_dir} already exists")
|
print(f"Error: network directory {network_dir} already exists")
|
||||||
@ -260,18 +202,12 @@ def setup(
|
|||||||
|
|
||||||
output, status = run_container_command(
|
output, status = run_container_command(
|
||||||
command_context,
|
command_context,
|
||||||
"laconicd",
|
"laconicd", f"laconicd init {parameters.node_moniker} --home {laconicd_home_path_in_container}\
|
||||||
f"laconicd init {parameters.node_moniker} "
|
--chain-id {parameters.chain_id}", mounts)
|
||||||
f"--home {laconicd_home_path_in_container} "
|
|
||||||
f"--chain-id {parameters.chain_id} --default-denom {currency}",
|
|
||||||
mounts,
|
|
||||||
)
|
|
||||||
if options.debug:
|
if options.debug:
|
||||||
print(f"Command output: {output}")
|
print(f"Command output: {output}")
|
||||||
|
|
||||||
elif phase == SetupPhase.JOIN:
|
elif phase == SetupPhase.JOIN:
|
||||||
# In the join phase (alternative to connect) we are participating in a
|
|
||||||
# genesis ceremony for the chain
|
|
||||||
if not os.path.exists(network_dir):
|
if not os.path.exists(network_dir):
|
||||||
print(f"Error: network directory {network_dir} doesn't exist")
|
print(f"Error: network directory {network_dir} doesn't exist")
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
@ -279,147 +215,76 @@ def setup(
|
|||||||
chain_id = _get_chain_id_from_config(network_dir)
|
chain_id = _get_chain_id_from_config(network_dir)
|
||||||
|
|
||||||
output1, status1 = run_container_command(
|
output1, status1 = run_container_command(
|
||||||
command_context,
|
command_context, "laconicd", f"laconicd keys add {parameters.key_name} --home {laconicd_home_path_in_container}\
|
||||||
"laconicd",
|
--keyring-backend test", mounts)
|
||||||
f"laconicd keys add {parameters.key_name} "
|
|
||||||
f"--home {laconicd_home_path_in_container} --keyring-backend test",
|
|
||||||
mounts,
|
|
||||||
)
|
|
||||||
if options.debug:
|
if options.debug:
|
||||||
print(f"Command output: {output1}")
|
print(f"Command output: {output1}")
|
||||||
output2, status2 = run_container_command(
|
output2, status2 = run_container_command(
|
||||||
command_context,
|
command_context,
|
||||||
"laconicd",
|
"laconicd",
|
||||||
f"laconicd genesis add-genesis-account {parameters.key_name} "
|
f"laconicd add-genesis-account {parameters.key_name} 12900000000000000000000{currency}\
|
||||||
f"12900000000000000000000{currency} "
|
--home {laconicd_home_path_in_container} --keyring-backend test",
|
||||||
f"--home {laconicd_home_path_in_container} --keyring-backend test",
|
mounts)
|
||||||
mounts,
|
|
||||||
)
|
|
||||||
if options.debug:
|
if options.debug:
|
||||||
print(f"Command output: {output2}")
|
print(f"Command output: {output2}")
|
||||||
output3, status3 = run_container_command(
|
output3, status3 = run_container_command(
|
||||||
command_context,
|
command_context,
|
||||||
"laconicd",
|
"laconicd",
|
||||||
f"laconicd genesis gentx {parameters.key_name} "
|
f"laconicd gentx {parameters.key_name} 90000000000{currency} --home {laconicd_home_path_in_container}\
|
||||||
f"90000000000{currency} --home {laconicd_home_path_in_container} "
|
--chain-id {chain_id} --keyring-backend test",
|
||||||
f"--chain-id {chain_id} --keyring-backend test",
|
mounts)
|
||||||
mounts,
|
|
||||||
)
|
|
||||||
if options.debug:
|
if options.debug:
|
||||||
print(f"Command output: {output3}")
|
print(f"Command output: {output3}")
|
||||||
output4, status4 = run_container_command(
|
output4, status4 = run_container_command(
|
||||||
command_context,
|
command_context,
|
||||||
"laconicd",
|
"laconicd",
|
||||||
f"laconicd keys show {parameters.key_name} -a "
|
f"laconicd keys show {parameters.key_name} -a --home {laconicd_home_path_in_container} --keyring-backend test",
|
||||||
f"--home {laconicd_home_path_in_container} --keyring-backend test",
|
mounts)
|
||||||
mounts,
|
print(f"Node validator address: {output4}")
|
||||||
)
|
|
||||||
print(f"Node account address: {output4}")
|
|
||||||
|
|
||||||
elif phase == SetupPhase.CONNECT:
|
|
||||||
# In the connect phase (named to not conflict with join) we are
|
|
||||||
# making a node that syncs a chain with existing genesis.json
|
|
||||||
# but not with validator role. We need this kind of node in order to
|
|
||||||
# bootstrap it into a validator after it syncs
|
|
||||||
output1, status1 = run_container_command(
|
|
||||||
command_context,
|
|
||||||
"laconicd",
|
|
||||||
f"laconicd keys add {parameters.key_name} "
|
|
||||||
f"--home {laconicd_home_path_in_container} --keyring-backend test",
|
|
||||||
mounts,
|
|
||||||
)
|
|
||||||
if options.debug:
|
|
||||||
print(f"Command output: {output1}")
|
|
||||||
output2, status2 = run_container_command(
|
|
||||||
command_context,
|
|
||||||
"laconicd",
|
|
||||||
f"laconicd keys show {parameters.key_name} -a "
|
|
||||||
f"--home {laconicd_home_path_in_container} --keyring-backend test",
|
|
||||||
mounts,
|
|
||||||
)
|
|
||||||
print(f"Node account address: {output2}")
|
|
||||||
output3, status3 = run_container_command(
|
|
||||||
command_context,
|
|
||||||
"laconicd",
|
|
||||||
f"laconicd cometbft show-validator "
|
|
||||||
f"--home {laconicd_home_path_in_container}",
|
|
||||||
mounts,
|
|
||||||
)
|
|
||||||
print(f"Node validator address: {output3}")
|
|
||||||
|
|
||||||
elif phase == SetupPhase.CREATE:
|
elif phase == SetupPhase.CREATE:
|
||||||
if not os.path.exists(network_dir):
|
if not os.path.exists(network_dir):
|
||||||
print(f"Error: network directory {network_dir} doesn't exist")
|
print(f"Error: network directory {network_dir} doesn't exist")
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
# In the CREATE phase, we are either a "coordinator" node,
|
# In the CREATE phase, we are either a "coordinator" node, generating the genesis.json file ourselves
|
||||||
# generating the genesis.json file ourselves
|
# OR we are a "not-coordinator" node, consuming a genesis file we got from the coordinator node.
|
||||||
# OR we are a "not-coordinator" node, consuming a genesis file from
|
|
||||||
# the coordinator node.
|
|
||||||
if parameters.genesis_file:
|
if parameters.genesis_file:
|
||||||
# We got the genesis file from elsewhere
|
# We got the genesis file from elsewhere
|
||||||
# Copy it into our network dir
|
# Copy it into our network dir
|
||||||
genesis_file_path = Path(parameters.genesis_file)
|
genesis_file_path = Path(parameters.genesis_file)
|
||||||
if not os.path.exists(genesis_file_path):
|
if not os.path.exists(genesis_file_path):
|
||||||
print(
|
print(f"Error: supplied genesis file: {parameters.genesis_file} does not exist.")
|
||||||
f"Error: supplied genesis file: {parameters.genesis_file} "
|
|
||||||
"does not exist."
|
|
||||||
)
|
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
copyfile(
|
copyfile(genesis_file_path, os.path.join(network_dir, "config", os.path.basename(genesis_file_path)))
|
||||||
genesis_file_path,
|
|
||||||
os.path.join(
|
|
||||||
network_dir, "config", os.path.basename(genesis_file_path)
|
|
||||||
),
|
|
||||||
)
|
|
||||||
else:
|
else:
|
||||||
# We're generating the genesis file
|
# We're generating the genesis file
|
||||||
|
if not parameters.gentx_file_list:
|
||||||
|
print("Error: --gentx-files must be supplied")
|
||||||
|
sys.exit(1)
|
||||||
# First look in the supplied gentx files for the other nodes' keys
|
# First look in the supplied gentx files for the other nodes' keys
|
||||||
other_node_keys = _get_node_keys_from_gentx_files(
|
other_node_keys = _get_node_keys_from_gentx_files(parameters.gentx_file_list)
|
||||||
parameters.gentx_address_list
|
|
||||||
)
|
|
||||||
# Add those keys to our genesis, with balances we determine here (why?)
|
# Add those keys to our genesis, with balances we determine here (why?)
|
||||||
outputk = None
|
|
||||||
for other_node_key in other_node_keys:
|
for other_node_key in other_node_keys:
|
||||||
outputk, statusk = run_container_command(
|
outputk, statusk = run_container_command(
|
||||||
command_context,
|
command_context, "laconicd", f"laconicd add-genesis-account {other_node_key} 12900000000000000000000{currency}\
|
||||||
"laconicd",
|
--home {laconicd_home_path_in_container} --keyring-backend test", mounts)
|
||||||
f"laconicd genesis add-genesis-account {other_node_key} "
|
if options.debug:
|
||||||
f"12900000000000000000000{currency} "
|
|
||||||
f"--home {laconicd_home_path_in_container} "
|
|
||||||
"--keyring-backend test",
|
|
||||||
mounts,
|
|
||||||
)
|
|
||||||
if options.debug and outputk is not None:
|
|
||||||
print(f"Command output: {outputk}")
|
print(f"Command output: {outputk}")
|
||||||
# Copy the gentx json files into our network dir
|
# Copy the gentx json files into our network dir
|
||||||
_copy_gentx_files(network_dir, parameters.gentx_file_list)
|
_copy_gentx_files(network_dir, parameters.gentx_file_list)
|
||||||
# Now we can run collect-gentxs
|
# Now we can run collect-gentxs
|
||||||
output1, status1 = run_container_command(
|
output1, status1 = run_container_command(
|
||||||
command_context,
|
command_context, "laconicd", f"laconicd collect-gentxs --home {laconicd_home_path_in_container}", mounts)
|
||||||
"laconicd",
|
|
||||||
f"laconicd genesis collect-gentxs "
|
|
||||||
f"--home {laconicd_home_path_in_container}",
|
|
||||||
mounts,
|
|
||||||
)
|
|
||||||
if options.debug:
|
if options.debug:
|
||||||
print(f"Command output: {output1}")
|
print(f"Command output: {output1}")
|
||||||
genesis_path = os.path.join(network_dir, "config", "genesis.json")
|
print(f"Generated genesis file, please copy to other nodes as required: \
|
||||||
print(
|
{os.path.join(network_dir, 'config', 'genesis.json')}")
|
||||||
f"Generated genesis file, please copy to other nodes "
|
# Last thing, collect-gentxs puts a likely bogus set of persistent_peers in config.toml so we remove that now
|
||||||
f"as required: {genesis_path}"
|
|
||||||
)
|
|
||||||
# Last thing, collect-gentxs puts a likely bogus set of persistent_peers
|
|
||||||
# in config.toml so we remove that now
|
|
||||||
_remove_persistent_peers(network_dir)
|
_remove_persistent_peers(network_dir)
|
||||||
# In both cases we validate the genesis file now
|
# In both cases we validate the genesis file now
|
||||||
output2, status1 = run_container_command(
|
output2, status1 = run_container_command(
|
||||||
command_context,
|
command_context, "laconicd", f"laconicd validate-genesis --home {laconicd_home_path_in_container}", mounts)
|
||||||
"laconicd",
|
|
||||||
f"laconicd genesis validate-genesis "
|
|
||||||
f"--home {laconicd_home_path_in_container}",
|
|
||||||
mounts,
|
|
||||||
)
|
|
||||||
print(f"validate-genesis result: {output2}")
|
print(f"validate-genesis result: {output2}")
|
||||||
|
|
||||||
else:
|
else:
|
||||||
@ -438,23 +303,15 @@ def create(deployment_context: DeploymentContext, extra_args):
|
|||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
config_dir_path = network_dir_path.joinpath("config")
|
config_dir_path = network_dir_path.joinpath("config")
|
||||||
if not (config_dir_path.exists() and config_dir_path.is_dir()):
|
if not (config_dir_path.exists() and config_dir_path.is_dir()):
|
||||||
print(
|
print(f"Error: supplied network directory does not contain a config directory: {config_dir_path}")
|
||||||
f"Error: supplied network directory does not contain "
|
|
||||||
f"a config directory: {config_dir_path}"
|
|
||||||
)
|
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
data_dir_path = network_dir_path.joinpath("data")
|
data_dir_path = network_dir_path.joinpath("data")
|
||||||
if not (data_dir_path.exists() and data_dir_path.is_dir()):
|
if not (data_dir_path.exists() and data_dir_path.is_dir()):
|
||||||
print(
|
print(f"Error: supplied network directory does not contain a data directory: {data_dir_path}")
|
||||||
f"Error: supplied network directory does not contain "
|
|
||||||
f"a data directory: {data_dir_path}"
|
|
||||||
)
|
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
# Copy the network directory contents into our deployment
|
# Copy the network directory contents into our deployment
|
||||||
# TODO: change this to work with non local paths
|
# TODO: change this to work with non local paths
|
||||||
deployment_config_dir = deployment_context.deployment_dir.joinpath(
|
deployment_config_dir = deployment_context.deployment_dir.joinpath("data", "laconicd-config")
|
||||||
"data", "laconicd-config"
|
|
||||||
)
|
|
||||||
copytree(config_dir_path, deployment_config_dir, dirs_exist_ok=True)
|
copytree(config_dir_path, deployment_config_dir, dirs_exist_ok=True)
|
||||||
# If supplied, add the initial persistent peers to the config file
|
# If supplied, add the initial persistent peers to the config file
|
||||||
if extra_args[1]:
|
if extra_args[1]:
|
||||||
@ -462,12 +319,9 @@ def create(deployment_context: DeploymentContext, extra_args):
|
|||||||
_insert_persistent_peers(deployment_config_dir, initial_persistent_peers)
|
_insert_persistent_peers(deployment_config_dir, initial_persistent_peers)
|
||||||
# Enable CORS headers so explorers and so on can talk to the node
|
# Enable CORS headers so explorers and so on can talk to the node
|
||||||
_enable_cors(deployment_config_dir)
|
_enable_cors(deployment_config_dir)
|
||||||
_set_listen_address(deployment_config_dir)
|
|
||||||
# Copy the data directory contents into our deployment
|
# Copy the data directory contents into our deployment
|
||||||
# TODO: change this to work with non local paths
|
# TODO: change this to work with non local paths
|
||||||
deployment_data_dir = deployment_context.deployment_dir.joinpath(
|
deployment_data_dir = deployment_context.deployment_dir.joinpath("data", "laconicd-data")
|
||||||
"data", "laconicd-data"
|
|
||||||
)
|
|
||||||
copytree(data_dir_path, deployment_data_dir, dirs_exist_ok=True)
|
copytree(data_dir_path, deployment_data_dir, dirs_exist_ok=True)
|
||||||
|
|
||||||
|
|
||||||
@ -477,6 +331,7 @@ def init(command_context: DeployCommandContext):
|
|||||||
|
|
||||||
|
|
||||||
def get_state(command_context: DeployCommandContext):
|
def get_state(command_context: DeployCommandContext):
|
||||||
|
print("Here we get state")
|
||||||
return State.CONFIGURED
|
return State.CONFIGURED
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@ -10,7 +10,7 @@ repos:
|
|||||||
- git.vdb.to/cerc-io/registry-sdk
|
- git.vdb.to/cerc-io/registry-sdk
|
||||||
- git.vdb.to/cerc-io/laconic-registry-cli
|
- git.vdb.to/cerc-io/laconic-registry-cli
|
||||||
- git.vdb.to/cerc-io/laconic-console
|
- git.vdb.to/cerc-io/laconic-console
|
||||||
- git.vdb.to/cerc-io/cosmos-explorer
|
- github.com/ping-pub/explorer
|
||||||
npms:
|
npms:
|
||||||
- registry-sdk
|
- registry-sdk
|
||||||
- laconic-registry-cli
|
- laconic-registry-cli
|
||||||
|
|||||||
@ -8,11 +8,8 @@ echo "Environment variables:"
|
|||||||
env
|
env
|
||||||
# Test laconic stack
|
# Test laconic stack
|
||||||
echo "Running laconic stack test"
|
echo "Running laconic stack test"
|
||||||
if [ "$1" == "from-path" ]; then
|
# Bit of a hack, test the most recent package
|
||||||
TEST_TARGET_SO="laconic-so"
|
|
||||||
else
|
|
||||||
TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
|
TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
|
||||||
fi
|
|
||||||
# Set a non-default repo dir
|
# Set a non-default repo dir
|
||||||
export CERC_REPO_BASE_DIR=~/stack-orchestrator-test/repo-base-dir
|
export CERC_REPO_BASE_DIR=~/stack-orchestrator-test/repo-base-dir
|
||||||
echo "Testing this package: $TEST_TARGET_SO"
|
echo "Testing this package: $TEST_TARGET_SO"
|
||||||
|
|||||||
@ -2,50 +2,4 @@
|
|||||||
|
|
||||||
The Package Registry Stack supports a build environment that requires a package registry (initially for NPM packages only).
|
The Package Registry Stack supports a build environment that requires a package registry (initially for NPM packages only).
|
||||||
|
|
||||||
## Setup
|
Setup instructions can be found [here](../build-support/README.md).
|
||||||
|
|
||||||
* Setup required repos and build containers:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
laconic-so --stack package-registry setup-repositories
|
|
||||||
laconic-so --stack package-registry build-containers
|
|
||||||
```
|
|
||||||
|
|
||||||
* Create a deployment:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
laconic-so --stack package-registry deploy init --output package-registry-spec.yml
|
|
||||||
# Update port mapping in the laconic-loaded.spec file to resolve port conflicts on host if any
|
|
||||||
|
|
||||||
laconic-so --stack package-registry deploy create --deployment-dir package-registry-deployment --spec-file package-registry-spec.yml
|
|
||||||
```
|
|
||||||
|
|
||||||
* Start the deployment:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
laconic-so deployment --dir package-registry-deployment start
|
|
||||||
```
|
|
||||||
|
|
||||||
* The local gitea registry can now be accessed at <http://localhost:3000> (the username and password can be taken from the deployment logs)
|
|
||||||
|
|
||||||
* Configure the hostname `gitea.local`:
|
|
||||||
|
|
||||||
Update `/etc/hosts`:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
sudo nano /etc/hosts
|
|
||||||
|
|
||||||
# Add the following line
|
|
||||||
127.0.0.1 gitea.local
|
|
||||||
```
|
|
||||||
|
|
||||||
Check resolution:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
ping gitea.local
|
|
||||||
|
|
||||||
PING gitea.local (127.0.0.1) 56(84) bytes of data.
|
|
||||||
64 bytes from localhost (127.0.0.1): icmp_seq=1 ttl=64 time=0.147 ms
|
|
||||||
64 bytes from localhost (127.0.0.1): icmp_seq=2 ttl=64 time=0.033 ms
|
|
||||||
...
|
|
||||||
```
|
|
||||||
|
|||||||
@ -15,30 +15,25 @@
|
|||||||
|
|
||||||
from stack_orchestrator.util import get_yaml
|
from stack_orchestrator.util import get_yaml
|
||||||
from stack_orchestrator.deploy.deploy_types import DeployCommandContext
|
from stack_orchestrator.deploy.deploy_types import DeployCommandContext
|
||||||
from stack_orchestrator.deploy.deployment_context import DeploymentContext
|
|
||||||
from stack_orchestrator.deploy.stack_state import State
|
from stack_orchestrator.deploy.stack_state import State
|
||||||
from stack_orchestrator.deploy.deploy_util import VolumeMapping, run_container_command
|
from stack_orchestrator.deploy.deploy_util import VolumeMapping, run_container_command
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
|
|
||||||
default_spec_file_content = """config:
|
default_spec_file_content = """config:
|
||||||
test_variable_1: test-value-1
|
test-variable-1: test-value-1
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
|
||||||
# Output a known string to a know file in the bind mounted directory
|
# Output a known string to a know file in the bind mounted directory ./container-output-dir
|
||||||
# ./container-output-dir
|
|
||||||
# for test purposes -- test checks that the file was written.
|
# for test purposes -- test checks that the file was written.
|
||||||
def setup(command_context: DeployCommandContext, parameters, extra_args):
|
def setup(command_context: DeployCommandContext, parameters, extra_args):
|
||||||
host_directory = "./container-output-dir"
|
host_directory = "./container-output-dir"
|
||||||
host_directory_absolute = Path(extra_args[0]).absolute().joinpath(host_directory)
|
host_directory_absolute = Path(extra_args[0]).absolute().joinpath(host_directory)
|
||||||
host_directory_absolute.mkdir(parents=True, exist_ok=True)
|
host_directory_absolute.mkdir(parents=True, exist_ok=True)
|
||||||
mounts = [VolumeMapping(str(host_directory_absolute), "/data")]
|
mounts = [
|
||||||
output, status = run_container_command(
|
VolumeMapping(host_directory_absolute, "/data")
|
||||||
command_context,
|
]
|
||||||
"test",
|
output, status = run_container_command(command_context, "test", "echo output-data > /data/output-file && echo success", mounts)
|
||||||
"echo output-data > /data/output-file && echo success",
|
|
||||||
mounts,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def init(command_context: DeployCommandContext):
|
def init(command_context: DeployCommandContext):
|
||||||
@ -46,10 +41,10 @@ def init(command_context: DeployCommandContext):
|
|||||||
return yaml.load(default_spec_file_content)
|
return yaml.load(default_spec_file_content)
|
||||||
|
|
||||||
|
|
||||||
def create(deployment_context: DeploymentContext, extra_args):
|
def create(command_context: DeployCommandContext, extra_args):
|
||||||
data = "create-command-output-data"
|
data = "create-command-output-data"
|
||||||
output_file_path = deployment_context.deployment_dir.joinpath("create-file")
|
output_file_path = command_context.deployment_dir.joinpath("create-file")
|
||||||
with open(output_file_path, "w+") as output_file:
|
with open(output_file_path, 'w+') as output_file:
|
||||||
output_file.write(data)
|
output_file.write(data)
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@ -2,10 +2,9 @@ version: "1.0"
|
|||||||
name: test
|
name: test
|
||||||
description: "A test stack"
|
description: "A test stack"
|
||||||
repos:
|
repos:
|
||||||
|
- git.vdb.to/cerc-io/laconicd
|
||||||
- git.vdb.to/cerc-io/test-project@test-branch
|
- git.vdb.to/cerc-io/test-project@test-branch
|
||||||
containers:
|
containers:
|
||||||
- cerc/test-container
|
- cerc/test-container
|
||||||
pods:
|
pods:
|
||||||
- test
|
- test
|
||||||
jobs:
|
|
||||||
- test-job
|
|
||||||
|
|||||||
@ -14,13 +14,8 @@
|
|||||||
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
||||||
|
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from typing import Optional
|
|
||||||
from python_on_whales import DockerClient, DockerException
|
from python_on_whales import DockerClient, DockerException
|
||||||
from stack_orchestrator.deploy.deployer import (
|
from stack_orchestrator.deploy.deployer import Deployer, DeployerException, DeployerConfigGenerator
|
||||||
Deployer,
|
|
||||||
DeployerException,
|
|
||||||
DeployerConfigGenerator,
|
|
||||||
)
|
|
||||||
from stack_orchestrator.deploy.deployment_context import DeploymentContext
|
from stack_orchestrator.deploy.deployment_context import DeploymentContext
|
||||||
from stack_orchestrator.opts import opts
|
from stack_orchestrator.opts import opts
|
||||||
|
|
||||||
@ -29,33 +24,19 @@ class DockerDeployer(Deployer):
|
|||||||
name: str = "compose"
|
name: str = "compose"
|
||||||
type: str
|
type: str
|
||||||
|
|
||||||
def __init__(
|
def __init__(self, type, deployment_context: DeploymentContext, compose_files, compose_project_name, compose_env_file) -> None:
|
||||||
self,
|
self.docker = DockerClient(compose_files=compose_files, compose_project_name=compose_project_name,
|
||||||
type: str,
|
compose_env_file=compose_env_file)
|
||||||
deployment_context: Optional[DeploymentContext],
|
|
||||||
compose_files: list,
|
|
||||||
compose_project_name: Optional[str],
|
|
||||||
compose_env_file: Optional[str],
|
|
||||||
) -> None:
|
|
||||||
self.docker = DockerClient(
|
|
||||||
compose_files=compose_files,
|
|
||||||
compose_project_name=compose_project_name,
|
|
||||||
compose_env_file=compose_env_file,
|
|
||||||
)
|
|
||||||
self.type = type
|
self.type = type
|
||||||
# Store these for later use in run_job
|
|
||||||
self.compose_files = compose_files
|
|
||||||
self.compose_project_name = compose_project_name
|
|
||||||
self.compose_env_file = compose_env_file
|
|
||||||
|
|
||||||
def up(self, detach, skip_cluster_management, services):
|
def up(self, detach, services):
|
||||||
if not opts.o.dry_run:
|
if not opts.o.dry_run:
|
||||||
try:
|
try:
|
||||||
return self.docker.compose.up(detach=detach, services=services)
|
return self.docker.compose.up(detach=detach, services=services)
|
||||||
except DockerException as e:
|
except DockerException as e:
|
||||||
raise DeployerException(e)
|
raise DeployerException(e)
|
||||||
|
|
||||||
def down(self, timeout, volumes, skip_cluster_management):
|
def down(self, timeout, volumes):
|
||||||
if not opts.o.dry_run:
|
if not opts.o.dry_run:
|
||||||
try:
|
try:
|
||||||
return self.docker.compose.down(timeout=timeout, volumes=volumes)
|
return self.docker.compose.down(timeout=timeout, volumes=volumes)
|
||||||
@ -87,98 +68,35 @@ class DockerDeployer(Deployer):
|
|||||||
def port(self, service, private_port):
|
def port(self, service, private_port):
|
||||||
if not opts.o.dry_run:
|
if not opts.o.dry_run:
|
||||||
try:
|
try:
|
||||||
return self.docker.compose.port(
|
return self.docker.compose.port(service=service, private_port=private_port)
|
||||||
service=service, private_port=private_port
|
|
||||||
)
|
|
||||||
except DockerException as e:
|
except DockerException as e:
|
||||||
raise DeployerException(e)
|
raise DeployerException(e)
|
||||||
|
|
||||||
def execute(self, service, command, tty, envs):
|
def execute(self, service, command, tty, envs):
|
||||||
if not opts.o.dry_run:
|
if not opts.o.dry_run:
|
||||||
try:
|
try:
|
||||||
return self.docker.compose.execute(
|
return self.docker.compose.execute(service=service, command=command, tty=tty, envs=envs)
|
||||||
service=service, command=command, tty=tty, envs=envs
|
|
||||||
)
|
|
||||||
except DockerException as e:
|
except DockerException as e:
|
||||||
raise DeployerException(e)
|
raise DeployerException(e)
|
||||||
|
|
||||||
def logs(self, services, tail, follow, stream):
|
def logs(self, services, tail, follow, stream):
|
||||||
if not opts.o.dry_run:
|
if not opts.o.dry_run:
|
||||||
try:
|
try:
|
||||||
return self.docker.compose.logs(
|
return self.docker.compose.logs(services=services, tail=tail, follow=follow, stream=stream)
|
||||||
services=services, tail=tail, follow=follow, stream=stream
|
|
||||||
)
|
|
||||||
except DockerException as e:
|
except DockerException as e:
|
||||||
raise DeployerException(e)
|
raise DeployerException(e)
|
||||||
|
|
||||||
def run(
|
def run(self, image: str, command=None, user=None, volumes=None, entrypoint=None, env={}, ports=[], detach=False):
|
||||||
self,
|
|
||||||
image: str,
|
|
||||||
command=None,
|
|
||||||
user=None,
|
|
||||||
volumes=None,
|
|
||||||
entrypoint=None,
|
|
||||||
env={},
|
|
||||||
ports=[],
|
|
||||||
detach=False,
|
|
||||||
):
|
|
||||||
if not opts.o.dry_run:
|
if not opts.o.dry_run:
|
||||||
try:
|
try:
|
||||||
return self.docker.run(
|
return self.docker.run(image=image, command=command, user=user, volumes=volumes,
|
||||||
image=image,
|
entrypoint=entrypoint, envs=env, detach=detach, publish=ports, publish_all=len(ports) == 0)
|
||||||
command=command if command else [],
|
|
||||||
user=user,
|
|
||||||
volumes=volumes,
|
|
||||||
entrypoint=entrypoint,
|
|
||||||
envs=env,
|
|
||||||
detach=detach,
|
|
||||||
publish=ports,
|
|
||||||
publish_all=len(ports) == 0,
|
|
||||||
)
|
|
||||||
except DockerException as e:
|
|
||||||
raise DeployerException(e)
|
|
||||||
|
|
||||||
def run_job(self, job_name: str, release_name: Optional[str] = None):
|
|
||||||
# release_name is ignored for Docker deployments (only used for K8s/Helm)
|
|
||||||
if not opts.o.dry_run:
|
|
||||||
try:
|
|
||||||
# Find job compose file in compose-jobs directory
|
|
||||||
# The deployment should have compose-jobs/docker-compose-<job_name>.yml
|
|
||||||
if not self.compose_files:
|
|
||||||
raise DeployerException("No compose files configured")
|
|
||||||
|
|
||||||
# Deployment directory is parent of compose directory
|
|
||||||
compose_dir = Path(self.compose_files[0]).parent
|
|
||||||
deployment_dir = compose_dir.parent
|
|
||||||
job_compose_file = (
|
|
||||||
deployment_dir / "compose-jobs" / f"docker-compose-{job_name}.yml"
|
|
||||||
)
|
|
||||||
|
|
||||||
if not job_compose_file.exists():
|
|
||||||
raise DeployerException(
|
|
||||||
f"Job compose file not found: {job_compose_file}"
|
|
||||||
)
|
|
||||||
|
|
||||||
if opts.o.verbose:
|
|
||||||
print(f"Running job from: {job_compose_file}")
|
|
||||||
|
|
||||||
# Create a DockerClient for the job compose file with same
|
|
||||||
# project name and env file
|
|
||||||
# This allows the job to access volumes from the main deployment
|
|
||||||
job_docker = DockerClient(
|
|
||||||
compose_files=[job_compose_file],
|
|
||||||
compose_project_name=self.compose_project_name,
|
|
||||||
compose_env_file=self.compose_env_file,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Run the job with --rm flag to remove container after completion
|
|
||||||
return job_docker.compose.run(service=job_name, remove=True, tty=True)
|
|
||||||
|
|
||||||
except DockerException as e:
|
except DockerException as e:
|
||||||
raise DeployerException(e)
|
raise DeployerException(e)
|
||||||
|
|
||||||
|
|
||||||
class DockerDeployerConfigGenerator(DeployerConfigGenerator):
|
class DockerDeployerConfigGenerator(DeployerConfigGenerator):
|
||||||
|
|
||||||
def __init__(self, type: str) -> None:
|
def __init__(self, type: str) -> None:
|
||||||
super().__init__()
|
super().__init__()
|
||||||
|
|
||||||
|
|||||||
@ -21,7 +21,6 @@ import os
|
|||||||
import sys
|
import sys
|
||||||
from dataclasses import dataclass
|
from dataclasses import dataclass
|
||||||
from importlib import resources
|
from importlib import resources
|
||||||
from typing import Optional
|
|
||||||
import subprocess
|
import subprocess
|
||||||
import click
|
import click
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
@ -35,38 +34,28 @@ from stack_orchestrator.util import (
|
|||||||
get_dev_root_path,
|
get_dev_root_path,
|
||||||
stack_is_in_deployment,
|
stack_is_in_deployment,
|
||||||
resolve_compose_file,
|
resolve_compose_file,
|
||||||
get_job_list,
|
|
||||||
)
|
)
|
||||||
from stack_orchestrator.deploy.deployer import DeployerException
|
from stack_orchestrator.deploy.deployer import Deployer, DeployerException
|
||||||
from stack_orchestrator.deploy.deployer_factory import getDeployer
|
from stack_orchestrator.deploy.deployer_factory import getDeployer
|
||||||
from stack_orchestrator.deploy.compose.deploy_docker import DockerDeployer
|
|
||||||
from stack_orchestrator.deploy.deploy_types import ClusterContext, DeployCommandContext
|
from stack_orchestrator.deploy.deploy_types import ClusterContext, DeployCommandContext
|
||||||
from stack_orchestrator.deploy.deployment_context import DeploymentContext
|
from stack_orchestrator.deploy.deployment_context import DeploymentContext
|
||||||
from stack_orchestrator.deploy.deployment_create import create as deployment_create
|
from stack_orchestrator.deploy.deployment_create import create as deployment_create
|
||||||
from stack_orchestrator.deploy.deployment_create import init as deployment_init
|
from stack_orchestrator.deploy.deployment_create import init as deployment_init
|
||||||
from stack_orchestrator.deploy.deployment_create import setup as deployment_setup
|
from stack_orchestrator.deploy.deployment_create import setup as deployment_setup
|
||||||
from stack_orchestrator.deploy.k8s import k8s_command
|
|
||||||
|
|
||||||
|
|
||||||
@click.group()
|
@click.group()
|
||||||
@click.option("--include", help="only start these components")
|
@click.option("--include", help="only start these components")
|
||||||
@click.option("--exclude", help="don't start these components")
|
@click.option("--exclude", help="don\'t start these components")
|
||||||
@click.option("--env-file", help="env file to be used")
|
@click.option("--env-file", help="env file to be used")
|
||||||
@click.option("--cluster", help="specify a non-default cluster name")
|
@click.option("--cluster", help="specify a non-default cluster name")
|
||||||
@click.option(
|
@click.option("--deploy-to", help="cluster system to deploy to (compose or k8s or k8s-kind)")
|
||||||
"--deploy-to", help="cluster system to deploy to (compose or k8s or k8s-kind)"
|
|
||||||
)
|
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(ctx, include, exclude, env_file, cluster, deploy_to):
|
def command(ctx, include, exclude, env_file, cluster, deploy_to):
|
||||||
"""deploy a stack"""
|
'''deploy a stack'''
|
||||||
|
|
||||||
# k8s subcommand doesn't require a stack
|
# Although in theory for some subcommands (e.g. deploy create) the stack can be inferred,
|
||||||
if ctx.invoked_subcommand == "k8s":
|
# Click doesn't allow us to know that here, so we make providing the stack mandatory
|
||||||
return
|
|
||||||
|
|
||||||
# Although in theory for some subcommands (e.g. deploy create) the stack
|
|
||||||
# can be inferred, Click doesn't allow us to know that here, so we make
|
|
||||||
# providing the stack mandatory
|
|
||||||
stack = global_options2(ctx).stack
|
stack = global_options2(ctx).stack
|
||||||
if not stack:
|
if not stack:
|
||||||
print("Error: --stack option is required")
|
print("Error: --stack option is required")
|
||||||
@ -79,66 +68,30 @@ def command(ctx, include, exclude, env_file, cluster, deploy_to):
|
|||||||
deploy_to = "compose"
|
deploy_to = "compose"
|
||||||
|
|
||||||
stack = get_stack_path(stack)
|
stack = get_stack_path(stack)
|
||||||
ctx.obj = create_deploy_context(
|
ctx.obj = create_deploy_context(global_options2(ctx), None, stack, include, exclude, cluster, env_file, deploy_to)
|
||||||
global_options2(ctx),
|
|
||||||
None,
|
|
||||||
stack,
|
|
||||||
include,
|
|
||||||
exclude,
|
|
||||||
cluster,
|
|
||||||
env_file,
|
|
||||||
deploy_to,
|
|
||||||
)
|
|
||||||
# Subcommand is executed now, by the magic of click
|
# Subcommand is executed now, by the magic of click
|
||||||
|
|
||||||
|
|
||||||
def create_deploy_context(
|
def create_deploy_context(
|
||||||
global_context,
|
global_context,
|
||||||
deployment_context: Optional[DeploymentContext],
|
deployment_context: DeploymentContext,
|
||||||
stack,
|
stack,
|
||||||
include,
|
include,
|
||||||
exclude,
|
exclude,
|
||||||
cluster,
|
cluster,
|
||||||
env_file,
|
env_file,
|
||||||
deploy_to,
|
deploy_to) -> DeployCommandContext:
|
||||||
) -> DeployCommandContext:
|
|
||||||
# Extract the cluster name from the deployment, if we have one
|
# Extract the cluster name from the deployment, if we have one
|
||||||
if deployment_context and cluster is None:
|
if deployment_context and cluster is None:
|
||||||
cluster = deployment_context.get_cluster_id()
|
cluster = deployment_context.get_cluster_id()
|
||||||
|
cluster_context = _make_cluster_context(global_context, stack, include, exclude, cluster, env_file)
|
||||||
# Check if this is a helm chart deployment (has chart/ but no compose/)
|
deployer = getDeployer(deploy_to, deployment_context, compose_files=cluster_context.compose_files,
|
||||||
# TODO: Add a new deployment type for helm chart deployments
|
|
||||||
# To avoid relying on chart existence in such cases
|
|
||||||
is_helm_chart_deployment = False
|
|
||||||
if deployment_context:
|
|
||||||
chart_dir = deployment_context.deployment_dir / "chart"
|
|
||||||
compose_dir = deployment_context.deployment_dir / "compose"
|
|
||||||
is_helm_chart_deployment = chart_dir.exists() and not compose_dir.exists()
|
|
||||||
|
|
||||||
# For helm chart deployments, skip compose file loading
|
|
||||||
if is_helm_chart_deployment:
|
|
||||||
cluster_context = ClusterContext(
|
|
||||||
global_context, cluster, [], [], [], None, env_file
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
cluster_context = _make_cluster_context(
|
|
||||||
global_context, stack, include, exclude, cluster, env_file
|
|
||||||
)
|
|
||||||
|
|
||||||
deployer = getDeployer(
|
|
||||||
deploy_to,
|
|
||||||
deployment_context,
|
|
||||||
compose_files=cluster_context.compose_files,
|
|
||||||
compose_project_name=cluster_context.cluster,
|
compose_project_name=cluster_context.cluster,
|
||||||
compose_env_file=cluster_context.env_file,
|
compose_env_file=cluster_context.env_file)
|
||||||
job_compose_files=cluster_context.job_compose_files,
|
|
||||||
)
|
|
||||||
return DeployCommandContext(stack, cluster_context, deployer)
|
return DeployCommandContext(stack, cluster_context, deployer)
|
||||||
|
|
||||||
|
|
||||||
def up_operation(
|
def up_operation(ctx, services_list, stay_attached=False):
|
||||||
ctx, services_list, stay_attached=False, skip_cluster_management=False
|
|
||||||
):
|
|
||||||
global_context = ctx.parent.parent.obj
|
global_context = ctx.parent.parent.obj
|
||||||
deploy_context = ctx.obj
|
deploy_context = ctx.obj
|
||||||
cluster_context = deploy_context.cluster_context
|
cluster_context = deploy_context.cluster_context
|
||||||
@ -146,38 +99,21 @@ def up_operation(
|
|||||||
for attr, value in container_exec_env.items():
|
for attr, value in container_exec_env.items():
|
||||||
os.environ[attr] = value
|
os.environ[attr] = value
|
||||||
if global_context.verbose:
|
if global_context.verbose:
|
||||||
print(
|
print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {services_list}")
|
||||||
f"Running compose up with container_exec_env: {container_exec_env}, "
|
|
||||||
f"extra_args: {services_list}"
|
|
||||||
)
|
|
||||||
for pre_start_command in cluster_context.pre_start_commands:
|
for pre_start_command in cluster_context.pre_start_commands:
|
||||||
_run_command(global_context, cluster_context.cluster, pre_start_command)
|
_run_command(global_context, cluster_context.cluster, pre_start_command)
|
||||||
deploy_context.deployer.up(
|
deploy_context.deployer.up(detach=not stay_attached, services=services_list)
|
||||||
detach=not stay_attached,
|
|
||||||
skip_cluster_management=skip_cluster_management,
|
|
||||||
services=services_list,
|
|
||||||
)
|
|
||||||
for post_start_command in cluster_context.post_start_commands:
|
for post_start_command in cluster_context.post_start_commands:
|
||||||
_run_command(global_context, cluster_context.cluster, post_start_command)
|
_run_command(global_context, cluster_context.cluster, post_start_command)
|
||||||
_orchestrate_cluster_config(
|
_orchestrate_cluster_config(global_context, cluster_context.config, deploy_context.deployer, container_exec_env)
|
||||||
global_context,
|
|
||||||
cluster_context.config,
|
|
||||||
deploy_context.deployer,
|
|
||||||
container_exec_env,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def down_operation(ctx, delete_volumes, extra_args_list, skip_cluster_management=False):
|
def down_operation(ctx, delete_volumes, extra_args_list):
|
||||||
timeout_arg = None
|
timeout_arg = None
|
||||||
if extra_args_list:
|
if extra_args_list:
|
||||||
timeout_arg = extra_args_list[0]
|
timeout_arg = extra_args_list[0]
|
||||||
# Specify shutdown timeout (default 10s) to give services enough time to
|
# Specify shutdown timeout (default 10s) to give services enough time to shutdown gracefully
|
||||||
# shutdown gracefully
|
ctx.obj.deployer.down(timeout=timeout_arg, volumes=delete_volumes)
|
||||||
ctx.obj.deployer.down(
|
|
||||||
timeout=timeout_arg,
|
|
||||||
volumes=delete_volumes,
|
|
||||||
skip_cluster_management=skip_cluster_management,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def status_operation(ctx):
|
def status_operation(ctx):
|
||||||
@ -204,11 +140,7 @@ def ps_operation(ctx):
|
|||||||
if mapping is None:
|
if mapping is None:
|
||||||
print(f"{port_mapping}", end="")
|
print(f"{port_mapping}", end="")
|
||||||
else:
|
else:
|
||||||
print(
|
print(f"{mapping[0]['HostIp']}:{mapping[0]['HostPort']}->{port_mapping}", end="")
|
||||||
f"{mapping[0]['HostIp']}:{mapping[0]['HostPort']}"
|
|
||||||
f"->{port_mapping}",
|
|
||||||
end="",
|
|
||||||
)
|
|
||||||
comma = ", "
|
comma = ", "
|
||||||
print()
|
print()
|
||||||
else:
|
else:
|
||||||
@ -243,9 +175,7 @@ def exec_operation(ctx, extra_args):
|
|||||||
if global_context.verbose:
|
if global_context.verbose:
|
||||||
print(f"Running compose exec {service_name} {command_to_exec}")
|
print(f"Running compose exec {service_name} {command_to_exec}")
|
||||||
try:
|
try:
|
||||||
ctx.obj.deployer.execute(
|
ctx.obj.deployer.execute(service_name, command_to_exec, envs=container_exec_env, tty=True)
|
||||||
service_name, command_to_exec, envs=container_exec_env, tty=True
|
|
||||||
)
|
|
||||||
except DeployerException:
|
except DeployerException:
|
||||||
print("container command returned error exit status")
|
print("container command returned error exit status")
|
||||||
|
|
||||||
@ -253,26 +183,13 @@ def exec_operation(ctx, extra_args):
|
|||||||
def logs_operation(ctx, tail: int, follow: bool, extra_args: str):
|
def logs_operation(ctx, tail: int, follow: bool, extra_args: str):
|
||||||
extra_args_list = list(extra_args) or None
|
extra_args_list = list(extra_args) or None
|
||||||
services_list = extra_args_list if extra_args_list is not None else []
|
services_list = extra_args_list if extra_args_list is not None else []
|
||||||
logs_stream = ctx.obj.deployer.logs(
|
logs_stream = ctx.obj.deployer.logs(services=services_list, tail=tail, follow=follow, stream=True)
|
||||||
services=services_list, tail=tail, follow=follow, stream=True
|
|
||||||
)
|
|
||||||
for stream_type, stream_content in logs_stream:
|
for stream_type, stream_content in logs_stream:
|
||||||
print(stream_content.decode("utf-8"), end="")
|
print(stream_content.decode("utf-8"), end="")
|
||||||
|
|
||||||
|
|
||||||
def run_job_operation(ctx, job_name: str, helm_release: Optional[str] = None):
|
|
||||||
global_context = ctx.parent.parent.obj
|
|
||||||
if not global_context.dry_run:
|
|
||||||
print(f"Running job: {job_name}")
|
|
||||||
try:
|
|
||||||
ctx.obj.deployer.run_job(job_name, helm_release)
|
|
||||||
except Exception as e:
|
|
||||||
print(f"Error running job {job_name}: {e}")
|
|
||||||
sys.exit(1)
|
|
||||||
|
|
||||||
|
|
||||||
@command.command()
|
@command.command()
|
||||||
@click.argument("extra_args", nargs=-1) # help: command: up <service1> <service2>
|
@click.argument('extra_args', nargs=-1) # help: command: up <service1> <service2>
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def up(ctx, extra_args):
|
def up(ctx, extra_args):
|
||||||
extra_args_list = list(extra_args) or None
|
extra_args_list = list(extra_args) or None
|
||||||
@ -280,10 +197,8 @@ def up(ctx, extra_args):
|
|||||||
|
|
||||||
|
|
||||||
@command.command()
|
@command.command()
|
||||||
@click.option(
|
@click.option("--delete-volumes/--preserve-volumes", default=False, help="delete data volumes")
|
||||||
"--delete-volumes/--preserve-volumes", default=False, help="delete data volumes"
|
@click.argument('extra_args', nargs=-1) # help: command: down<service1> <service2>
|
||||||
)
|
|
||||||
@click.argument("extra_args", nargs=-1) # help: command: down<service1> <service2>
|
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def down(ctx, delete_volumes, extra_args):
|
def down(ctx, delete_volumes, extra_args):
|
||||||
extra_args_list = list(extra_args) or None
|
extra_args_list = list(extra_args) or None
|
||||||
@ -297,14 +212,14 @@ def ps(ctx):
|
|||||||
|
|
||||||
|
|
||||||
@command.command()
|
@command.command()
|
||||||
@click.argument("extra_args", nargs=-1) # help: command: port <service1> <service2>
|
@click.argument('extra_args', nargs=-1) # help: command: port <service1> <service2>
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def port(ctx, extra_args):
|
def port(ctx, extra_args):
|
||||||
port_operation(ctx, extra_args)
|
port_operation(ctx, extra_args)
|
||||||
|
|
||||||
|
|
||||||
@command.command()
|
@command.command()
|
||||||
@click.argument("extra_args", nargs=-1) # help: command: exec <service> <command>
|
@click.argument('extra_args', nargs=-1) # help: command: exec <service> <command>
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def exec(ctx, extra_args):
|
def exec(ctx, extra_args):
|
||||||
exec_operation(ctx, extra_args)
|
exec_operation(ctx, extra_args)
|
||||||
@ -313,49 +228,44 @@ def exec(ctx, extra_args):
|
|||||||
@command.command()
|
@command.command()
|
||||||
@click.option("--tail", "-n", default=None, help="number of lines to display")
|
@click.option("--tail", "-n", default=None, help="number of lines to display")
|
||||||
@click.option("--follow", "-f", is_flag=True, default=False, help="follow log output")
|
@click.option("--follow", "-f", is_flag=True, default=False, help="follow log output")
|
||||||
@click.argument("extra_args", nargs=-1) # help: command: logs <service1> <service2>
|
@click.argument('extra_args', nargs=-1) # help: command: logs <service1> <service2>
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def logs(ctx, tail, follow, extra_args):
|
def logs(ctx, tail, follow, extra_args):
|
||||||
logs_operation(ctx, tail, follow, extra_args)
|
logs_operation(ctx, tail, follow, extra_args)
|
||||||
|
|
||||||
|
|
||||||
def get_stack_status(ctx, stack):
|
def get_stack_status(ctx, stack):
|
||||||
|
|
||||||
ctx_copy = copy.copy(ctx)
|
ctx_copy = copy.copy(ctx)
|
||||||
ctx_copy.stack = stack
|
ctx_copy.stack = stack
|
||||||
|
|
||||||
cluster_context = _make_cluster_context(ctx_copy, stack, None, None, None, None)
|
cluster_context = _make_cluster_context(ctx_copy, stack, None, None, None, None)
|
||||||
deployer = DockerDeployer(
|
deployer = Deployer(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster)
|
||||||
type="compose",
|
|
||||||
deployment_context=None,
|
|
||||||
compose_files=cluster_context.compose_files,
|
|
||||||
compose_project_name=cluster_context.cluster,
|
|
||||||
compose_env_file=cluster_context.env_file,
|
|
||||||
)
|
|
||||||
# TODO: refactor to avoid duplicating this code above
|
# TODO: refactor to avoid duplicating this code above
|
||||||
if ctx.verbose:
|
if ctx.verbose:
|
||||||
print("Running compose ps")
|
print("Running compose ps")
|
||||||
container_list = deployer.ps()
|
container_list = deployer.ps()
|
||||||
if container_list is None or len(container_list) == 0:
|
if len(container_list) > 0:
|
||||||
if ctx.debug:
|
|
||||||
print("No containers found from compose ps")
|
|
||||||
return False
|
|
||||||
if ctx.debug:
|
if ctx.debug:
|
||||||
print(f"Container list from compose ps: {container_list}")
|
print(f"Container list from compose ps: {container_list}")
|
||||||
return True
|
return True
|
||||||
|
else:
|
||||||
|
if ctx.debug:
|
||||||
|
print("No containers found from compose ps")
|
||||||
|
False
|
||||||
|
|
||||||
|
|
||||||
def _make_runtime_env(ctx):
|
def _make_runtime_env(ctx):
|
||||||
container_exec_env = {
|
container_exec_env = {
|
||||||
"CERC_HOST_UID": f"{os.getuid()}",
|
"CERC_HOST_UID": f"{os.getuid()}",
|
||||||
"CERC_HOST_GID": f"{os.getgid()}",
|
"CERC_HOST_GID": f"{os.getgid()}"
|
||||||
}
|
}
|
||||||
container_exec_env.update({"CERC_SCRIPT_DEBUG": "true"} if ctx.debug else {})
|
container_exec_env.update({"CERC_SCRIPT_DEBUG": "true"} if ctx.debug else {})
|
||||||
return container_exec_env
|
return container_exec_env
|
||||||
|
|
||||||
|
|
||||||
def _make_default_cluster_name(deployment, compose_dir, stack, include, exclude):
|
def _make_default_cluster_name(deployment, compose_dir, stack, include, exclude):
|
||||||
# Create default unique, stable cluster name from confile file path and
|
# Create default unique, stable cluster name from confile file path and stack name if provided
|
||||||
# stack name if provided
|
|
||||||
if deployment:
|
if deployment:
|
||||||
path = os.path.realpath(os.path.abspath(compose_dir))
|
path = os.path.realpath(os.path.abspath(compose_dir))
|
||||||
else:
|
else:
|
||||||
@ -370,8 +280,7 @@ def _make_default_cluster_name(deployment, compose_dir, stack, include, exclude)
|
|||||||
return cluster
|
return cluster
|
||||||
|
|
||||||
|
|
||||||
# stack has to be either PathLike pointing to a stack yml file, or a
|
# stack has to be either PathLike pointing to a stack yml file, or a string with the name of a known stack
|
||||||
# string with the name of a known stack
|
|
||||||
def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
|
def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
|
||||||
dev_root_path = get_dev_root_path(ctx)
|
dev_root_path = get_dev_root_path(ctx)
|
||||||
|
|
||||||
@ -380,37 +289,28 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
|
|||||||
if deployment:
|
if deployment:
|
||||||
compose_dir = stack.joinpath("compose")
|
compose_dir = stack.joinpath("compose")
|
||||||
else:
|
else:
|
||||||
# See:
|
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
|
||||||
# https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
|
compose_dir = Path(__file__).absolute().parent.parent.joinpath("data", "compose")
|
||||||
compose_dir = (
|
|
||||||
Path(__file__).absolute().parent.parent.joinpath("data", "compose")
|
|
||||||
)
|
|
||||||
|
|
||||||
if cluster is None:
|
if cluster is None:
|
||||||
cluster = _make_default_cluster_name(
|
cluster = _make_default_cluster_name(deployment, compose_dir, stack, include, exclude)
|
||||||
deployment, compose_dir, stack, include, exclude
|
|
||||||
)
|
|
||||||
else:
|
else:
|
||||||
_make_default_cluster_name(deployment, compose_dir, stack, include, exclude)
|
_make_default_cluster_name(deployment, compose_dir, stack, include, exclude)
|
||||||
|
|
||||||
# See: https://stackoverflow.com/a/20885799/1701505
|
# See: https://stackoverflow.com/a/20885799/1701505
|
||||||
from stack_orchestrator import data
|
from stack_orchestrator import data
|
||||||
|
|
||||||
with resources.open_text(data, "pod-list.txt") as pod_list_file:
|
with resources.open_text(data, "pod-list.txt") as pod_list_file:
|
||||||
all_pods = pod_list_file.read().splitlines()
|
all_pods = pod_list_file.read().splitlines()
|
||||||
|
|
||||||
pods_in_scope = []
|
pods_in_scope = []
|
||||||
cluster_config = None
|
|
||||||
if stack:
|
if stack:
|
||||||
stack_config = get_parsed_stack_config(stack)
|
stack_config = get_parsed_stack_config(stack)
|
||||||
if stack_config is not None:
|
|
||||||
# TODO: syntax check the input here
|
# TODO: syntax check the input here
|
||||||
pods_in_scope = stack_config.get("pods") or []
|
pods_in_scope = stack_config['pods']
|
||||||
cluster_config = (
|
cluster_config = stack_config['config'] if 'config' in stack_config else None
|
||||||
stack_config["config"] if "config" in stack_config else None
|
|
||||||
)
|
|
||||||
else:
|
else:
|
||||||
pods_in_scope = all_pods
|
pods_in_scope = all_pods
|
||||||
|
cluster_config = None
|
||||||
|
|
||||||
# Convert all pod definitions to v1.1 format
|
# Convert all pod definitions to v1.1 format
|
||||||
pods_in_scope = _convert_to_new_format(pods_in_scope)
|
pods_in_scope = _convert_to_new_format(pods_in_scope)
|
||||||
@ -430,47 +330,29 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
|
|||||||
if include_exclude_check(pod_name, include, exclude):
|
if include_exclude_check(pod_name, include, exclude):
|
||||||
if pod_repository is None or pod_repository == "internal":
|
if pod_repository is None or pod_repository == "internal":
|
||||||
if deployment:
|
if deployment:
|
||||||
compose_file_name = os.path.join(
|
compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_path}.yml")
|
||||||
compose_dir, f"docker-compose-{pod_path}.yml"
|
|
||||||
)
|
|
||||||
else:
|
else:
|
||||||
compose_file_name = resolve_compose_file(stack, pod_name)
|
compose_file_name = resolve_compose_file(stack, pod_name)
|
||||||
else:
|
else:
|
||||||
if deployment:
|
if deployment:
|
||||||
compose_file_name = os.path.join(
|
compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_name}.yml")
|
||||||
compose_dir, f"docker-compose-{pod_name}.yml"
|
|
||||||
)
|
|
||||||
pod_pre_start_command = pod.get("pre_start_command")
|
pod_pre_start_command = pod.get("pre_start_command")
|
||||||
pod_post_start_command = pod.get("post_start_command")
|
pod_post_start_command = pod.get("post_start_command")
|
||||||
script_dir = compose_dir.parent.joinpath(
|
script_dir = compose_dir.parent.joinpath("pods", pod_name, "scripts")
|
||||||
"pods", pod_name, "scripts"
|
|
||||||
)
|
|
||||||
if pod_pre_start_command is not None:
|
if pod_pre_start_command is not None:
|
||||||
pre_start_commands.append(
|
pre_start_commands.append(os.path.join(script_dir, pod_pre_start_command))
|
||||||
os.path.join(script_dir, pod_pre_start_command)
|
|
||||||
)
|
|
||||||
if pod_post_start_command is not None:
|
if pod_post_start_command is not None:
|
||||||
post_start_commands.append(
|
post_start_commands.append(os.path.join(script_dir, pod_post_start_command))
|
||||||
os.path.join(script_dir, pod_post_start_command)
|
|
||||||
)
|
|
||||||
else:
|
else:
|
||||||
# TODO: fix this code for external stack with scripts
|
# TODO: fix this code for external stack with scripts
|
||||||
pod_root_dir = os.path.join(
|
pod_root_dir = os.path.join(dev_root_path, pod_repository.split("/")[-1], pod["path"])
|
||||||
dev_root_path, pod_repository.split("/")[-1], pod["path"]
|
compose_file_name = os.path.join(pod_root_dir, f"docker-compose-{pod_name}.yml")
|
||||||
)
|
|
||||||
compose_file_name = os.path.join(
|
|
||||||
pod_root_dir, f"docker-compose-{pod_name}.yml"
|
|
||||||
)
|
|
||||||
pod_pre_start_command = pod.get("pre_start_command")
|
pod_pre_start_command = pod.get("pre_start_command")
|
||||||
pod_post_start_command = pod.get("post_start_command")
|
pod_post_start_command = pod.get("post_start_command")
|
||||||
if pod_pre_start_command is not None:
|
if pod_pre_start_command is not None:
|
||||||
pre_start_commands.append(
|
pre_start_commands.append(os.path.join(pod_root_dir, pod_pre_start_command))
|
||||||
os.path.join(pod_root_dir, pod_pre_start_command)
|
|
||||||
)
|
|
||||||
if pod_post_start_command is not None:
|
if pod_post_start_command is not None:
|
||||||
post_start_commands.append(
|
post_start_commands.append(os.path.join(pod_root_dir, pod_post_start_command))
|
||||||
os.path.join(pod_root_dir, pod_post_start_command)
|
|
||||||
)
|
|
||||||
compose_files.append(compose_file_name)
|
compose_files.append(compose_file_name)
|
||||||
else:
|
else:
|
||||||
if ctx.verbose:
|
if ctx.verbose:
|
||||||
@ -479,32 +361,7 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
|
|||||||
if ctx.verbose:
|
if ctx.verbose:
|
||||||
print(f"files: {compose_files}")
|
print(f"files: {compose_files}")
|
||||||
|
|
||||||
# Gather job compose files (from compose-jobs/ directory in deployment)
|
return ClusterContext(ctx, cluster, compose_files, pre_start_commands, post_start_commands, cluster_config, env_file)
|
||||||
job_compose_files = []
|
|
||||||
if deployment and stack:
|
|
||||||
stack_config = get_parsed_stack_config(stack)
|
|
||||||
if stack_config:
|
|
||||||
jobs = get_job_list(stack_config)
|
|
||||||
compose_jobs_dir = stack.joinpath("compose-jobs")
|
|
||||||
for job in jobs:
|
|
||||||
job_file_name = os.path.join(
|
|
||||||
compose_jobs_dir, f"docker-compose-{job}.yml"
|
|
||||||
)
|
|
||||||
if os.path.exists(job_file_name):
|
|
||||||
job_compose_files.append(job_file_name)
|
|
||||||
if ctx.verbose:
|
|
||||||
print(f"job files: {job_compose_files}")
|
|
||||||
|
|
||||||
return ClusterContext(
|
|
||||||
ctx,
|
|
||||||
cluster,
|
|
||||||
compose_files,
|
|
||||||
pre_start_commands,
|
|
||||||
post_start_commands,
|
|
||||||
cluster_config,
|
|
||||||
env_file,
|
|
||||||
job_compose_files=job_compose_files if job_compose_files else None,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def _convert_to_new_format(old_pod_array):
|
def _convert_to_new_format(old_pod_array):
|
||||||
@ -513,7 +370,11 @@ def _convert_to_new_format(old_pod_array):
|
|||||||
if isinstance(old_pod, dict):
|
if isinstance(old_pod, dict):
|
||||||
new_pod_array.append(old_pod)
|
new_pod_array.append(old_pod)
|
||||||
else:
|
else:
|
||||||
new_pod = {"name": old_pod, "repository": "internal", "path": old_pod}
|
new_pod = {
|
||||||
|
"name": old_pod,
|
||||||
|
"repository": "internal",
|
||||||
|
"path": old_pod
|
||||||
|
}
|
||||||
new_pod_array.append(new_pod)
|
new_pod_array.append(new_pod)
|
||||||
return new_pod_array
|
return new_pod_array
|
||||||
|
|
||||||
@ -527,15 +388,14 @@ def _run_command(ctx, cluster_name, command):
|
|||||||
command_env["CERC_SO_COMPOSE_PROJECT"] = cluster_name
|
command_env["CERC_SO_COMPOSE_PROJECT"] = cluster_name
|
||||||
if ctx.debug:
|
if ctx.debug:
|
||||||
command_env["CERC_SCRIPT_DEBUG"] = "true"
|
command_env["CERC_SCRIPT_DEBUG"] = "true"
|
||||||
command_result = subprocess.run(
|
command_result = subprocess.run(command_file, shell=True, env=command_env, cwd=command_dir)
|
||||||
command_file, shell=True, env=command_env, cwd=command_dir
|
|
||||||
)
|
|
||||||
if command_result.returncode != 0:
|
if command_result.returncode != 0:
|
||||||
print(f"FATAL Error running command: {command}")
|
print(f"FATAL Error running command: {command}")
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
def _orchestrate_cluster_config(ctx, cluster_config, deployer, container_exec_env):
|
def _orchestrate_cluster_config(ctx, cluster_config, deployer, container_exec_env):
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
class ConfigDirective:
|
class ConfigDirective:
|
||||||
source_container: str
|
source_container: str
|
||||||
@ -553,32 +413,24 @@ def _orchestrate_cluster_config(ctx, cluster_config, deployer, container_exec_en
|
|||||||
container_config[directive].split(".")[0],
|
container_config[directive].split(".")[0],
|
||||||
container_config[directive].split(".")[1],
|
container_config[directive].split(".")[1],
|
||||||
container,
|
container,
|
||||||
directive,
|
directive
|
||||||
)
|
)
|
||||||
if ctx.verbose:
|
if ctx.verbose:
|
||||||
print(
|
print(f"Setting {pd.destination_container}.{pd.destination_variable}"
|
||||||
f"Setting {pd.destination_container}.{pd.destination_variable}"
|
f" = {pd.source_container}.{pd.source_variable}")
|
||||||
f" = {pd.source_container}.{pd.source_variable}"
|
|
||||||
)
|
|
||||||
# TODO: add a timeout
|
# TODO: add a timeout
|
||||||
waiting_for_data = True
|
waiting_for_data = True
|
||||||
destination_output = "*** no output received yet ***"
|
destination_output = "*** no output received yet ***"
|
||||||
while waiting_for_data:
|
while waiting_for_data:
|
||||||
# TODO: fix the script paths so they're consistent between
|
# TODO: fix the script paths so they're consistent between containers
|
||||||
# containers
|
|
||||||
source_value = None
|
source_value = None
|
||||||
try:
|
try:
|
||||||
source_value = deployer.execute(
|
source_value = deployer.execute(pd.source_container,
|
||||||
pd.source_container,
|
["sh", "-c",
|
||||||
[
|
|
||||||
"sh",
|
|
||||||
"-c",
|
|
||||||
"sh /docker-entrypoint-scripts.d/export-"
|
"sh /docker-entrypoint-scripts.d/export-"
|
||||||
f"{pd.source_variable}.sh",
|
f"{pd.source_variable}.sh"],
|
||||||
],
|
|
||||||
tty=False,
|
tty=False,
|
||||||
envs=container_exec_env,
|
envs=container_exec_env)
|
||||||
)
|
|
||||||
except DeployerException as error:
|
except DeployerException as error:
|
||||||
if ctx.debug:
|
if ctx.debug:
|
||||||
print(f"Docker exception reading config source: {error}")
|
print(f"Docker exception reading config source: {error}")
|
||||||
@ -586,28 +438,20 @@ def _orchestrate_cluster_config(ctx, cluster_config, deployer, container_exec_en
|
|||||||
# "It returned with code 1"
|
# "It returned with code 1"
|
||||||
if "It returned with code 1" in str(error):
|
if "It returned with code 1" in str(error):
|
||||||
if ctx.verbose:
|
if ctx.verbose:
|
||||||
print(
|
print("Config export script returned an error, re-trying")
|
||||||
"Config export script returned an error, re-trying"
|
# If the script failed to execute (e.g. the file is not there) then we get:
|
||||||
)
|
|
||||||
# If the script failed to execute
|
|
||||||
# (e.g. the file is not there) then we get:
|
|
||||||
# "It returned with code 2"
|
# "It returned with code 2"
|
||||||
if "It returned with code 2" in str(error):
|
if "It returned with code 2" in str(error):
|
||||||
print(f"Fatal error reading config source: {error}")
|
print(f"Fatal error reading config source: {error}")
|
||||||
if source_value:
|
if source_value:
|
||||||
if ctx.debug:
|
if ctx.debug:
|
||||||
print(f"fetched source value: {source_value}")
|
print(f"fetched source value: {source_value}")
|
||||||
destination_output = deployer.execute(
|
destination_output = deployer.execute(pd.destination_container,
|
||||||
pd.destination_container,
|
["sh", "-c",
|
||||||
[
|
|
||||||
"sh",
|
|
||||||
"-c",
|
|
||||||
f"sh /scripts/import-{pd.destination_variable}.sh"
|
f"sh /scripts/import-{pd.destination_variable}.sh"
|
||||||
f" {source_value}",
|
f" {source_value}"],
|
||||||
],
|
|
||||||
tty=False,
|
tty=False,
|
||||||
envs=container_exec_env,
|
envs=container_exec_env)
|
||||||
)
|
|
||||||
waiting_for_data = False
|
waiting_for_data = False
|
||||||
if ctx.debug and not waiting_for_data:
|
if ctx.debug and not waiting_for_data:
|
||||||
print(f"destination output: {destination_output}")
|
print(f"destination output: {destination_output}")
|
||||||
@ -616,4 +460,3 @@ def _orchestrate_cluster_config(ctx, cluster_config, deployer, container_exec_en
|
|||||||
command.add_command(deployment_init)
|
command.add_command(deployment_init)
|
||||||
command.add_command(deployment_create)
|
command.add_command(deployment_create)
|
||||||
command.add_command(deployment_setup)
|
command.add_command(deployment_setup)
|
||||||
command.add_command(k8s_command.command, "k8s")
|
|
||||||
|
|||||||
@ -13,7 +13,7 @@
|
|||||||
# You should have received a copy of the GNU Affero General Public License
|
# You should have received a copy of the GNU Affero General Public License
|
||||||
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
||||||
|
|
||||||
from typing import List, Mapping, Optional
|
from typing import List, Mapping
|
||||||
from dataclasses import dataclass
|
from dataclasses import dataclass
|
||||||
from stack_orchestrator.command_types import CommandOptions
|
from stack_orchestrator.command_types import CommandOptions
|
||||||
from stack_orchestrator.deploy.deployer import Deployer
|
from stack_orchestrator.deploy.deployer import Deployer
|
||||||
@ -21,22 +21,20 @@ from stack_orchestrator.deploy.deployer import Deployer
|
|||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
class ClusterContext:
|
class ClusterContext:
|
||||||
# TODO: this should be in its own object not stuffed in here
|
options: CommandOptions # TODO: this should be in its own object not stuffed in here
|
||||||
options: CommandOptions
|
cluster: str
|
||||||
cluster: Optional[str]
|
|
||||||
compose_files: List[str]
|
compose_files: List[str]
|
||||||
pre_start_commands: List[str]
|
pre_start_commands: List[str]
|
||||||
post_start_commands: List[str]
|
post_start_commands: List[str]
|
||||||
config: Optional[str]
|
config: str
|
||||||
env_file: Optional[str]
|
env_file: str
|
||||||
job_compose_files: Optional[List[str]] = None
|
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
class DeployCommandContext:
|
class DeployCommandContext:
|
||||||
stack: str
|
stack: str
|
||||||
cluster_context: ClusterContext
|
cluster_context: ClusterContext
|
||||||
deployer: Optional[Deployer]
|
deployer: Deployer
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
@ -52,10 +50,8 @@ class LaconicStackSetupCommand:
|
|||||||
key_name: str
|
key_name: str
|
||||||
initialize_network: bool
|
initialize_network: bool
|
||||||
join_network: bool
|
join_network: bool
|
||||||
connect_network: bool
|
|
||||||
create_network: bool
|
create_network: bool
|
||||||
gentx_file_list: str
|
gentx_file_list: str
|
||||||
gentx_address_list: str
|
|
||||||
genesis_file: str
|
genesis_file: str
|
||||||
network_dir: str
|
network_dir: str
|
||||||
|
|
||||||
|
|||||||
@ -13,14 +13,10 @@
|
|||||||
# You should have received a copy of the GNU Affero General Public License
|
# You should have received a copy of the GNU Affero General Public License
|
||||||
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import os
|
||||||
from typing import List, Any
|
from typing import List, Any
|
||||||
from stack_orchestrator.deploy.deploy_types import DeployCommandContext, VolumeMapping
|
from stack_orchestrator.deploy.deploy_types import DeployCommandContext, VolumeMapping
|
||||||
from stack_orchestrator.util import (
|
from stack_orchestrator.util import get_parsed_stack_config, get_yaml, get_pod_list, resolve_compose_file
|
||||||
get_parsed_stack_config,
|
|
||||||
get_yaml,
|
|
||||||
get_pod_list,
|
|
||||||
resolve_compose_file,
|
|
||||||
)
|
|
||||||
from stack_orchestrator.opts import opts
|
from stack_orchestrator.opts import opts
|
||||||
|
|
||||||
|
|
||||||
@ -78,28 +74,17 @@ def _volumes_to_docker(mounts: List[VolumeMapping]):
|
|||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
def run_container_command(
|
def run_container_command(ctx: DeployCommandContext, service: str, command: str, mounts: List[VolumeMapping]):
|
||||||
ctx: DeployCommandContext, service: str, command: str, mounts: List[VolumeMapping]
|
|
||||||
):
|
|
||||||
deployer = ctx.deployer
|
deployer = ctx.deployer
|
||||||
if deployer is None:
|
|
||||||
raise ValueError("Deployer is not configured")
|
|
||||||
container_image = _container_image_from_service(ctx.stack, service)
|
container_image = _container_image_from_service(ctx.stack, service)
|
||||||
if container_image is None:
|
|
||||||
raise ValueError(f"Container image not found for service: {service}")
|
|
||||||
docker_volumes = _volumes_to_docker(mounts)
|
docker_volumes = _volumes_to_docker(mounts)
|
||||||
if ctx.cluster_context.options.debug:
|
if ctx.cluster_context.options.debug:
|
||||||
print(f"Running this command in {service} container: {command}")
|
print(f"Running this command in {service} container: {command}")
|
||||||
docker_output = deployer.run(
|
docker_output = deployer.run(
|
||||||
container_image,
|
container_image,
|
||||||
["-c", command],
|
["-c", command], entrypoint="sh",
|
||||||
entrypoint="sh",
|
user=f"{os.getuid()}:{os.getgid()}",
|
||||||
# Current laconicd container has a bug where it crashes when run not
|
volumes=docker_volumes
|
||||||
# as root
|
|
||||||
# Commented out line below is a workaround. Created files end up
|
|
||||||
# owned by root on the host
|
|
||||||
# user=f"{os.getuid()}:{os.getgid()}",
|
|
||||||
volumes=docker_volumes,
|
|
||||||
)
|
)
|
||||||
# There doesn't seem to be a way to get an exit code from docker.run()
|
# There doesn't seem to be a way to get an exit code from docker.run()
|
||||||
return (docker_output, 0)
|
return (docker_output, 0)
|
||||||
|
|||||||
@ -15,16 +15,16 @@
|
|||||||
|
|
||||||
from abc import ABC, abstractmethod
|
from abc import ABC, abstractmethod
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from typing import Optional
|
|
||||||
|
|
||||||
|
|
||||||
class Deployer(ABC):
|
class Deployer(ABC):
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def up(self, detach, skip_cluster_management, services):
|
def up(self, detach, services):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def down(self, timeout, volumes, skip_cluster_management):
|
def down(self, timeout, volumes):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
@ -52,21 +52,7 @@ class Deployer(ABC):
|
|||||||
pass
|
pass
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def run(
|
def run(self, image: str, command=None, user=None, volumes=None, entrypoint=None, env={}, ports=[], detach=False):
|
||||||
self,
|
|
||||||
image: str,
|
|
||||||
command=None,
|
|
||||||
user=None,
|
|
||||||
volumes=None,
|
|
||||||
entrypoint=None,
|
|
||||||
env={},
|
|
||||||
ports=[],
|
|
||||||
detach=False,
|
|
||||||
):
|
|
||||||
pass
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def run_job(self, job_name: str, release_name: Optional[str] = None):
|
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
@ -76,6 +62,7 @@ class DeployerException(Exception):
|
|||||||
|
|
||||||
|
|
||||||
class DeployerConfigGenerator(ABC):
|
class DeployerConfigGenerator(ABC):
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def generate(self, deployment_dir: Path):
|
def generate(self, deployment_dir: Path):
|
||||||
pass
|
pass
|
||||||
|
|||||||
@ -14,14 +14,8 @@
|
|||||||
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
||||||
|
|
||||||
from stack_orchestrator import constants
|
from stack_orchestrator import constants
|
||||||
from stack_orchestrator.deploy.k8s.deploy_k8s import (
|
from stack_orchestrator.deploy.k8s.deploy_k8s import K8sDeployer, K8sDeployerConfigGenerator
|
||||||
K8sDeployer,
|
from stack_orchestrator.deploy.compose.deploy_docker import DockerDeployer, DockerDeployerConfigGenerator
|
||||||
K8sDeployerConfigGenerator,
|
|
||||||
)
|
|
||||||
from stack_orchestrator.deploy.compose.deploy_docker import (
|
|
||||||
DockerDeployer,
|
|
||||||
DockerDeployerConfigGenerator,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def getDeployerConfigGenerator(type: str, deployment_context):
|
def getDeployerConfigGenerator(type: str, deployment_context):
|
||||||
@ -33,33 +27,10 @@ def getDeployerConfigGenerator(type: str, deployment_context):
|
|||||||
print(f"ERROR: deploy-to {type} is not valid")
|
print(f"ERROR: deploy-to {type} is not valid")
|
||||||
|
|
||||||
|
|
||||||
def getDeployer(
|
def getDeployer(type: str, deployment_context, compose_files, compose_project_name, compose_env_file):
|
||||||
type: str,
|
|
||||||
deployment_context,
|
|
||||||
compose_files,
|
|
||||||
compose_project_name,
|
|
||||||
compose_env_file,
|
|
||||||
job_compose_files=None,
|
|
||||||
):
|
|
||||||
if type == "compose" or type is None:
|
if type == "compose" or type is None:
|
||||||
return DockerDeployer(
|
return DockerDeployer(type, deployment_context, compose_files, compose_project_name, compose_env_file)
|
||||||
type,
|
elif type == type == constants.k8s_deploy_type or type == constants.k8s_kind_deploy_type:
|
||||||
deployment_context,
|
return K8sDeployer(type, deployment_context, compose_files, compose_project_name, compose_env_file)
|
||||||
compose_files,
|
|
||||||
compose_project_name,
|
|
||||||
compose_env_file,
|
|
||||||
)
|
|
||||||
elif (
|
|
||||||
type == type == constants.k8s_deploy_type
|
|
||||||
or type == constants.k8s_kind_deploy_type
|
|
||||||
):
|
|
||||||
return K8sDeployer(
|
|
||||||
type,
|
|
||||||
deployment_context,
|
|
||||||
compose_files,
|
|
||||||
compose_project_name,
|
|
||||||
compose_env_file,
|
|
||||||
job_compose_files=job_compose_files,
|
|
||||||
)
|
|
||||||
else:
|
else:
|
||||||
print(f"ERROR: deploy-to {type} is not valid")
|
print(f"ERROR: deploy-to {type} is not valid")
|
||||||
|
|||||||
@ -15,24 +15,11 @@
|
|||||||
|
|
||||||
import click
|
import click
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
import subprocess
|
|
||||||
import sys
|
import sys
|
||||||
import time
|
|
||||||
from stack_orchestrator import constants
|
from stack_orchestrator import constants
|
||||||
from stack_orchestrator.deploy.images import push_images_operation
|
from stack_orchestrator.deploy.images import push_images_operation
|
||||||
from stack_orchestrator.deploy.deploy import (
|
from stack_orchestrator.deploy.deploy import up_operation, down_operation, ps_operation, port_operation, status_operation
|
||||||
up_operation,
|
from stack_orchestrator.deploy.deploy import exec_operation, logs_operation, create_deploy_context, update_operation
|
||||||
down_operation,
|
|
||||||
ps_operation,
|
|
||||||
port_operation,
|
|
||||||
status_operation,
|
|
||||||
)
|
|
||||||
from stack_orchestrator.deploy.deploy import (
|
|
||||||
exec_operation,
|
|
||||||
logs_operation,
|
|
||||||
create_deploy_context,
|
|
||||||
update_operation,
|
|
||||||
)
|
|
||||||
from stack_orchestrator.deploy.deploy_types import DeployCommandContext
|
from stack_orchestrator.deploy.deploy_types import DeployCommandContext
|
||||||
from stack_orchestrator.deploy.deployment_context import DeploymentContext
|
from stack_orchestrator.deploy.deployment_context import DeploymentContext
|
||||||
|
|
||||||
@ -41,7 +28,7 @@ from stack_orchestrator.deploy.deployment_context import DeploymentContext
|
|||||||
@click.option("--dir", required=True, help="path to deployment directory")
|
@click.option("--dir", required=True, help="path to deployment directory")
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(ctx, dir):
|
def command(ctx, dir):
|
||||||
"""manage a deployment"""
|
'''manage a deployment'''
|
||||||
|
|
||||||
# Check that --stack wasn't supplied
|
# Check that --stack wasn't supplied
|
||||||
if ctx.parent.obj.stack:
|
if ctx.parent.obj.stack:
|
||||||
@ -53,10 +40,7 @@ def command(ctx, dir):
|
|||||||
print(f"Error: deployment directory {dir} does not exist")
|
print(f"Error: deployment directory {dir} does not exist")
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
if not dir_path.is_dir():
|
if not dir_path.is_dir():
|
||||||
print(
|
print(f"Error: supplied deployment directory path {dir} exists but is a file not a directory")
|
||||||
f"Error: supplied deployment directory path {dir} exists but is a "
|
|
||||||
"file not a directory"
|
|
||||||
)
|
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
# Store the deployment context for subcommands
|
# Store the deployment context for subcommands
|
||||||
deployment_context = DeploymentContext()
|
deployment_context = DeploymentContext()
|
||||||
@ -73,93 +57,51 @@ def make_deploy_context(ctx) -> DeployCommandContext:
|
|||||||
else:
|
else:
|
||||||
deployment_type = constants.compose_deploy_type
|
deployment_type = constants.compose_deploy_type
|
||||||
stack = context.deployment_dir
|
stack = context.deployment_dir
|
||||||
return create_deploy_context(
|
return create_deploy_context(ctx.parent.parent.obj, context, stack, None, None,
|
||||||
ctx.parent.parent.obj,
|
cluster_name, env_file, deployment_type)
|
||||||
context,
|
|
||||||
stack,
|
|
||||||
None,
|
|
||||||
None,
|
|
||||||
cluster_name,
|
|
||||||
env_file,
|
|
||||||
deployment_type,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
# TODO: remove legacy up command since it's an alias for start
|
|
||||||
@command.command()
|
@command.command()
|
||||||
@click.option(
|
@click.option("--stay-attached/--detatch-terminal", default=False, help="detatch or not to see container stdout")
|
||||||
"--stay-attached/--detatch-terminal",
|
@click.argument('extra_args', nargs=-1) # help: command: up <service1> <service2>
|
||||||
default=False,
|
|
||||||
help="detatch or not to see container stdout",
|
|
||||||
)
|
|
||||||
@click.option(
|
|
||||||
"--skip-cluster-management/--perform-cluster-management",
|
|
||||||
default=False,
|
|
||||||
help="Skip cluster initialization/tear-down (only for kind-k8s deployments)",
|
|
||||||
)
|
|
||||||
@click.argument("extra_args", nargs=-1) # help: command: up <service1> <service2>
|
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def up(ctx, stay_attached, skip_cluster_management, extra_args):
|
def up(ctx, stay_attached, extra_args):
|
||||||
ctx.obj = make_deploy_context(ctx)
|
ctx.obj = make_deploy_context(ctx)
|
||||||
services_list = list(extra_args) or None
|
services_list = list(extra_args) or None
|
||||||
up_operation(ctx, services_list, stay_attached, skip_cluster_management)
|
up_operation(ctx, services_list, stay_attached)
|
||||||
|
|
||||||
|
|
||||||
# start is the preferred alias for up
|
# start is the preferred alias for up
|
||||||
@command.command()
|
@command.command()
|
||||||
@click.option(
|
@click.option("--stay-attached/--detatch-terminal", default=False, help="detatch or not to see container stdout")
|
||||||
"--stay-attached/--detatch-terminal",
|
@click.argument('extra_args', nargs=-1) # help: command: up <service1> <service2>
|
||||||
default=False,
|
|
||||||
help="detatch or not to see container stdout",
|
|
||||||
)
|
|
||||||
@click.option(
|
|
||||||
"--skip-cluster-management/--perform-cluster-management",
|
|
||||||
default=False,
|
|
||||||
help="Skip cluster initialization/tear-down (only for kind-k8s deployments)",
|
|
||||||
)
|
|
||||||
@click.argument("extra_args", nargs=-1) # help: command: up <service1> <service2>
|
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def start(ctx, stay_attached, skip_cluster_management, extra_args):
|
def start(ctx, stay_attached, extra_args):
|
||||||
ctx.obj = make_deploy_context(ctx)
|
ctx.obj = make_deploy_context(ctx)
|
||||||
services_list = list(extra_args) or None
|
services_list = list(extra_args) or None
|
||||||
up_operation(ctx, services_list, stay_attached, skip_cluster_management)
|
up_operation(ctx, services_list, stay_attached)
|
||||||
|
|
||||||
|
|
||||||
# TODO: remove legacy up command since it's an alias for stop
|
|
||||||
@command.command()
|
@command.command()
|
||||||
@click.option(
|
@click.option("--delete-volumes/--preserve-volumes", default=False, help="delete data volumes")
|
||||||
"--delete-volumes/--preserve-volumes", default=False, help="delete data volumes"
|
@click.argument('extra_args', nargs=-1) # help: command: down <service1> <service2>
|
||||||
)
|
|
||||||
@click.option(
|
|
||||||
"--skip-cluster-management/--perform-cluster-management",
|
|
||||||
default=False,
|
|
||||||
help="Skip cluster initialization/tear-down (only for kind-k8s deployments)",
|
|
||||||
)
|
|
||||||
@click.argument("extra_args", nargs=-1) # help: command: down <service1> <service2>
|
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def down(ctx, delete_volumes, skip_cluster_management, extra_args):
|
def down(ctx, delete_volumes, extra_args):
|
||||||
# Get the stack config file name
|
# Get the stack config file name
|
||||||
# TODO: add cluster name and env file here
|
# TODO: add cluster name and env file here
|
||||||
ctx.obj = make_deploy_context(ctx)
|
ctx.obj = make_deploy_context(ctx)
|
||||||
down_operation(ctx, delete_volumes, extra_args, skip_cluster_management)
|
down_operation(ctx, delete_volumes, extra_args)
|
||||||
|
|
||||||
|
|
||||||
# stop is the preferred alias for down
|
# stop is the preferred alias for down
|
||||||
@command.command()
|
@command.command()
|
||||||
@click.option(
|
@click.option("--delete-volumes/--preserve-volumes", default=False, help="delete data volumes")
|
||||||
"--delete-volumes/--preserve-volumes", default=False, help="delete data volumes"
|
@click.argument('extra_args', nargs=-1) # help: command: down <service1> <service2>
|
||||||
)
|
|
||||||
@click.option(
|
|
||||||
"--skip-cluster-management/--perform-cluster-management",
|
|
||||||
default=False,
|
|
||||||
help="Skip cluster initialization/tear-down (only for kind-k8s deployments)",
|
|
||||||
)
|
|
||||||
@click.argument("extra_args", nargs=-1) # help: command: down <service1> <service2>
|
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def stop(ctx, delete_volumes, skip_cluster_management, extra_args):
|
def stop(ctx, delete_volumes, extra_args):
|
||||||
# TODO: add cluster name and env file here
|
# TODO: add cluster name and env file here
|
||||||
ctx.obj = make_deploy_context(ctx)
|
ctx.obj = make_deploy_context(ctx)
|
||||||
down_operation(ctx, delete_volumes, extra_args, skip_cluster_management)
|
down_operation(ctx, delete_volumes, extra_args)
|
||||||
|
|
||||||
|
|
||||||
@command.command()
|
@command.command()
|
||||||
@ -178,7 +120,7 @@ def push_images(ctx):
|
|||||||
|
|
||||||
|
|
||||||
@command.command()
|
@command.command()
|
||||||
@click.argument("extra_args", nargs=-1) # help: command: port <service1> <service2>
|
@click.argument('extra_args', nargs=-1) # help: command: port <service1> <service2>
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def port(ctx, extra_args):
|
def port(ctx, extra_args):
|
||||||
ctx.obj = make_deploy_context(ctx)
|
ctx.obj = make_deploy_context(ctx)
|
||||||
@ -186,7 +128,7 @@ def port(ctx, extra_args):
|
|||||||
|
|
||||||
|
|
||||||
@command.command()
|
@command.command()
|
||||||
@click.argument("extra_args", nargs=-1) # help: command: exec <service> <command>
|
@click.argument('extra_args', nargs=-1) # help: command: exec <service> <command>
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def exec(ctx, extra_args):
|
def exec(ctx, extra_args):
|
||||||
ctx.obj = make_deploy_context(ctx)
|
ctx.obj = make_deploy_context(ctx)
|
||||||
@ -196,7 +138,7 @@ def exec(ctx, extra_args):
|
|||||||
@command.command()
|
@command.command()
|
||||||
@click.option("--tail", "-n", default=None, help="number of lines to display")
|
@click.option("--tail", "-n", default=None, help="number of lines to display")
|
||||||
@click.option("--follow", "-f", is_flag=True, default=False, help="follow log output")
|
@click.option("--follow", "-f", is_flag=True, default=False, help="follow log output")
|
||||||
@click.argument("extra_args", nargs=-1) # help: command: logs <service1> <service2>
|
@click.argument('extra_args', nargs=-1) # help: command: logs <service1> <service2>
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def logs(ctx, tail, follow, extra_args):
|
def logs(ctx, tail, follow, extra_args):
|
||||||
ctx.obj = make_deploy_context(ctx)
|
ctx.obj = make_deploy_context(ctx)
|
||||||
@ -215,191 +157,3 @@ def status(ctx):
|
|||||||
def update(ctx):
|
def update(ctx):
|
||||||
ctx.obj = make_deploy_context(ctx)
|
ctx.obj = make_deploy_context(ctx)
|
||||||
update_operation(ctx)
|
update_operation(ctx)
|
||||||
|
|
||||||
|
|
||||||
@command.command()
|
|
||||||
@click.argument("job_name")
|
|
||||||
@click.option(
|
|
||||||
"--helm-release",
|
|
||||||
help="Helm release name (for k8s helm chart deployments, defaults to chart name)",
|
|
||||||
)
|
|
||||||
@click.pass_context
|
|
||||||
def run_job(ctx, job_name, helm_release):
|
|
||||||
"""run a one-time job from the stack"""
|
|
||||||
from stack_orchestrator.deploy.deploy import run_job_operation
|
|
||||||
|
|
||||||
ctx.obj = make_deploy_context(ctx)
|
|
||||||
run_job_operation(ctx, job_name, helm_release)
|
|
||||||
|
|
||||||
|
|
||||||
@command.command()
|
|
||||||
@click.option("--stack-path", help="Path to stack git repo (overrides stored path)")
|
|
||||||
@click.option(
|
|
||||||
"--spec-file", help="Path to GitOps spec.yml in repo (e.g., deployment/spec.yml)"
|
|
||||||
)
|
|
||||||
@click.option("--config-file", help="Config file to pass to deploy init")
|
|
||||||
@click.option(
|
|
||||||
"--force",
|
|
||||||
is_flag=True,
|
|
||||||
default=False,
|
|
||||||
help="Skip DNS verification",
|
|
||||||
)
|
|
||||||
@click.option(
|
|
||||||
"--expected-ip",
|
|
||||||
help="Expected IP for DNS verification (if different from egress)",
|
|
||||||
)
|
|
||||||
@click.pass_context
|
|
||||||
def restart(ctx, stack_path, spec_file, config_file, force, expected_ip):
|
|
||||||
"""Pull latest code and restart deployment using git-tracked spec.
|
|
||||||
|
|
||||||
GitOps workflow:
|
|
||||||
1. Operator maintains spec.yml in their git repository
|
|
||||||
2. This command pulls latest code (including updated spec.yml)
|
|
||||||
3. If hostname changed, verifies DNS routes to this server
|
|
||||||
4. Syncs deployment directory with the git-tracked spec
|
|
||||||
5. Stops and restarts the deployment
|
|
||||||
|
|
||||||
Data volumes are always preserved. The cluster is never destroyed.
|
|
||||||
|
|
||||||
Stack source resolution (in order):
|
|
||||||
1. --stack-path argument (if provided)
|
|
||||||
2. stack-source field in deployment.yml (if stored)
|
|
||||||
3. Error if neither available
|
|
||||||
|
|
||||||
Note: spec.yml should be maintained in git, not regenerated from
|
|
||||||
commands.py on each restart. Use 'deploy init' only for initial
|
|
||||||
spec generation, then customize and commit to your operator repo.
|
|
||||||
"""
|
|
||||||
from stack_orchestrator.util import get_yaml, get_parsed_deployment_spec
|
|
||||||
from stack_orchestrator.deploy.deployment_create import create_operation
|
|
||||||
from stack_orchestrator.deploy.dns_probe import verify_dns_via_probe
|
|
||||||
|
|
||||||
deployment_context: DeploymentContext = ctx.obj
|
|
||||||
|
|
||||||
# Get current spec info (before git pull)
|
|
||||||
current_spec = deployment_context.spec
|
|
||||||
current_http_proxy = current_spec.get_http_proxy()
|
|
||||||
current_hostname = (
|
|
||||||
current_http_proxy[0]["host-name"] if current_http_proxy else None
|
|
||||||
)
|
|
||||||
|
|
||||||
# Resolve stack source path
|
|
||||||
if stack_path:
|
|
||||||
stack_source = Path(stack_path).resolve()
|
|
||||||
else:
|
|
||||||
# Try to get from deployment.yml
|
|
||||||
deployment_file = (
|
|
||||||
deployment_context.deployment_dir / constants.deployment_file_name
|
|
||||||
)
|
|
||||||
deployment_data = get_yaml().load(open(deployment_file))
|
|
||||||
stack_source_str = deployment_data.get("stack-source")
|
|
||||||
if not stack_source_str:
|
|
||||||
print(
|
|
||||||
"Error: No stack-source in deployment.yml and --stack-path not provided"
|
|
||||||
)
|
|
||||||
print("Use --stack-path to specify the stack git repository location")
|
|
||||||
sys.exit(1)
|
|
||||||
stack_source = Path(stack_source_str)
|
|
||||||
|
|
||||||
if not stack_source.exists():
|
|
||||||
print(f"Error: Stack source path does not exist: {stack_source}")
|
|
||||||
sys.exit(1)
|
|
||||||
|
|
||||||
print("=== Deployment Restart ===")
|
|
||||||
print(f"Deployment dir: {deployment_context.deployment_dir}")
|
|
||||||
print(f"Stack source: {stack_source}")
|
|
||||||
print(f"Current hostname: {current_hostname}")
|
|
||||||
|
|
||||||
# Step 1: Git pull (brings in updated spec.yml from operator's repo)
|
|
||||||
print("\n[1/4] Pulling latest code from stack repository...")
|
|
||||||
git_result = subprocess.run(
|
|
||||||
["git", "pull"], cwd=stack_source, capture_output=True, text=True
|
|
||||||
)
|
|
||||||
if git_result.returncode != 0:
|
|
||||||
print(f"Git pull failed: {git_result.stderr}")
|
|
||||||
sys.exit(1)
|
|
||||||
print(f"Git pull: {git_result.stdout.strip()}")
|
|
||||||
|
|
||||||
# Determine spec file location
|
|
||||||
# Priority: --spec-file argument > repo's deployment/spec.yml > deployment dir
|
|
||||||
# Stack path is like: repo/stack_orchestrator/data/stacks/stack-name
|
|
||||||
# So repo root is 4 parents up
|
|
||||||
repo_root = stack_source.parent.parent.parent.parent
|
|
||||||
if spec_file:
|
|
||||||
# Spec file relative to repo root
|
|
||||||
spec_file_path = repo_root / spec_file
|
|
||||||
else:
|
|
||||||
# Try standard GitOps location in repo
|
|
||||||
gitops_spec = repo_root / "deployment" / "spec.yml"
|
|
||||||
if gitops_spec.exists():
|
|
||||||
spec_file_path = gitops_spec
|
|
||||||
else:
|
|
||||||
# Fall back to deployment directory
|
|
||||||
spec_file_path = deployment_context.deployment_dir / "spec.yml"
|
|
||||||
|
|
||||||
if not spec_file_path.exists():
|
|
||||||
print(f"Error: spec.yml not found at {spec_file_path}")
|
|
||||||
print("For GitOps, add spec.yml to your repo at deployment/spec.yml")
|
|
||||||
print("Or specify --spec-file with path relative to repo root")
|
|
||||||
sys.exit(1)
|
|
||||||
|
|
||||||
print(f"Using spec: {spec_file_path}")
|
|
||||||
|
|
||||||
# Parse spec to check for hostname changes
|
|
||||||
new_spec_obj = get_parsed_deployment_spec(str(spec_file_path))
|
|
||||||
new_http_proxy = new_spec_obj.get("network", {}).get("http-proxy", [])
|
|
||||||
new_hostname = new_http_proxy[0]["host-name"] if new_http_proxy else None
|
|
||||||
|
|
||||||
print(f"Spec hostname: {new_hostname}")
|
|
||||||
|
|
||||||
# Step 2: DNS verification (only if hostname changed)
|
|
||||||
if new_hostname and new_hostname != current_hostname:
|
|
||||||
print(f"\n[2/4] Hostname changed: {current_hostname} -> {new_hostname}")
|
|
||||||
if force:
|
|
||||||
print("DNS verification skipped (--force)")
|
|
||||||
else:
|
|
||||||
print("Verifying DNS via probe...")
|
|
||||||
if not verify_dns_via_probe(new_hostname):
|
|
||||||
print(f"\nDNS verification failed for {new_hostname}")
|
|
||||||
print("Ensure DNS is configured before restarting.")
|
|
||||||
print("Use --force to skip this check.")
|
|
||||||
sys.exit(1)
|
|
||||||
else:
|
|
||||||
print("\n[2/4] Hostname unchanged, skipping DNS verification")
|
|
||||||
|
|
||||||
# Step 3: Sync deployment directory with spec
|
|
||||||
print("\n[3/4] Syncing deployment directory...")
|
|
||||||
deploy_ctx = make_deploy_context(ctx)
|
|
||||||
create_operation(
|
|
||||||
deployment_command_context=deploy_ctx,
|
|
||||||
spec_file=str(spec_file_path),
|
|
||||||
deployment_dir=str(deployment_context.deployment_dir),
|
|
||||||
update=True,
|
|
||||||
network_dir=None,
|
|
||||||
initial_peers=None,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Reload deployment context with updated spec
|
|
||||||
deployment_context.init(deployment_context.deployment_dir)
|
|
||||||
ctx.obj = deployment_context
|
|
||||||
|
|
||||||
# Stop deployment
|
|
||||||
print("\n[4/4] Restarting deployment...")
|
|
||||||
ctx.obj = make_deploy_context(ctx)
|
|
||||||
down_operation(
|
|
||||||
ctx, delete_volumes=False, extra_args_list=[], skip_cluster_management=True
|
|
||||||
)
|
|
||||||
|
|
||||||
# Brief pause to ensure clean shutdown
|
|
||||||
time.sleep(5)
|
|
||||||
|
|
||||||
# Start deployment
|
|
||||||
up_operation(
|
|
||||||
ctx, services_list=None, stay_attached=False, skip_cluster_management=True
|
|
||||||
)
|
|
||||||
|
|
||||||
print("\n=== Restart Complete ===")
|
|
||||||
print("Deployment restarted with git-tracked configuration.")
|
|
||||||
if new_hostname and new_hostname != current_hostname:
|
|
||||||
print(f"\nNew hostname: {new_hostname}")
|
|
||||||
print("Caddy will automatically provision TLS certificate.")
|
|
||||||
|
|||||||
@ -1,3 +1,4 @@
|
|||||||
|
|
||||||
# Copyright © 2022, 2023 Vulcanize
|
# Copyright © 2022, 2023 Vulcanize
|
||||||
|
|
||||||
# This program is free software: you can redistribute it and/or modify
|
# This program is free software: you can redistribute it and/or modify
|
||||||
@ -44,20 +45,18 @@ class DeploymentContext:
|
|||||||
def get_compose_dir(self):
|
def get_compose_dir(self):
|
||||||
return self.deployment_dir.joinpath(constants.compose_dir_name)
|
return self.deployment_dir.joinpath(constants.compose_dir_name)
|
||||||
|
|
||||||
def get_compose_file(self, name: str):
|
|
||||||
return self.get_compose_dir() / f"docker-compose-{name}.yml"
|
|
||||||
|
|
||||||
def get_cluster_id(self):
|
def get_cluster_id(self):
|
||||||
return self.id
|
return self.id
|
||||||
|
|
||||||
def init(self, dir: Path):
|
def init(self, dir):
|
||||||
self.deployment_dir = dir.absolute()
|
self.deployment_dir = dir
|
||||||
self.spec = Spec()
|
self.spec = Spec()
|
||||||
self.spec.init_from_file(self.get_spec_file())
|
self.spec.init_from_file(self.get_spec_file())
|
||||||
self.stack = Stack(self.spec.obj["stack"])
|
self.stack = Stack(self.spec.obj["stack"])
|
||||||
self.stack.init_from_file(self.get_stack_file())
|
self.stack.init_from_file(self.get_stack_file())
|
||||||
deployment_file_path = self.get_deployment_file()
|
deployment_file_path = self.get_deployment_file()
|
||||||
if deployment_file_path.exists():
|
if deployment_file_path.exists():
|
||||||
|
with deployment_file_path:
|
||||||
obj = get_yaml().load(open(deployment_file_path, "r"))
|
obj = get_yaml().load(open(deployment_file_path, "r"))
|
||||||
self.id = obj[constants.cluster_id_key]
|
self.id = obj[constants.cluster_id_key]
|
||||||
# Handle the case of a legacy deployment with no file
|
# Handle the case of a legacy deployment with no file
|
||||||
@ -68,17 +67,3 @@ class DeploymentContext:
|
|||||||
unique_cluster_descriptor = f"{path},{self.get_stack_file()},None,None"
|
unique_cluster_descriptor = f"{path},{self.get_stack_file()},None,None"
|
||||||
hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()[:16]
|
hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()[:16]
|
||||||
self.id = f"{constants.cluster_name_prefix}{hash}"
|
self.id = f"{constants.cluster_name_prefix}{hash}"
|
||||||
|
|
||||||
def modify_yaml(self, file_path: Path, modifier_func):
|
|
||||||
"""Load a YAML, apply a modification function, and write it back."""
|
|
||||||
if not file_path.absolute().is_relative_to(self.deployment_dir):
|
|
||||||
raise ValueError(f"File is not inside deployment directory: {file_path}")
|
|
||||||
|
|
||||||
yaml = get_yaml()
|
|
||||||
with open(file_path, "r") as f:
|
|
||||||
yaml_data = yaml.load(f)
|
|
||||||
|
|
||||||
modifier_func(yaml_data)
|
|
||||||
|
|
||||||
with open(file_path, "w") as f:
|
|
||||||
yaml.dump(yaml_data, f)
|
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@ -1,159 +0,0 @@
|
|||||||
# Copyright © 2024 Vulcanize
|
|
||||||
# SPDX-License-Identifier: AGPL-3.0
|
|
||||||
|
|
||||||
"""DNS verification via temporary ingress probe."""
|
|
||||||
|
|
||||||
import secrets
|
|
||||||
import socket
|
|
||||||
import time
|
|
||||||
from typing import Optional
|
|
||||||
import requests
|
|
||||||
from kubernetes import client
|
|
||||||
|
|
||||||
|
|
||||||
def get_server_egress_ip() -> str:
|
|
||||||
"""Get this server's public egress IP via ipify."""
|
|
||||||
response = requests.get("https://api.ipify.org", timeout=10)
|
|
||||||
response.raise_for_status()
|
|
||||||
return response.text.strip()
|
|
||||||
|
|
||||||
|
|
||||||
def resolve_hostname(hostname: str) -> list[str]:
|
|
||||||
"""Resolve hostname to list of IP addresses."""
|
|
||||||
try:
|
|
||||||
_, _, ips = socket.gethostbyname_ex(hostname)
|
|
||||||
return ips
|
|
||||||
except socket.gaierror:
|
|
||||||
return []
|
|
||||||
|
|
||||||
|
|
||||||
def verify_dns_simple(hostname: str, expected_ip: Optional[str] = None) -> bool:
|
|
||||||
"""Simple DNS verification - check hostname resolves to expected IP.
|
|
||||||
|
|
||||||
If expected_ip not provided, uses server's egress IP.
|
|
||||||
Returns True if hostname resolves to expected IP.
|
|
||||||
"""
|
|
||||||
resolved_ips = resolve_hostname(hostname)
|
|
||||||
if not resolved_ips:
|
|
||||||
print(f"DNS FAIL: {hostname} does not resolve")
|
|
||||||
return False
|
|
||||||
|
|
||||||
if expected_ip is None:
|
|
||||||
expected_ip = get_server_egress_ip()
|
|
||||||
|
|
||||||
if expected_ip in resolved_ips:
|
|
||||||
print(f"DNS OK: {hostname} -> {resolved_ips} (includes {expected_ip})")
|
|
||||||
return True
|
|
||||||
else:
|
|
||||||
print(f"DNS WARN: {hostname} -> {resolved_ips} (expected {expected_ip})")
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def create_probe_ingress(hostname: str, namespace: str = "default") -> str:
|
|
||||||
"""Create a temporary ingress for DNS probing.
|
|
||||||
|
|
||||||
Returns the probe token that the ingress will respond with.
|
|
||||||
"""
|
|
||||||
token = secrets.token_hex(16)
|
|
||||||
|
|
||||||
networking_api = client.NetworkingV1Api()
|
|
||||||
|
|
||||||
# Create a simple ingress that Caddy will pick up
|
|
||||||
ingress = client.V1Ingress(
|
|
||||||
metadata=client.V1ObjectMeta(
|
|
||||||
name="laconic-dns-probe",
|
|
||||||
annotations={
|
|
||||||
"kubernetes.io/ingress.class": "caddy",
|
|
||||||
"laconic.com/probe-token": token,
|
|
||||||
},
|
|
||||||
),
|
|
||||||
spec=client.V1IngressSpec(
|
|
||||||
rules=[
|
|
||||||
client.V1IngressRule(
|
|
||||||
host=hostname,
|
|
||||||
http=client.V1HTTPIngressRuleValue(
|
|
||||||
paths=[
|
|
||||||
client.V1HTTPIngressPath(
|
|
||||||
path="/.well-known/laconic-probe",
|
|
||||||
path_type="Exact",
|
|
||||||
backend=client.V1IngressBackend(
|
|
||||||
service=client.V1IngressServiceBackend(
|
|
||||||
name="caddy-ingress-controller",
|
|
||||||
port=client.V1ServiceBackendPort(number=80),
|
|
||||||
)
|
|
||||||
),
|
|
||||||
)
|
|
||||||
]
|
|
||||||
),
|
|
||||||
)
|
|
||||||
]
|
|
||||||
),
|
|
||||||
)
|
|
||||||
|
|
||||||
networking_api.create_namespaced_ingress(namespace=namespace, body=ingress)
|
|
||||||
return token
|
|
||||||
|
|
||||||
|
|
||||||
def delete_probe_ingress(namespace: str = "default"):
|
|
||||||
"""Delete the temporary probe ingress."""
|
|
||||||
networking_api = client.NetworkingV1Api()
|
|
||||||
try:
|
|
||||||
networking_api.delete_namespaced_ingress(
|
|
||||||
name="laconic-dns-probe", namespace=namespace
|
|
||||||
)
|
|
||||||
except client.exceptions.ApiException:
|
|
||||||
pass # Ignore if already deleted
|
|
||||||
|
|
||||||
|
|
||||||
def verify_dns_via_probe(
|
|
||||||
hostname: str, namespace: str = "default", timeout: int = 30, poll_interval: int = 2
|
|
||||||
) -> bool:
|
|
||||||
"""Verify DNS by creating temp ingress and probing it.
|
|
||||||
|
|
||||||
This definitively proves that traffic to the hostname reaches this cluster.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
hostname: The hostname to verify
|
|
||||||
namespace: Kubernetes namespace for probe ingress
|
|
||||||
timeout: Total seconds to wait for probe to succeed
|
|
||||||
poll_interval: Seconds between probe attempts
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
True if probe succeeds, False otherwise
|
|
||||||
"""
|
|
||||||
# First check DNS resolves at all
|
|
||||||
if not resolve_hostname(hostname):
|
|
||||||
print(f"DNS FAIL: {hostname} does not resolve")
|
|
||||||
return False
|
|
||||||
|
|
||||||
print(f"Creating probe ingress for {hostname}...")
|
|
||||||
create_probe_ingress(hostname, namespace)
|
|
||||||
|
|
||||||
try:
|
|
||||||
# Wait for Caddy to pick up the ingress
|
|
||||||
time.sleep(3)
|
|
||||||
|
|
||||||
# Poll until success or timeout
|
|
||||||
probe_url = f"http://{hostname}/.well-known/laconic-probe"
|
|
||||||
start_time = time.time()
|
|
||||||
last_error = None
|
|
||||||
|
|
||||||
while time.time() - start_time < timeout:
|
|
||||||
try:
|
|
||||||
response = requests.get(probe_url, timeout=5)
|
|
||||||
# For now, just verify we get a response from this cluster
|
|
||||||
# A more robust check would verify a unique token
|
|
||||||
if response.status_code < 500:
|
|
||||||
print(f"DNS PROBE OK: {hostname} routes to this cluster")
|
|
||||||
return True
|
|
||||||
except requests.RequestException as e:
|
|
||||||
last_error = e
|
|
||||||
|
|
||||||
time.sleep(poll_interval)
|
|
||||||
|
|
||||||
print(f"DNS PROBE FAIL: {hostname} - {last_error}")
|
|
||||||
return False
|
|
||||||
|
|
||||||
finally:
|
|
||||||
print("Cleaning up probe ingress...")
|
|
||||||
delete_probe_ingress(namespace)
|
|
||||||
@ -32,9 +32,7 @@ def _image_needs_pushed(image: str):
|
|||||||
def _remote_tag_for_image(image: str, remote_repo_url: str):
|
def _remote_tag_for_image(image: str, remote_repo_url: str):
|
||||||
# Turns image tags of the form: foo/bar:local into remote.repo/org/bar:deploy
|
# Turns image tags of the form: foo/bar:local into remote.repo/org/bar:deploy
|
||||||
major_parts = image.split("/", 2)
|
major_parts = image.split("/", 2)
|
||||||
image_name_with_version = (
|
image_name_with_version = major_parts[1] if 2 == len(major_parts) else major_parts[0]
|
||||||
major_parts[1] if 2 == len(major_parts) else major_parts[0]
|
|
||||||
)
|
|
||||||
(image_name, image_version) = image_name_with_version.split(":")
|
(image_name, image_version) = image_name_with_version.split(":")
|
||||||
if image_version == "local":
|
if image_version == "local":
|
||||||
return f"{remote_repo_url}/{image_name}:deploy"
|
return f"{remote_repo_url}/{image_name}:deploy"
|
||||||
@ -63,22 +61,17 @@ def add_tags_to_image(remote_repo_url: str, local_tag: str, *additional_tags):
|
|||||||
|
|
||||||
docker = DockerClient()
|
docker = DockerClient()
|
||||||
remote_tag = _remote_tag_for_image(local_tag, remote_repo_url)
|
remote_tag = _remote_tag_for_image(local_tag, remote_repo_url)
|
||||||
new_remote_tags = [
|
new_remote_tags = [_remote_tag_for_image(tag, remote_repo_url) for tag in additional_tags]
|
||||||
_remote_tag_for_image(tag, remote_repo_url) for tag in additional_tags
|
|
||||||
]
|
|
||||||
docker.buildx.imagetools.create(sources=[remote_tag], tags=new_remote_tags)
|
docker.buildx.imagetools.create(sources=[remote_tag], tags=new_remote_tags)
|
||||||
|
|
||||||
|
|
||||||
def remote_tag_for_image_unique(image: str, remote_repo_url: str, deployment_id: str):
|
def remote_tag_for_image_unique(image: str, remote_repo_url: str, deployment_id: str):
|
||||||
# Turns image tags of the form: foo/bar:local into remote.repo/org/bar:deploy
|
# Turns image tags of the form: foo/bar:local into remote.repo/org/bar:deploy
|
||||||
major_parts = image.split("/", 2)
|
major_parts = image.split("/", 2)
|
||||||
image_name_with_version = (
|
image_name_with_version = major_parts[1] if 2 == len(major_parts) else major_parts[0]
|
||||||
major_parts[1] if 2 == len(major_parts) else major_parts[0]
|
|
||||||
)
|
|
||||||
(image_name, image_version) = image_name_with_version.split(":")
|
(image_name, image_version) = image_name_with_version.split(":")
|
||||||
if image_version == "local":
|
if image_version == "local":
|
||||||
# Salt the tag with part of the deployment id to make it unique to this
|
# Salt the tag with part of the deployment id to make it unique to this deployment
|
||||||
# deployment
|
|
||||||
deployment_tag = deployment_id[-8:]
|
deployment_tag = deployment_id[-8:]
|
||||||
return f"{remote_repo_url}/{image_name}:deploy-{deployment_tag}"
|
return f"{remote_repo_url}/{image_name}:deploy-{deployment_tag}"
|
||||||
else:
|
else:
|
||||||
@ -86,9 +79,7 @@ def remote_tag_for_image_unique(image: str, remote_repo_url: str, deployment_id:
|
|||||||
|
|
||||||
|
|
||||||
# TODO: needs lots of error handling
|
# TODO: needs lots of error handling
|
||||||
def push_images_operation(
|
def push_images_operation(command_context: DeployCommandContext, deployment_context: DeploymentContext):
|
||||||
command_context: DeployCommandContext, deployment_context: DeploymentContext
|
|
||||||
):
|
|
||||||
# Get the list of images for the stack
|
# Get the list of images for the stack
|
||||||
cluster_context = command_context.cluster_context
|
cluster_context = command_context.cluster_context
|
||||||
images: Set[str] = images_for_deployment(cluster_context.compose_files)
|
images: Set[str] = images_for_deployment(cluster_context.compose_files)
|
||||||
@ -97,18 +88,14 @@ def push_images_operation(
|
|||||||
docker = DockerClient()
|
docker = DockerClient()
|
||||||
for image in images:
|
for image in images:
|
||||||
if _image_needs_pushed(image):
|
if _image_needs_pushed(image):
|
||||||
remote_tag = remote_tag_for_image_unique(
|
remote_tag = remote_tag_for_image_unique(image, remote_repo_url, deployment_context.id)
|
||||||
image, remote_repo_url, deployment_context.id
|
|
||||||
)
|
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f"Tagging {image} to {remote_tag}")
|
print(f"Tagging {image} to {remote_tag}")
|
||||||
docker.image.tag(image, remote_tag)
|
docker.image.tag(image, remote_tag)
|
||||||
# Run docker push commands to upload
|
# Run docker push commands to upload
|
||||||
for image in images:
|
for image in images:
|
||||||
if _image_needs_pushed(image):
|
if _image_needs_pushed(image):
|
||||||
remote_tag = remote_tag_for_image_unique(
|
remote_tag = remote_tag_for_image_unique(image, remote_repo_url, deployment_context.id)
|
||||||
image, remote_repo_url, deployment_context.id
|
|
||||||
)
|
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f"Pushing image {remote_tag}")
|
print(f"Pushing image {remote_tag}")
|
||||||
docker.image.push(remote_tag)
|
docker.image.push(remote_tag)
|
||||||
|
|||||||
@ -14,45 +14,32 @@
|
|||||||
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
||||||
|
|
||||||
import os
|
import os
|
||||||
import base64
|
|
||||||
|
|
||||||
from kubernetes import client
|
from kubernetes import client
|
||||||
from typing import Any, List, Optional, Set
|
from typing import Any, List, Set
|
||||||
|
|
||||||
from stack_orchestrator.opts import opts
|
from stack_orchestrator.opts import opts
|
||||||
from stack_orchestrator.util import env_var_map_from_file
|
from stack_orchestrator.util import env_var_map_from_file
|
||||||
from stack_orchestrator.deploy.k8s.helpers import (
|
from stack_orchestrator.deploy.k8s.helpers import named_volumes_from_pod_files, volume_mounts_for_service, volumes_for_pod_files
|
||||||
named_volumes_from_pod_files,
|
|
||||||
volume_mounts_for_service,
|
|
||||||
volumes_for_pod_files,
|
|
||||||
)
|
|
||||||
from stack_orchestrator.deploy.k8s.helpers import get_kind_pv_bind_mount_path
|
from stack_orchestrator.deploy.k8s.helpers import get_kind_pv_bind_mount_path
|
||||||
from stack_orchestrator.deploy.k8s.helpers import (
|
from stack_orchestrator.deploy.k8s.helpers import envs_from_environment_variables_map, envs_from_compose_file, merge_envs
|
||||||
envs_from_environment_variables_map,
|
from stack_orchestrator.deploy.deploy_util import parsed_pod_files_map_from_file_names, images_for_deployment
|
||||||
envs_from_compose_file,
|
|
||||||
merge_envs,
|
|
||||||
translate_sidecar_service_names,
|
|
||||||
)
|
|
||||||
from stack_orchestrator.deploy.deploy_util import (
|
|
||||||
parsed_pod_files_map_from_file_names,
|
|
||||||
images_for_deployment,
|
|
||||||
)
|
|
||||||
from stack_orchestrator.deploy.deploy_types import DeployEnvVars
|
from stack_orchestrator.deploy.deploy_types import DeployEnvVars
|
||||||
from stack_orchestrator.deploy.spec import Spec, Resources, ResourceLimits
|
from stack_orchestrator.deploy.spec import Spec, Resources, ResourceLimits
|
||||||
from stack_orchestrator.deploy.images import remote_tag_for_image_unique
|
from stack_orchestrator.deploy.images import remote_tag_for_image_unique
|
||||||
|
|
||||||
DEFAULT_VOLUME_RESOURCES = Resources({"reservations": {"storage": "2Gi"}})
|
DEFAULT_VOLUME_RESOURCES = Resources({
|
||||||
|
"reservations": {"storage": "2Gi"}
|
||||||
|
})
|
||||||
|
|
||||||
DEFAULT_CONTAINER_RESOURCES = Resources(
|
DEFAULT_CONTAINER_RESOURCES = Resources({
|
||||||
{
|
"reservations": {"cpus": "0.1", "memory": "200M"},
|
||||||
"reservations": {"cpus": "1.0", "memory": "2000M"},
|
"limits": {"cpus": "1.0", "memory": "2000M"},
|
||||||
"limits": {"cpus": "4.0", "memory": "8000M"},
|
})
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def to_k8s_resource_requirements(resources: Resources) -> client.V1ResourceRequirements:
|
def to_k8s_resource_requirements(resources: Resources) -> client.V1ResourceRequirements:
|
||||||
def to_dict(limits: Optional[ResourceLimits]):
|
def to_dict(limits: ResourceLimits):
|
||||||
if not limits:
|
if not limits:
|
||||||
return None
|
return None
|
||||||
|
|
||||||
@ -66,103 +53,55 @@ def to_k8s_resource_requirements(resources: Resources) -> client.V1ResourceRequi
|
|||||||
return ret
|
return ret
|
||||||
|
|
||||||
return client.V1ResourceRequirements(
|
return client.V1ResourceRequirements(
|
||||||
requests=to_dict(resources.reservations), limits=to_dict(resources.limits)
|
requests=to_dict(resources.reservations),
|
||||||
|
limits=to_dict(resources.limits)
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
class ClusterInfo:
|
class ClusterInfo:
|
||||||
parsed_pod_yaml_map: Any
|
parsed_pod_yaml_map: Any
|
||||||
parsed_job_yaml_map: Any
|
|
||||||
image_set: Set[str] = set()
|
image_set: Set[str] = set()
|
||||||
app_name: str
|
app_name: str
|
||||||
stack_name: str
|
|
||||||
environment_variables: DeployEnvVars
|
environment_variables: DeployEnvVars
|
||||||
spec: Spec
|
spec: Spec
|
||||||
|
|
||||||
def __init__(self) -> None:
|
def __init__(self) -> None:
|
||||||
self.parsed_job_yaml_map = {}
|
pass
|
||||||
|
|
||||||
def int(self, pod_files: List[str], compose_env_file, deployment_name, spec: Spec, stack_name=""):
|
def int(self, pod_files: List[str], compose_env_file, deployment_name, spec: Spec):
|
||||||
self.parsed_pod_yaml_map = parsed_pod_files_map_from_file_names(pod_files)
|
self.parsed_pod_yaml_map = parsed_pod_files_map_from_file_names(pod_files)
|
||||||
# Find the set of images in the pods
|
# Find the set of images in the pods
|
||||||
self.image_set = images_for_deployment(pod_files)
|
self.image_set = images_for_deployment(pod_files)
|
||||||
# Filter out None values from env file
|
self.environment_variables = DeployEnvVars(env_var_map_from_file(compose_env_file))
|
||||||
env_vars = {
|
|
||||||
k: v for k, v in env_var_map_from_file(compose_env_file).items() if v
|
|
||||||
}
|
|
||||||
self.environment_variables = DeployEnvVars(env_vars)
|
|
||||||
self.app_name = deployment_name
|
self.app_name = deployment_name
|
||||||
self.stack_name = stack_name
|
|
||||||
self.spec = spec
|
self.spec = spec
|
||||||
if opts.o.debug:
|
if (opts.o.debug):
|
||||||
print(f"Env vars: {self.environment_variables.map}")
|
print(f"Env vars: {self.environment_variables.map}")
|
||||||
|
|
||||||
def init_jobs(self, job_files: List[str]):
|
def get_nodeport(self):
|
||||||
"""Initialize parsed job YAML map from job compose files."""
|
|
||||||
self.parsed_job_yaml_map = parsed_pod_files_map_from_file_names(job_files)
|
|
||||||
if opts.o.debug:
|
|
||||||
print(f"Parsed job yaml map: {self.parsed_job_yaml_map}")
|
|
||||||
|
|
||||||
def _all_named_volumes(self) -> list:
|
|
||||||
"""Return named volumes from both pod and job compose files."""
|
|
||||||
volumes = named_volumes_from_pod_files(self.parsed_pod_yaml_map)
|
|
||||||
volumes.extend(named_volumes_from_pod_files(self.parsed_job_yaml_map))
|
|
||||||
return volumes
|
|
||||||
|
|
||||||
def get_nodeports(self):
|
|
||||||
nodeports = []
|
|
||||||
for pod_name in self.parsed_pod_yaml_map:
|
for pod_name in self.parsed_pod_yaml_map:
|
||||||
pod = self.parsed_pod_yaml_map[pod_name]
|
pod = self.parsed_pod_yaml_map[pod_name]
|
||||||
services = pod["services"]
|
services = pod["services"]
|
||||||
for service_name in services:
|
for service_name in services:
|
||||||
service_info = services[service_name]
|
service_info = services[service_name]
|
||||||
if "ports" in service_info:
|
if "ports" in service_info:
|
||||||
for raw_port in [str(p) for p in service_info["ports"]]:
|
port = int(service_info["ports"][0])
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"service port: {raw_port}")
|
print(f"service port: {port}")
|
||||||
# Parse protocol suffix (e.g., "8001/udp" -> port=8001,
|
|
||||||
# protocol=UDP)
|
|
||||||
protocol = "TCP"
|
|
||||||
port_str = raw_port
|
|
||||||
if "/" in raw_port:
|
|
||||||
port_str, proto = raw_port.rsplit("/", 1)
|
|
||||||
protocol = proto.upper()
|
|
||||||
if ":" in port_str:
|
|
||||||
parts = port_str.split(":")
|
|
||||||
if len(parts) != 2:
|
|
||||||
raise Exception(f"Invalid port definition: {raw_port}")
|
|
||||||
node_port = int(parts[0])
|
|
||||||
pod_port = int(parts[1])
|
|
||||||
else:
|
|
||||||
node_port = None
|
|
||||||
pod_port = int(port_str)
|
|
||||||
service = client.V1Service(
|
service = client.V1Service(
|
||||||
metadata=client.V1ObjectMeta(
|
metadata=client.V1ObjectMeta(name=f"{self.app_name}-nodeport"),
|
||||||
name=(
|
|
||||||
f"{self.app_name}-nodeport-"
|
|
||||||
f"{pod_port}-{protocol.lower()}"
|
|
||||||
),
|
|
||||||
labels={"app": self.app_name},
|
|
||||||
),
|
|
||||||
spec=client.V1ServiceSpec(
|
spec=client.V1ServiceSpec(
|
||||||
type="NodePort",
|
type="NodePort",
|
||||||
ports=[
|
ports=[client.V1ServicePort(
|
||||||
client.V1ServicePort(
|
port=port,
|
||||||
port=pod_port,
|
target_port=port
|
||||||
target_port=pod_port,
|
)],
|
||||||
node_port=node_port,
|
selector={"app": self.app_name}
|
||||||
protocol=protocol,
|
|
||||||
)
|
)
|
||||||
],
|
|
||||||
selector={"app": self.app_name},
|
|
||||||
),
|
|
||||||
)
|
)
|
||||||
nodeports.append(service)
|
return service
|
||||||
return nodeports
|
|
||||||
|
|
||||||
def get_ingress(
|
def get_ingress(self, use_tls=False, certificate=None, cluster_issuer="letsencrypt-prod"):
|
||||||
self, use_tls=False, certificate=None, cluster_issuer="letsencrypt-prod"
|
|
||||||
):
|
|
||||||
# No ingress for a deployment that has no http-proxy defined, for now
|
# No ingress for a deployment that has no http-proxy defined, for now
|
||||||
http_proxy_info_list = self.spec.get_http_proxy()
|
http_proxy_info_list = self.spec.get_http_proxy()
|
||||||
ingress = None
|
ingress = None
|
||||||
@ -174,20 +113,10 @@ class ClusterInfo:
|
|||||||
# TODO: good enough parsing for webapp deployment for now
|
# TODO: good enough parsing for webapp deployment for now
|
||||||
host_name = http_proxy_info["host-name"]
|
host_name = http_proxy_info["host-name"]
|
||||||
rules = []
|
rules = []
|
||||||
tls = (
|
tls = [client.V1IngressTLS(
|
||||||
[
|
hosts=certificate["spec"]["dnsNames"] if certificate else [host_name],
|
||||||
client.V1IngressTLS(
|
secret_name=certificate["spec"]["secretName"] if certificate else f"{self.app_name}-tls"
|
||||||
hosts=certificate["spec"]["dnsNames"]
|
)] if use_tls else None
|
||||||
if certificate
|
|
||||||
else [host_name],
|
|
||||||
secret_name=certificate["spec"]["secretName"]
|
|
||||||
if certificate
|
|
||||||
else f"{self.app_name}-tls",
|
|
||||||
)
|
|
||||||
]
|
|
||||||
if use_tls
|
|
||||||
else None
|
|
||||||
)
|
|
||||||
paths = []
|
paths = []
|
||||||
for route in http_proxy_info["routes"]:
|
for route in http_proxy_info["routes"]:
|
||||||
path = route["path"]
|
path = route["path"]
|
||||||
@ -196,8 +125,7 @@ class ClusterInfo:
|
|||||||
print(f"proxy config: {path} -> {proxy_to}")
|
print(f"proxy config: {path} -> {proxy_to}")
|
||||||
# proxy_to has the form <service>:<port>
|
# proxy_to has the form <service>:<port>
|
||||||
proxy_to_port = int(proxy_to.split(":")[1])
|
proxy_to_port = int(proxy_to.split(":")[1])
|
||||||
paths.append(
|
paths.append(client.V1HTTPIngressPath(
|
||||||
client.V1HTTPIngressPath(
|
|
||||||
path_type="Prefix",
|
path_type="Prefix",
|
||||||
path=path,
|
path=path,
|
||||||
backend=client.V1IngressBackend(
|
backend=client.V1IngressBackend(
|
||||||
@ -205,20 +133,23 @@ class ClusterInfo:
|
|||||||
# TODO: this looks wrong
|
# TODO: this looks wrong
|
||||||
name=f"{self.app_name}-service",
|
name=f"{self.app_name}-service",
|
||||||
# TODO: pull port number from the service
|
# TODO: pull port number from the service
|
||||||
port=client.V1ServiceBackendPort(number=proxy_to_port),
|
port=client.V1ServiceBackendPort(number=proxy_to_port)
|
||||||
)
|
|
||||||
),
|
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
rules.append(
|
))
|
||||||
client.V1IngressRule(
|
rules.append(client.V1IngressRule(
|
||||||
host=host_name, http=client.V1HTTPIngressRuleValue(paths=paths)
|
host=host_name,
|
||||||
|
http=client.V1HTTPIngressRuleValue(
|
||||||
|
paths=paths
|
||||||
)
|
)
|
||||||
|
))
|
||||||
|
spec = client.V1IngressSpec(
|
||||||
|
tls=tls,
|
||||||
|
rules=rules
|
||||||
)
|
)
|
||||||
spec = client.V1IngressSpec(tls=tls, rules=rules)
|
|
||||||
|
|
||||||
ingress_annotations = {
|
ingress_annotations = {
|
||||||
"kubernetes.io/ingress.class": "caddy",
|
"kubernetes.io/ingress.class": "nginx",
|
||||||
}
|
}
|
||||||
if not certificate:
|
if not certificate:
|
||||||
ingress_annotations["cert-manager.io/cluster-issuer"] = cluster_issuer
|
ingress_annotations["cert-manager.io/cluster-issuer"] = cluster_issuer
|
||||||
@ -226,75 +157,56 @@ class ClusterInfo:
|
|||||||
ingress = client.V1Ingress(
|
ingress = client.V1Ingress(
|
||||||
metadata=client.V1ObjectMeta(
|
metadata=client.V1ObjectMeta(
|
||||||
name=f"{self.app_name}-ingress",
|
name=f"{self.app_name}-ingress",
|
||||||
labels={"app": self.app_name},
|
annotations=ingress_annotations
|
||||||
annotations=ingress_annotations,
|
|
||||||
),
|
),
|
||||||
spec=spec,
|
spec=spec
|
||||||
)
|
)
|
||||||
return ingress
|
return ingress
|
||||||
|
|
||||||
# TODO: suppoprt multiple services
|
# TODO: suppoprt multiple services
|
||||||
def get_service(self):
|
def get_service(self):
|
||||||
# Collect all ports from http-proxy routes
|
for pod_name in self.parsed_pod_yaml_map:
|
||||||
ports_set = set()
|
pod = self.parsed_pod_yaml_map[pod_name]
|
||||||
http_proxy_list = self.spec.get_http_proxy()
|
services = pod["services"]
|
||||||
if http_proxy_list:
|
for service_name in services:
|
||||||
for http_proxy in http_proxy_list:
|
service_info = services[service_name]
|
||||||
for route in http_proxy.get("routes", []):
|
if "ports" in service_info:
|
||||||
proxy_to = route.get("proxy-to", "")
|
port = int(service_info["ports"][0])
|
||||||
if ":" in proxy_to:
|
|
||||||
port = int(proxy_to.split(":")[1])
|
|
||||||
ports_set.add(port)
|
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"http-proxy route port: {port}")
|
print(f"service port: {port}")
|
||||||
|
|
||||||
if not ports_set:
|
|
||||||
return None
|
|
||||||
|
|
||||||
service_ports = [
|
|
||||||
client.V1ServicePort(port=p, target_port=p, name=f"port-{p}")
|
|
||||||
for p in sorted(ports_set)
|
|
||||||
]
|
|
||||||
|
|
||||||
service = client.V1Service(
|
service = client.V1Service(
|
||||||
metadata=client.V1ObjectMeta(
|
metadata=client.V1ObjectMeta(name=f"{self.app_name}-service"),
|
||||||
name=f"{self.app_name}-service",
|
|
||||||
labels={"app": self.app_name},
|
|
||||||
),
|
|
||||||
spec=client.V1ServiceSpec(
|
spec=client.V1ServiceSpec(
|
||||||
type="ClusterIP",
|
type="ClusterIP",
|
||||||
ports=service_ports,
|
ports=[client.V1ServicePort(
|
||||||
selector={"app": self.app_name},
|
port=port,
|
||||||
),
|
target_port=port
|
||||||
|
)],
|
||||||
|
selector={"app": self.app_name}
|
||||||
|
)
|
||||||
)
|
)
|
||||||
return service
|
return service
|
||||||
|
|
||||||
def get_pvcs(self):
|
def get_pvcs(self):
|
||||||
result = []
|
result = []
|
||||||
spec_volumes = self.spec.get_volumes()
|
spec_volumes = self.spec.get_volumes()
|
||||||
named_volumes = self._all_named_volumes()
|
named_volumes = named_volumes_from_pod_files(self.parsed_pod_yaml_map)
|
||||||
global_resources = self.spec.get_volume_resources()
|
resources = self.spec.get_volume_resources()
|
||||||
if not global_resources:
|
if not resources:
|
||||||
global_resources = DEFAULT_VOLUME_RESOURCES
|
resources = DEFAULT_VOLUME_RESOURCES
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"Spec Volumes: {spec_volumes}")
|
print(f"Spec Volumes: {spec_volumes}")
|
||||||
print(f"Named Volumes: {named_volumes}")
|
print(f"Named Volumes: {named_volumes}")
|
||||||
print(f"Resources: {global_resources}")
|
print(f"Resources: {resources}")
|
||||||
for volume_name, volume_path in spec_volumes.items():
|
for volume_name, volume_path in spec_volumes.items():
|
||||||
if volume_name not in named_volumes:
|
if volume_name not in named_volumes:
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"{volume_name} not in pod files")
|
print(f"{volume_name} not in pod files")
|
||||||
continue
|
continue
|
||||||
|
|
||||||
# Per-volume resources override global, which overrides default.
|
|
||||||
vol_resources = (
|
|
||||||
self.spec.get_volume_resources_for(volume_name)
|
|
||||||
or global_resources
|
|
||||||
)
|
|
||||||
|
|
||||||
labels = {
|
labels = {
|
||||||
"app": self.app_name,
|
"app": self.app_name,
|
||||||
"volume-label": f"{self.app_name}-{volume_name}",
|
"volume-label": f"{self.app_name}-{volume_name}"
|
||||||
}
|
}
|
||||||
if volume_path:
|
if volume_path:
|
||||||
storage_class_name = "manual"
|
storage_class_name = "manual"
|
||||||
@ -307,14 +219,12 @@ class ClusterInfo:
|
|||||||
spec = client.V1PersistentVolumeClaimSpec(
|
spec = client.V1PersistentVolumeClaimSpec(
|
||||||
access_modes=["ReadWriteOnce"],
|
access_modes=["ReadWriteOnce"],
|
||||||
storage_class_name=storage_class_name,
|
storage_class_name=storage_class_name,
|
||||||
resources=to_k8s_resource_requirements(vol_resources),
|
resources=to_k8s_resource_requirements(resources),
|
||||||
volume_name=k8s_volume_name,
|
volume_name=k8s_volume_name
|
||||||
)
|
)
|
||||||
pvc = client.V1PersistentVolumeClaim(
|
pvc = client.V1PersistentVolumeClaim(
|
||||||
metadata=client.V1ObjectMeta(
|
metadata=client.V1ObjectMeta(name=f"{self.app_name}-{volume_name}", labels=labels),
|
||||||
name=f"{self.app_name}-{volume_name}", labels=labels
|
spec=spec
|
||||||
),
|
|
||||||
spec=spec,
|
|
||||||
)
|
)
|
||||||
result.append(pvc)
|
result.append(pvc)
|
||||||
return result
|
return result
|
||||||
@ -322,35 +232,28 @@ class ClusterInfo:
|
|||||||
def get_configmaps(self):
|
def get_configmaps(self):
|
||||||
result = []
|
result = []
|
||||||
spec_configmaps = self.spec.get_configmaps()
|
spec_configmaps = self.spec.get_configmaps()
|
||||||
named_volumes = self._all_named_volumes()
|
named_volumes = named_volumes_from_pod_files(self.parsed_pod_yaml_map)
|
||||||
for cfg_map_name, cfg_map_path in spec_configmaps.items():
|
for cfg_map_name, cfg_map_path in spec_configmaps.items():
|
||||||
if cfg_map_name not in named_volumes:
|
if cfg_map_name not in named_volumes:
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"{cfg_map_name} not in pod files")
|
print(f"{cfg_map_name} not in pod files")
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if not cfg_map_path.startswith("/") and self.spec.file_path is not None:
|
if not cfg_map_path.startswith("/"):
|
||||||
cfg_map_path = os.path.join(
|
cfg_map_path = os.path.join(os.path.dirname(self.spec.file_path), cfg_map_path)
|
||||||
os.path.dirname(str(self.spec.file_path)), cfg_map_path
|
|
||||||
)
|
|
||||||
|
|
||||||
# Read in all the files at a single-level of the directory.
|
# Read in all the files at a single-level of the directory. This mimics the behavior
|
||||||
# This mimics the behavior of
|
# of `kubectl create configmap foo --from-file=/path/to/dir`
|
||||||
# `kubectl create configmap foo --from-file=/path/to/dir`
|
|
||||||
data = {}
|
data = {}
|
||||||
for f in os.listdir(cfg_map_path):
|
for f in os.listdir(cfg_map_path):
|
||||||
full_path = os.path.join(cfg_map_path, f)
|
full_path = os.path.join(cfg_map_path, f)
|
||||||
if os.path.isfile(full_path):
|
if os.path.isfile(full_path):
|
||||||
data[f] = base64.b64encode(open(full_path, "rb").read()).decode(
|
data[f] = open(full_path, 'rt').read()
|
||||||
"ASCII"
|
|
||||||
)
|
|
||||||
|
|
||||||
spec = client.V1ConfigMap(
|
spec = client.V1ConfigMap(
|
||||||
metadata=client.V1ObjectMeta(
|
metadata=client.V1ObjectMeta(name=f"{self.app_name}-{cfg_map_name}",
|
||||||
name=f"{self.app_name}-{cfg_map_name}",
|
labels={"configmap-label": cfg_map_name}),
|
||||||
labels={"app": self.app_name, "configmap-label": cfg_map_name},
|
data=data
|
||||||
),
|
|
||||||
binary_data=data,
|
|
||||||
)
|
)
|
||||||
result.append(spec)
|
result.append(spec)
|
||||||
return result
|
return result
|
||||||
@ -358,20 +261,16 @@ class ClusterInfo:
|
|||||||
def get_pvs(self):
|
def get_pvs(self):
|
||||||
result = []
|
result = []
|
||||||
spec_volumes = self.spec.get_volumes()
|
spec_volumes = self.spec.get_volumes()
|
||||||
named_volumes = self._all_named_volumes()
|
named_volumes = named_volumes_from_pod_files(self.parsed_pod_yaml_map)
|
||||||
global_resources = self.spec.get_volume_resources()
|
resources = self.spec.get_volume_resources()
|
||||||
if not global_resources:
|
if not resources:
|
||||||
global_resources = DEFAULT_VOLUME_RESOURCES
|
resources = DEFAULT_VOLUME_RESOURCES
|
||||||
for volume_name, volume_path in spec_volumes.items():
|
for volume_name, volume_path in spec_volumes.items():
|
||||||
# We only need to create a volume if it is fully qualified HostPath.
|
# We only need to create a volume if it is fully qualified HostPath.
|
||||||
# Otherwise, we create the PVC and expect the node to allocate the volume
|
# Otherwise, we create the PVC and expect the node to allocate the volume for us.
|
||||||
# for us.
|
|
||||||
if not volume_path:
|
if not volume_path:
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(
|
print(f"{volume_name} does not require an explicit PersistentVolume, since it is not a bind-mount.")
|
||||||
f"{volume_name} does not require an explicit "
|
|
||||||
"PersistentVolume, since it is not a bind-mount."
|
|
||||||
)
|
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if volume_name not in named_volumes:
|
if volume_name not in named_volumes:
|
||||||
@ -380,250 +279,80 @@ class ClusterInfo:
|
|||||||
continue
|
continue
|
||||||
|
|
||||||
if not os.path.isabs(volume_path):
|
if not os.path.isabs(volume_path):
|
||||||
# For k8s-kind, allow relative paths:
|
print(f"WARNING: {volume_name}:{volume_path} is not absolute, cannot bind volume.")
|
||||||
# - PV uses /mnt/{volume_name} (path inside kind node)
|
|
||||||
# - extraMounts resolve the relative path to Docker Host
|
|
||||||
if not self.spec.is_kind_deployment():
|
|
||||||
print(
|
|
||||||
f"WARNING: {volume_name}:{volume_path} is not absolute, "
|
|
||||||
"cannot bind volume."
|
|
||||||
)
|
|
||||||
continue
|
continue
|
||||||
|
|
||||||
vol_resources = (
|
|
||||||
self.spec.get_volume_resources_for(volume_name)
|
|
||||||
or global_resources
|
|
||||||
)
|
|
||||||
if self.spec.is_kind_deployment():
|
if self.spec.is_kind_deployment():
|
||||||
host_path = client.V1HostPathVolumeSource(
|
host_path = client.V1HostPathVolumeSource(path=get_kind_pv_bind_mount_path(volume_name))
|
||||||
path=get_kind_pv_bind_mount_path(volume_name)
|
|
||||||
)
|
|
||||||
else:
|
else:
|
||||||
host_path = client.V1HostPathVolumeSource(path=volume_path)
|
host_path = client.V1HostPathVolumeSource(path=volume_path)
|
||||||
spec = client.V1PersistentVolumeSpec(
|
spec = client.V1PersistentVolumeSpec(
|
||||||
storage_class_name="manual",
|
storage_class_name="manual",
|
||||||
access_modes=["ReadWriteOnce"],
|
access_modes=["ReadWriteOnce"],
|
||||||
capacity=to_k8s_resource_requirements(vol_resources).requests,
|
capacity=to_k8s_resource_requirements(resources).requests,
|
||||||
host_path=host_path,
|
host_path=host_path
|
||||||
)
|
)
|
||||||
pv = client.V1PersistentVolume(
|
pv = client.V1PersistentVolume(
|
||||||
metadata=client.V1ObjectMeta(
|
metadata=client.V1ObjectMeta(name=f"{self.app_name}-{volume_name}",
|
||||||
name=f"{self.app_name}-{volume_name}",
|
labels={"volume-label": f"{self.app_name}-{volume_name}"}),
|
||||||
labels={
|
|
||||||
"app": self.app_name,
|
|
||||||
"volume-label": f"{self.app_name}-{volume_name}",
|
|
||||||
},
|
|
||||||
),
|
|
||||||
spec=spec,
|
spec=spec,
|
||||||
)
|
)
|
||||||
result.append(pv)
|
result.append(pv)
|
||||||
return result
|
return result
|
||||||
|
|
||||||
def _any_service_has_host_network(self):
|
# TODO: put things like image pull policy into an object-scope struct
|
||||||
|
def get_deployment(self, image_pull_policy: str = None):
|
||||||
|
containers = []
|
||||||
|
resources = self.spec.get_container_resources()
|
||||||
|
if not resources:
|
||||||
|
resources = DEFAULT_CONTAINER_RESOURCES
|
||||||
for pod_name in self.parsed_pod_yaml_map:
|
for pod_name in self.parsed_pod_yaml_map:
|
||||||
pod = self.parsed_pod_yaml_map[pod_name]
|
pod = self.parsed_pod_yaml_map[pod_name]
|
||||||
for svc in pod.get("services", {}).values():
|
|
||||||
if svc.get("network_mode") == "host":
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
|
|
||||||
def _resolve_container_resources(
|
|
||||||
self, container_name: str, service_info: dict, global_resources: Resources
|
|
||||||
) -> Resources:
|
|
||||||
"""Resolve resources for a container using layered priority.
|
|
||||||
|
|
||||||
Priority: spec per-container > compose deploy.resources
|
|
||||||
> spec global > DEFAULT
|
|
||||||
"""
|
|
||||||
# 1. Check spec.yml for per-container override
|
|
||||||
per_container = self.spec.get_container_resources_for(container_name)
|
|
||||||
if per_container:
|
|
||||||
return per_container
|
|
||||||
|
|
||||||
# 2. Check compose service_info for deploy.resources
|
|
||||||
deploy_block = service_info.get("deploy", {})
|
|
||||||
compose_resources = deploy_block.get("resources", {}) if deploy_block else {}
|
|
||||||
if compose_resources:
|
|
||||||
return Resources(compose_resources)
|
|
||||||
|
|
||||||
# 3. Fall back to spec.yml global (already resolved with DEFAULT fallback)
|
|
||||||
return global_resources
|
|
||||||
|
|
||||||
def _build_containers(
|
|
||||||
self,
|
|
||||||
parsed_yaml_map: Any,
|
|
||||||
image_pull_policy: Optional[str] = None,
|
|
||||||
) -> tuple:
|
|
||||||
"""Build k8s container specs from parsed compose YAML.
|
|
||||||
|
|
||||||
Returns a tuple of (containers, init_containers, services, volumes)
|
|
||||||
where:
|
|
||||||
- containers: list of V1Container objects
|
|
||||||
- init_containers: list of V1Container objects for init containers
|
|
||||||
(compose services with label ``laconic.init-container: "true"``)
|
|
||||||
- services: the last services dict processed (used for annotations/labels)
|
|
||||||
- volumes: list of V1Volume objects
|
|
||||||
"""
|
|
||||||
containers = []
|
|
||||||
init_containers = []
|
|
||||||
services = {}
|
|
||||||
global_resources = self.spec.get_container_resources()
|
|
||||||
if not global_resources:
|
|
||||||
global_resources = DEFAULT_CONTAINER_RESOURCES
|
|
||||||
for pod_name in parsed_yaml_map:
|
|
||||||
pod = parsed_yaml_map[pod_name]
|
|
||||||
services = pod["services"]
|
services = pod["services"]
|
||||||
for service_name in services:
|
for service_name in services:
|
||||||
container_name = service_name
|
container_name = service_name
|
||||||
service_info = services[service_name]
|
service_info = services[service_name]
|
||||||
image = service_info["image"]
|
image = service_info["image"]
|
||||||
container_ports = []
|
|
||||||
if "ports" in service_info:
|
if "ports" in service_info:
|
||||||
for raw_port in [str(p) for p in service_info["ports"]]:
|
port = int(service_info["ports"][0])
|
||||||
# Parse protocol suffix (e.g., "8001/udp" -> port=8001,
|
|
||||||
# protocol=UDP)
|
|
||||||
protocol = "TCP"
|
|
||||||
port_str = raw_port
|
|
||||||
if "/" in raw_port:
|
|
||||||
port_str, proto = raw_port.rsplit("/", 1)
|
|
||||||
protocol = proto.upper()
|
|
||||||
# Handle host:container port mapping - use container port
|
|
||||||
if ":" in port_str:
|
|
||||||
port_str = port_str.split(":")[-1]
|
|
||||||
port = int(port_str)
|
|
||||||
container_ports.append(
|
|
||||||
client.V1ContainerPort(
|
|
||||||
container_port=port, protocol=protocol
|
|
||||||
)
|
|
||||||
)
|
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"image: {image}")
|
print(f"image: {image}")
|
||||||
print(f"service ports: {container_ports}")
|
print(f"service port: {port}")
|
||||||
merged_envs = (
|
merged_envs = merge_envs(
|
||||||
merge_envs(
|
|
||||||
envs_from_compose_file(
|
envs_from_compose_file(
|
||||||
service_info["environment"], self.environment_variables.map
|
service_info["environment"]), self.environment_variables.map
|
||||||
),
|
) if "environment" in service_info else self.environment_variables.map
|
||||||
self.environment_variables.map,
|
|
||||||
)
|
|
||||||
if "environment" in service_info
|
|
||||||
else self.environment_variables.map
|
|
||||||
)
|
|
||||||
# Translate docker-compose service names to localhost for sidecars
|
|
||||||
# All services in the same pod share the network namespace
|
|
||||||
sibling_services = [s for s in services.keys() if s != service_name]
|
|
||||||
merged_envs = translate_sidecar_service_names(
|
|
||||||
merged_envs, sibling_services
|
|
||||||
)
|
|
||||||
envs = envs_from_environment_variables_map(merged_envs)
|
envs = envs_from_environment_variables_map(merged_envs)
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"Merged envs: {envs}")
|
print(f"Merged envs: {envs}")
|
||||||
# Re-write the image tag for remote deployment
|
# Re-write the image tag for remote deployment
|
||||||
# Note self.app_name has the same value as deployment_id
|
# Note self.app_name has the same value as deployment_id
|
||||||
image_to_use = (
|
image_to_use = remote_tag_for_image_unique(
|
||||||
remote_tag_for_image_unique(
|
image,
|
||||||
image, self.spec.get_image_registry(), self.app_name
|
self.spec.get_image_registry(),
|
||||||
)
|
self.app_name) if self.spec.get_image_registry() is not None else image
|
||||||
if self.spec.get_image_registry() is not None
|
volume_mounts = volume_mounts_for_service(self.parsed_pod_yaml_map, service_name)
|
||||||
else image
|
|
||||||
)
|
|
||||||
volume_mounts = volume_mounts_for_service(
|
|
||||||
parsed_yaml_map, service_name
|
|
||||||
)
|
|
||||||
# Handle command/entrypoint from compose file
|
|
||||||
# In docker-compose: entrypoint -> k8s command, command -> k8s args
|
|
||||||
container_command = None
|
|
||||||
container_args = None
|
|
||||||
if "entrypoint" in service_info:
|
|
||||||
entrypoint = service_info["entrypoint"]
|
|
||||||
container_command = (
|
|
||||||
entrypoint if isinstance(entrypoint, list) else [entrypoint]
|
|
||||||
)
|
|
||||||
if "command" in service_info:
|
|
||||||
cmd = service_info["command"]
|
|
||||||
container_args = cmd if isinstance(cmd, list) else cmd.split()
|
|
||||||
# Add env_from to pull secrets from K8s Secret
|
|
||||||
secret_name = f"{self.app_name}-generated-secrets"
|
|
||||||
env_from = [
|
|
||||||
client.V1EnvFromSource(
|
|
||||||
secret_ref=client.V1SecretEnvSource(
|
|
||||||
name=secret_name,
|
|
||||||
optional=True, # Don't fail if no secrets
|
|
||||||
)
|
|
||||||
)
|
|
||||||
]
|
|
||||||
# Mount user-declared secrets from spec.yml
|
|
||||||
for user_secret_name in self.spec.get_secrets():
|
|
||||||
env_from.append(
|
|
||||||
client.V1EnvFromSource(
|
|
||||||
secret_ref=client.V1SecretEnvSource(
|
|
||||||
name=user_secret_name,
|
|
||||||
optional=True,
|
|
||||||
)
|
|
||||||
)
|
|
||||||
)
|
|
||||||
container_resources = self._resolve_container_resources(
|
|
||||||
container_name, service_info, global_resources
|
|
||||||
)
|
|
||||||
container = client.V1Container(
|
container = client.V1Container(
|
||||||
name=container_name,
|
name=container_name,
|
||||||
image=image_to_use,
|
image=image_to_use,
|
||||||
image_pull_policy=image_pull_policy,
|
image_pull_policy=image_pull_policy,
|
||||||
command=container_command,
|
|
||||||
args=container_args,
|
|
||||||
env=envs,
|
env=envs,
|
||||||
env_from=env_from,
|
ports=[client.V1ContainerPort(container_port=port)],
|
||||||
ports=container_ports if container_ports else None,
|
|
||||||
volume_mounts=volume_mounts,
|
volume_mounts=volume_mounts,
|
||||||
security_context=client.V1SecurityContext(
|
security_context=client.V1SecurityContext(
|
||||||
privileged=self.spec.get_privileged(),
|
privileged=self.spec.get_privileged(),
|
||||||
run_as_user=int(service_info["user"]) if "user" in service_info else None,
|
|
||||||
capabilities=client.V1Capabilities(
|
capabilities=client.V1Capabilities(
|
||||||
add=self.spec.get_capabilities()
|
add=self.spec.get_capabilities()
|
||||||
)
|
) if self.spec.get_capabilities() else None
|
||||||
if self.spec.get_capabilities()
|
|
||||||
else None,
|
|
||||||
),
|
),
|
||||||
resources=to_k8s_resource_requirements(container_resources),
|
resources=to_k8s_resource_requirements(resources),
|
||||||
)
|
)
|
||||||
# Services with laconic.init-container label become
|
|
||||||
# k8s init containers instead of regular containers.
|
|
||||||
svc_labels = service_info.get("labels", {})
|
|
||||||
if isinstance(svc_labels, list):
|
|
||||||
# docker-compose labels can be a list of "key=value"
|
|
||||||
svc_labels = dict(
|
|
||||||
item.split("=", 1) for item in svc_labels
|
|
||||||
)
|
|
||||||
is_init = str(
|
|
||||||
svc_labels.get("laconic.init-container", "")
|
|
||||||
).lower() in ("true", "1", "yes")
|
|
||||||
if is_init:
|
|
||||||
init_containers.append(container)
|
|
||||||
else:
|
|
||||||
containers.append(container)
|
containers.append(container)
|
||||||
volumes = volumes_for_pod_files(
|
volumes = volumes_for_pod_files(self.parsed_pod_yaml_map, self.spec, self.app_name)
|
||||||
parsed_yaml_map, self.spec, self.app_name
|
image_pull_secrets = [client.V1LocalObjectReference(name="laconic-registry")]
|
||||||
)
|
|
||||||
return containers, init_containers, services, volumes
|
|
||||||
|
|
||||||
# TODO: put things like image pull policy into an object-scope struct
|
|
||||||
def get_deployment(self, image_pull_policy: Optional[str] = None):
|
|
||||||
containers, init_containers, services, volumes = self._build_containers(
|
|
||||||
self.parsed_pod_yaml_map, image_pull_policy
|
|
||||||
)
|
|
||||||
registry_config = self.spec.get_image_registry_config()
|
|
||||||
if registry_config:
|
|
||||||
secret_name = f"{self.app_name}-registry"
|
|
||||||
image_pull_secrets = [client.V1LocalObjectReference(name=secret_name)]
|
|
||||||
else:
|
|
||||||
image_pull_secrets = []
|
|
||||||
|
|
||||||
annotations = None
|
annotations = None
|
||||||
labels = {"app": self.app_name}
|
labels = {"app": self.app_name}
|
||||||
if self.stack_name:
|
|
||||||
labels["app.kubernetes.io/stack"] = self.stack_name
|
|
||||||
affinity = None
|
|
||||||
tolerations = None
|
|
||||||
|
|
||||||
if self.spec.get_annotations():
|
if self.spec.get_annotations():
|
||||||
annotations = {}
|
annotations = {}
|
||||||
@ -636,145 +365,22 @@ class ClusterInfo:
|
|||||||
for service_name in services:
|
for service_name in services:
|
||||||
labels[key.replace("{name}", service_name)] = value
|
labels[key.replace("{name}", service_name)] = value
|
||||||
|
|
||||||
if self.spec.get_node_affinities():
|
|
||||||
affinities = []
|
|
||||||
for rule in self.spec.get_node_affinities():
|
|
||||||
# TODO add some input validation here
|
|
||||||
label_name = rule["label"]
|
|
||||||
label_value = rule["value"]
|
|
||||||
affinities.append(
|
|
||||||
client.V1NodeSelectorTerm(
|
|
||||||
match_expressions=[
|
|
||||||
client.V1NodeSelectorRequirement(
|
|
||||||
key=label_name, operator="In", values=[label_value]
|
|
||||||
)
|
|
||||||
]
|
|
||||||
)
|
|
||||||
)
|
|
||||||
affinity = client.V1Affinity(
|
|
||||||
node_affinity=client.V1NodeAffinity(
|
|
||||||
required_during_scheduling_ignored_during_execution=(
|
|
||||||
client.V1NodeSelector(node_selector_terms=affinities)
|
|
||||||
)
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
if self.spec.get_node_tolerations():
|
|
||||||
tolerations = []
|
|
||||||
for toleration in self.spec.get_node_tolerations():
|
|
||||||
# TODO add some input validation here
|
|
||||||
toleration_key = toleration["key"]
|
|
||||||
toleration_value = toleration["value"]
|
|
||||||
tolerations.append(
|
|
||||||
client.V1Toleration(
|
|
||||||
effect="NoSchedule",
|
|
||||||
key=toleration_key,
|
|
||||||
operator="Equal",
|
|
||||||
value=toleration_value,
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
use_host_network = self._any_service_has_host_network()
|
|
||||||
template = client.V1PodTemplateSpec(
|
template = client.V1PodTemplateSpec(
|
||||||
metadata=client.V1ObjectMeta(annotations=annotations, labels=labels),
|
metadata=client.V1ObjectMeta(
|
||||||
spec=client.V1PodSpec(
|
annotations=annotations,
|
||||||
containers=containers,
|
labels=labels
|
||||||
init_containers=init_containers or None,
|
|
||||||
image_pull_secrets=image_pull_secrets,
|
|
||||||
volumes=volumes,
|
|
||||||
affinity=affinity,
|
|
||||||
tolerations=tolerations,
|
|
||||||
runtime_class_name=self.spec.get_runtime_class(),
|
|
||||||
host_network=use_host_network or None,
|
|
||||||
dns_policy=("ClusterFirstWithHostNet" if use_host_network else None),
|
|
||||||
),
|
),
|
||||||
|
spec=client.V1PodSpec(containers=containers, image_pull_secrets=image_pull_secrets, volumes=volumes),
|
||||||
)
|
)
|
||||||
spec = client.V1DeploymentSpec(
|
spec = client.V1DeploymentSpec(
|
||||||
replicas=self.spec.get_replicas(),
|
replicas=1, template=template, selector={
|
||||||
template=template,
|
"matchLabels":
|
||||||
selector={"matchLabels": {"app": self.app_name}},
|
{"app": self.app_name}})
|
||||||
)
|
|
||||||
|
|
||||||
deployment = client.V1Deployment(
|
deployment = client.V1Deployment(
|
||||||
api_version="apps/v1",
|
api_version="apps/v1",
|
||||||
kind="Deployment",
|
kind="Deployment",
|
||||||
metadata=client.V1ObjectMeta(
|
metadata=client.V1ObjectMeta(name=f"{self.app_name}-deployment"),
|
||||||
name=f"{self.app_name}-deployment",
|
|
||||||
labels={"app": self.app_name, **({"app.kubernetes.io/stack": self.stack_name} if self.stack_name else {})},
|
|
||||||
),
|
|
||||||
spec=spec,
|
spec=spec,
|
||||||
)
|
)
|
||||||
return deployment
|
return deployment
|
||||||
|
|
||||||
def get_jobs(self, image_pull_policy: Optional[str] = None) -> List[client.V1Job]:
|
|
||||||
"""Build k8s Job objects from parsed job compose files.
|
|
||||||
|
|
||||||
Each job compose file produces a V1Job with:
|
|
||||||
- restartPolicy: Never
|
|
||||||
- backoffLimit: 0
|
|
||||||
- Name: {app_name}-job-{job_name}
|
|
||||||
"""
|
|
||||||
if not self.parsed_job_yaml_map:
|
|
||||||
return []
|
|
||||||
|
|
||||||
jobs = []
|
|
||||||
registry_config = self.spec.get_image_registry_config()
|
|
||||||
if registry_config:
|
|
||||||
secret_name = f"{self.app_name}-registry"
|
|
||||||
image_pull_secrets = [client.V1LocalObjectReference(name=secret_name)]
|
|
||||||
else:
|
|
||||||
image_pull_secrets = []
|
|
||||||
|
|
||||||
for job_file in self.parsed_job_yaml_map:
|
|
||||||
# Build containers for this single job file
|
|
||||||
single_job_map = {job_file: self.parsed_job_yaml_map[job_file]}
|
|
||||||
containers, init_containers, _services, volumes = (
|
|
||||||
self._build_containers(single_job_map, image_pull_policy)
|
|
||||||
)
|
|
||||||
|
|
||||||
# Derive job name from file path: docker-compose-<name>.yml -> <name>
|
|
||||||
base = os.path.basename(job_file)
|
|
||||||
# Strip docker-compose- prefix and .yml suffix
|
|
||||||
job_name = base
|
|
||||||
if job_name.startswith("docker-compose-"):
|
|
||||||
job_name = job_name[len("docker-compose-"):]
|
|
||||||
if job_name.endswith(".yml"):
|
|
||||||
job_name = job_name[: -len(".yml")]
|
|
||||||
elif job_name.endswith(".yaml"):
|
|
||||||
job_name = job_name[: -len(".yaml")]
|
|
||||||
|
|
||||||
# Use a distinct app label for job pods so they don't get
|
|
||||||
# picked up by pods_in_deployment() which queries app={app_name}.
|
|
||||||
pod_labels = {
|
|
||||||
"app": f"{self.app_name}-job",
|
|
||||||
**({"app.kubernetes.io/stack": self.stack_name} if self.stack_name else {}),
|
|
||||||
}
|
|
||||||
template = client.V1PodTemplateSpec(
|
|
||||||
metadata=client.V1ObjectMeta(
|
|
||||||
labels=pod_labels
|
|
||||||
),
|
|
||||||
spec=client.V1PodSpec(
|
|
||||||
containers=containers,
|
|
||||||
init_containers=init_containers or None,
|
|
||||||
image_pull_secrets=image_pull_secrets,
|
|
||||||
volumes=volumes,
|
|
||||||
restart_policy="Never",
|
|
||||||
),
|
|
||||||
)
|
|
||||||
job_spec = client.V1JobSpec(
|
|
||||||
template=template,
|
|
||||||
backoff_limit=0,
|
|
||||||
)
|
|
||||||
job_labels = {"app": self.app_name, **({"app.kubernetes.io/stack": self.stack_name} if self.stack_name else {})}
|
|
||||||
job = client.V1Job(
|
|
||||||
api_version="batch/v1",
|
|
||||||
kind="Job",
|
|
||||||
metadata=client.V1ObjectMeta(
|
|
||||||
name=f"{self.app_name}-job-{job_name}",
|
|
||||||
labels=job_labels,
|
|
||||||
),
|
|
||||||
spec=job_spec,
|
|
||||||
)
|
|
||||||
jobs.append(job)
|
|
||||||
|
|
||||||
return jobs
|
|
||||||
|
|||||||
@ -16,30 +16,13 @@ from datetime import datetime, timezone
|
|||||||
|
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from kubernetes import client, config
|
from kubernetes import client, config
|
||||||
from kubernetes.client.exceptions import ApiException
|
|
||||||
from typing import Any, Dict, List, Optional, cast
|
|
||||||
|
|
||||||
from stack_orchestrator import constants
|
from stack_orchestrator import constants
|
||||||
from stack_orchestrator.deploy.deployer import Deployer, DeployerConfigGenerator
|
from stack_orchestrator.deploy.deployer import Deployer, DeployerConfigGenerator
|
||||||
from stack_orchestrator.deploy.k8s.helpers import (
|
from stack_orchestrator.deploy.k8s.helpers import create_cluster, destroy_cluster, load_images_into_kind
|
||||||
create_cluster,
|
from stack_orchestrator.deploy.k8s.helpers import install_ingress_for_kind, wait_for_ingress_in_kind
|
||||||
destroy_cluster,
|
from stack_orchestrator.deploy.k8s.helpers import pods_in_deployment, containers_in_pod, log_stream_from_string
|
||||||
load_images_into_kind,
|
from stack_orchestrator.deploy.k8s.helpers import generate_kind_config
|
||||||
)
|
|
||||||
from stack_orchestrator.deploy.k8s.helpers import (
|
|
||||||
install_ingress_for_kind,
|
|
||||||
wait_for_ingress_in_kind,
|
|
||||||
is_ingress_running,
|
|
||||||
)
|
|
||||||
from stack_orchestrator.deploy.k8s.helpers import (
|
|
||||||
pods_in_deployment,
|
|
||||||
containers_in_pod,
|
|
||||||
log_stream_from_string,
|
|
||||||
)
|
|
||||||
from stack_orchestrator.deploy.k8s.helpers import (
|
|
||||||
generate_kind_config,
|
|
||||||
generate_high_memlock_spec_json,
|
|
||||||
)
|
|
||||||
from stack_orchestrator.deploy.k8s.cluster_info import ClusterInfo
|
from stack_orchestrator.deploy.k8s.cluster_info import ClusterInfo
|
||||||
from stack_orchestrator.opts import opts
|
from stack_orchestrator.opts import opts
|
||||||
from stack_orchestrator.deploy.deployment_context import DeploymentContext
|
from stack_orchestrator.deploy.deployment_context import DeploymentContext
|
||||||
@ -52,7 +35,7 @@ class AttrDict(dict):
|
|||||||
self.__dict__ = self
|
self.__dict__ = self
|
||||||
|
|
||||||
|
|
||||||
def _check_delete_exception(e: ApiException) -> None:
|
def _check_delete_exception(e: client.exceptions.ApiException):
|
||||||
if e.status == 404:
|
if e.status == 404:
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print("Failed to delete object, continuing")
|
print("Failed to delete object, continuing")
|
||||||
@ -60,90 +43,31 @@ def _check_delete_exception(e: ApiException) -> None:
|
|||||||
error_exit(f"k8s api error: {e}")
|
error_exit(f"k8s api error: {e}")
|
||||||
|
|
||||||
|
|
||||||
def _create_runtime_class(name: str, handler: str):
|
|
||||||
"""Create a RuntimeClass resource for custom containerd runtime handlers.
|
|
||||||
|
|
||||||
RuntimeClass allows pods to specify which runtime handler to use, enabling
|
|
||||||
different pods to have different rlimit profiles (e.g., high-memlock).
|
|
||||||
|
|
||||||
Args:
|
|
||||||
name: The name of the RuntimeClass resource
|
|
||||||
handler: The containerd runtime handler name
|
|
||||||
(must match containerdConfigPatches)
|
|
||||||
"""
|
|
||||||
api = client.NodeV1Api()
|
|
||||||
runtime_class = client.V1RuntimeClass(
|
|
||||||
api_version="node.k8s.io/v1",
|
|
||||||
kind="RuntimeClass",
|
|
||||||
metadata=client.V1ObjectMeta(name=name),
|
|
||||||
handler=handler,
|
|
||||||
)
|
|
||||||
try:
|
|
||||||
api.create_runtime_class(runtime_class)
|
|
||||||
if opts.o.debug:
|
|
||||||
print(f"Created RuntimeClass: {name}")
|
|
||||||
except ApiException as e:
|
|
||||||
if e.status == 409: # Already exists
|
|
||||||
if opts.o.debug:
|
|
||||||
print(f"RuntimeClass {name} already exists")
|
|
||||||
else:
|
|
||||||
raise
|
|
||||||
|
|
||||||
|
|
||||||
class K8sDeployer(Deployer):
|
class K8sDeployer(Deployer):
|
||||||
name: str = "k8s"
|
name: str = "k8s"
|
||||||
type: str
|
type: str
|
||||||
core_api: client.CoreV1Api
|
core_api: client.CoreV1Api
|
||||||
apps_api: client.AppsV1Api
|
apps_api: client.AppsV1Api
|
||||||
batch_api: client.BatchV1Api
|
|
||||||
networking_api: client.NetworkingV1Api
|
networking_api: client.NetworkingV1Api
|
||||||
k8s_namespace: str
|
k8s_namespace: str = "default"
|
||||||
kind_cluster_name: str
|
kind_cluster_name: str
|
||||||
skip_cluster_management: bool
|
|
||||||
cluster_info: ClusterInfo
|
cluster_info: ClusterInfo
|
||||||
deployment_dir: Path
|
deployment_dir: Path
|
||||||
deployment_context: DeploymentContext
|
deployment_context: DeploymentContext
|
||||||
|
|
||||||
def __init__(
|
def __init__(self, type, deployment_context: DeploymentContext, compose_files, compose_project_name, compose_env_file) -> None:
|
||||||
self,
|
|
||||||
type,
|
|
||||||
deployment_context: DeploymentContext,
|
|
||||||
compose_files,
|
|
||||||
compose_project_name,
|
|
||||||
compose_env_file,
|
|
||||||
job_compose_files=None,
|
|
||||||
) -> None:
|
|
||||||
self.type = type
|
self.type = type
|
||||||
self.skip_cluster_management = False
|
# TODO: workaround pending refactoring above to cope with being created with a null deployment_context
|
||||||
self.k8s_namespace = "default" # Will be overridden below if context exists
|
|
||||||
# TODO: workaround pending refactoring above to cope with being
|
|
||||||
# created with a null deployment_context
|
|
||||||
if deployment_context is None:
|
if deployment_context is None:
|
||||||
return
|
return
|
||||||
self.deployment_dir = deployment_context.deployment_dir
|
self.deployment_dir = deployment_context.deployment_dir
|
||||||
self.deployment_context = deployment_context
|
self.deployment_context = deployment_context
|
||||||
self.kind_cluster_name = deployment_context.spec.get_kind_cluster_name() or compose_project_name
|
self.kind_cluster_name = compose_project_name
|
||||||
# Use spec namespace if provided, otherwise derive from cluster-id
|
|
||||||
self.k8s_namespace = deployment_context.spec.get_namespace() or f"laconic-{compose_project_name}"
|
|
||||||
self.cluster_info = ClusterInfo()
|
self.cluster_info = ClusterInfo()
|
||||||
# stack.name may be an absolute path (from spec "stack:" key after
|
self.cluster_info.int(compose_files, compose_env_file, compose_project_name, deployment_context.spec)
|
||||||
# path resolution). Extract just the directory basename for labels.
|
if (opts.o.debug):
|
||||||
raw_name = deployment_context.stack.name if deployment_context else ""
|
|
||||||
stack_name = Path(raw_name).name if raw_name else ""
|
|
||||||
self.cluster_info.int(
|
|
||||||
compose_files,
|
|
||||||
compose_env_file,
|
|
||||||
compose_project_name,
|
|
||||||
deployment_context.spec,
|
|
||||||
stack_name=stack_name,
|
|
||||||
)
|
|
||||||
# Initialize job compose files if provided
|
|
||||||
if job_compose_files:
|
|
||||||
self.cluster_info.init_jobs(job_compose_files)
|
|
||||||
if opts.o.debug:
|
|
||||||
print(f"Deployment dir: {deployment_context.deployment_dir}")
|
print(f"Deployment dir: {deployment_context.deployment_dir}")
|
||||||
print(f"Compose files: {compose_files}")
|
print(f"Compose files: {compose_files}")
|
||||||
print(f"Job compose files: {job_compose_files}")
|
|
||||||
print(f"Project name: {compose_project_name}")
|
print(f"Project name: {compose_project_name}")
|
||||||
print(f"Env file: {compose_env_file}")
|
print(f"Env file: {compose_env_file}")
|
||||||
print(f"Type: {type}")
|
print(f"Type: {type}")
|
||||||
@ -153,144 +77,12 @@ class K8sDeployer(Deployer):
|
|||||||
config.load_kube_config(context=f"kind-{self.kind_cluster_name}")
|
config.load_kube_config(context=f"kind-{self.kind_cluster_name}")
|
||||||
else:
|
else:
|
||||||
# Get the config file and pass to load_kube_config()
|
# Get the config file and pass to load_kube_config()
|
||||||
config.load_kube_config(
|
config.load_kube_config(config_file=self.deployment_dir.joinpath(constants.kube_config_filename).as_posix())
|
||||||
config_file=self.deployment_dir.joinpath(
|
|
||||||
constants.kube_config_filename
|
|
||||||
).as_posix()
|
|
||||||
)
|
|
||||||
self.core_api = client.CoreV1Api()
|
self.core_api = client.CoreV1Api()
|
||||||
self.networking_api = client.NetworkingV1Api()
|
self.networking_api = client.NetworkingV1Api()
|
||||||
self.apps_api = client.AppsV1Api()
|
self.apps_api = client.AppsV1Api()
|
||||||
self.batch_api = client.BatchV1Api()
|
|
||||||
self.custom_obj_api = client.CustomObjectsApi()
|
self.custom_obj_api = client.CustomObjectsApi()
|
||||||
|
|
||||||
def _ensure_namespace(self):
|
|
||||||
"""Create the deployment namespace if it doesn't exist."""
|
|
||||||
if opts.o.dry_run:
|
|
||||||
print(f"Dry run: would create namespace {self.k8s_namespace}")
|
|
||||||
return
|
|
||||||
try:
|
|
||||||
self.core_api.read_namespace(name=self.k8s_namespace)
|
|
||||||
if opts.o.debug:
|
|
||||||
print(f"Namespace {self.k8s_namespace} already exists")
|
|
||||||
except ApiException as e:
|
|
||||||
if e.status == 404:
|
|
||||||
# Create the namespace
|
|
||||||
ns = client.V1Namespace(
|
|
||||||
metadata=client.V1ObjectMeta(
|
|
||||||
name=self.k8s_namespace,
|
|
||||||
labels={"app": self.cluster_info.app_name},
|
|
||||||
)
|
|
||||||
)
|
|
||||||
self.core_api.create_namespace(body=ns)
|
|
||||||
if opts.o.debug:
|
|
||||||
print(f"Created namespace {self.k8s_namespace}")
|
|
||||||
else:
|
|
||||||
raise
|
|
||||||
|
|
||||||
def _delete_namespace(self):
|
|
||||||
"""Delete the deployment namespace and all resources within it."""
|
|
||||||
if opts.o.dry_run:
|
|
||||||
print(f"Dry run: would delete namespace {self.k8s_namespace}")
|
|
||||||
return
|
|
||||||
try:
|
|
||||||
self.core_api.delete_namespace(name=self.k8s_namespace)
|
|
||||||
if opts.o.debug:
|
|
||||||
print(f"Deleted namespace {self.k8s_namespace}")
|
|
||||||
except ApiException as e:
|
|
||||||
if e.status == 404:
|
|
||||||
if opts.o.debug:
|
|
||||||
print(f"Namespace {self.k8s_namespace} not found")
|
|
||||||
else:
|
|
||||||
raise
|
|
||||||
|
|
||||||
def _delete_resources_by_label(self, label_selector: str, delete_volumes: bool):
|
|
||||||
"""Delete only this stack's resources from a shared namespace."""
|
|
||||||
ns = self.k8s_namespace
|
|
||||||
if opts.o.dry_run:
|
|
||||||
print(f"Dry run: would delete resources with {label_selector} in {ns}")
|
|
||||||
return
|
|
||||||
|
|
||||||
# Deployments
|
|
||||||
try:
|
|
||||||
deps = self.apps_api.list_namespaced_deployment(
|
|
||||||
namespace=ns, label_selector=label_selector
|
|
||||||
)
|
|
||||||
for dep in deps.items:
|
|
||||||
print(f"Deleting Deployment {dep.metadata.name}")
|
|
||||||
self.apps_api.delete_namespaced_deployment(
|
|
||||||
name=dep.metadata.name, namespace=ns
|
|
||||||
)
|
|
||||||
except ApiException as e:
|
|
||||||
_check_delete_exception(e)
|
|
||||||
|
|
||||||
# Jobs
|
|
||||||
try:
|
|
||||||
jobs = self.batch_api.list_namespaced_job(
|
|
||||||
namespace=ns, label_selector=label_selector
|
|
||||||
)
|
|
||||||
for job in jobs.items:
|
|
||||||
print(f"Deleting Job {job.metadata.name}")
|
|
||||||
self.batch_api.delete_namespaced_job(
|
|
||||||
name=job.metadata.name, namespace=ns,
|
|
||||||
body=client.V1DeleteOptions(propagation_policy="Background"),
|
|
||||||
)
|
|
||||||
except ApiException as e:
|
|
||||||
_check_delete_exception(e)
|
|
||||||
|
|
||||||
# Services (NodePorts created by SO)
|
|
||||||
try:
|
|
||||||
svcs = self.core_api.list_namespaced_service(
|
|
||||||
namespace=ns, label_selector=label_selector
|
|
||||||
)
|
|
||||||
for svc in svcs.items:
|
|
||||||
print(f"Deleting Service {svc.metadata.name}")
|
|
||||||
self.core_api.delete_namespaced_service(
|
|
||||||
name=svc.metadata.name, namespace=ns
|
|
||||||
)
|
|
||||||
except ApiException as e:
|
|
||||||
_check_delete_exception(e)
|
|
||||||
|
|
||||||
# Ingresses
|
|
||||||
try:
|
|
||||||
ings = self.networking_api.list_namespaced_ingress(
|
|
||||||
namespace=ns, label_selector=label_selector
|
|
||||||
)
|
|
||||||
for ing in ings.items:
|
|
||||||
print(f"Deleting Ingress {ing.metadata.name}")
|
|
||||||
self.networking_api.delete_namespaced_ingress(
|
|
||||||
name=ing.metadata.name, namespace=ns
|
|
||||||
)
|
|
||||||
except ApiException as e:
|
|
||||||
_check_delete_exception(e)
|
|
||||||
|
|
||||||
# ConfigMaps
|
|
||||||
try:
|
|
||||||
cms = self.core_api.list_namespaced_config_map(
|
|
||||||
namespace=ns, label_selector=label_selector
|
|
||||||
)
|
|
||||||
for cm in cms.items:
|
|
||||||
print(f"Deleting ConfigMap {cm.metadata.name}")
|
|
||||||
self.core_api.delete_namespaced_config_map(
|
|
||||||
name=cm.metadata.name, namespace=ns
|
|
||||||
)
|
|
||||||
except ApiException as e:
|
|
||||||
_check_delete_exception(e)
|
|
||||||
|
|
||||||
# PVCs (only if --delete-volumes)
|
|
||||||
if delete_volumes:
|
|
||||||
try:
|
|
||||||
pvcs = self.core_api.list_namespaced_persistent_volume_claim(
|
|
||||||
namespace=ns, label_selector=label_selector
|
|
||||||
)
|
|
||||||
for pvc in pvcs.items:
|
|
||||||
print(f"Deleting PVC {pvc.metadata.name}")
|
|
||||||
self.core_api.delete_namespaced_persistent_volume_claim(
|
|
||||||
name=pvc.metadata.name, namespace=ns
|
|
||||||
)
|
|
||||||
except ApiException as e:
|
|
||||||
_check_delete_exception(e)
|
|
||||||
|
|
||||||
def _create_volume_data(self):
|
def _create_volume_data(self):
|
||||||
# Create the host-path-mounted PVs for this deployment
|
# Create the host-path-mounted PVs for this deployment
|
||||||
pvs = self.cluster_info.get_pvs()
|
pvs = self.cluster_info.get_pvs()
|
||||||
@ -299,9 +91,7 @@ class K8sDeployer(Deployer):
|
|||||||
print(f"Sending this pv: {pv}")
|
print(f"Sending this pv: {pv}")
|
||||||
if not opts.o.dry_run:
|
if not opts.o.dry_run:
|
||||||
try:
|
try:
|
||||||
pv_resp = self.core_api.read_persistent_volume(
|
pv_resp = self.core_api.read_persistent_volume(name=pv.metadata.name)
|
||||||
name=pv.metadata.name
|
|
||||||
)
|
|
||||||
if pv_resp:
|
if pv_resp:
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print("PVs already present:")
|
print("PVs already present:")
|
||||||
@ -324,8 +114,7 @@ class K8sDeployer(Deployer):
|
|||||||
if not opts.o.dry_run:
|
if not opts.o.dry_run:
|
||||||
try:
|
try:
|
||||||
pvc_resp = self.core_api.read_namespaced_persistent_volume_claim(
|
pvc_resp = self.core_api.read_namespaced_persistent_volume_claim(
|
||||||
name=pvc.metadata.name, namespace=self.k8s_namespace
|
name=pvc.metadata.name, namespace=self.k8s_namespace)
|
||||||
)
|
|
||||||
if pvc_resp:
|
if pvc_resp:
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print("PVCs already present:")
|
print("PVCs already present:")
|
||||||
@ -334,9 +123,7 @@ class K8sDeployer(Deployer):
|
|||||||
except: # noqa: E722
|
except: # noqa: E722
|
||||||
pass
|
pass
|
||||||
|
|
||||||
pvc_resp = self.core_api.create_namespaced_persistent_volume_claim(
|
pvc_resp = self.core_api.create_namespaced_persistent_volume_claim(body=pvc, namespace=self.k8s_namespace)
|
||||||
body=pvc, namespace=self.k8s_namespace
|
|
||||||
)
|
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print("PVCs created:")
|
print("PVCs created:")
|
||||||
print(f"{pvc_resp}")
|
print(f"{pvc_resp}")
|
||||||
@ -348,81 +135,46 @@ class K8sDeployer(Deployer):
|
|||||||
print(f"Sending this ConfigMap: {cfg_map}")
|
print(f"Sending this ConfigMap: {cfg_map}")
|
||||||
if not opts.o.dry_run:
|
if not opts.o.dry_run:
|
||||||
cfg_rsp = self.core_api.create_namespaced_config_map(
|
cfg_rsp = self.core_api.create_namespaced_config_map(
|
||||||
body=cfg_map, namespace=self.k8s_namespace
|
body=cfg_map,
|
||||||
|
namespace=self.k8s_namespace
|
||||||
)
|
)
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print("ConfigMap created:")
|
print("ConfigMap created:")
|
||||||
print(f"{cfg_rsp}")
|
print(f"{cfg_rsp}")
|
||||||
|
|
||||||
def _create_deployment(self):
|
def _create_deployment(self):
|
||||||
# Skip if there are no pods to deploy (e.g. jobs-only stacks)
|
|
||||||
if not self.cluster_info.parsed_pod_yaml_map:
|
|
||||||
if opts.o.debug:
|
|
||||||
print("No pods defined, skipping Deployment creation")
|
|
||||||
return
|
|
||||||
# Process compose files into a Deployment
|
# Process compose files into a Deployment
|
||||||
deployment = self.cluster_info.get_deployment(
|
deployment = self.cluster_info.get_deployment(image_pull_policy=None if self.is_kind() else "Always")
|
||||||
image_pull_policy=None if self.is_kind() else "Always"
|
|
||||||
)
|
|
||||||
# Create the k8s objects
|
# Create the k8s objects
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"Sending this deployment: {deployment}")
|
print(f"Sending this deployment: {deployment}")
|
||||||
if not opts.o.dry_run:
|
if not opts.o.dry_run:
|
||||||
deployment_resp = cast(
|
deployment_resp = self.apps_api.create_namespaced_deployment(
|
||||||
client.V1Deployment,
|
|
||||||
self.apps_api.create_namespaced_deployment(
|
|
||||||
body=deployment, namespace=self.k8s_namespace
|
body=deployment, namespace=self.k8s_namespace
|
||||||
),
|
|
||||||
)
|
)
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print("Deployment created:")
|
print("Deployment created:")
|
||||||
meta = deployment_resp.metadata
|
print(f"{deployment_resp.metadata.namespace} {deployment_resp.metadata.name} \
|
||||||
spec = deployment_resp.spec
|
{deployment_resp.metadata.generation} {deployment_resp.spec.template.spec.containers[0].image}")
|
||||||
if meta and spec and spec.template.spec:
|
|
||||||
ns = meta.namespace
|
|
||||||
name = meta.name
|
|
||||||
gen = meta.generation
|
|
||||||
containers = spec.template.spec.containers
|
|
||||||
img = containers[0].image if containers else None
|
|
||||||
print(f"{ns} {name} {gen} {img}")
|
|
||||||
|
|
||||||
service = self.cluster_info.get_service()
|
service: client.V1Service = self.cluster_info.get_service()
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"Sending this service: {service}")
|
print(f"Sending this service: {service}")
|
||||||
if service and not opts.o.dry_run:
|
if not opts.o.dry_run:
|
||||||
service_resp = self.core_api.create_namespaced_service(
|
service_resp = self.core_api.create_namespaced_service(
|
||||||
namespace=self.k8s_namespace, body=service
|
namespace=self.k8s_namespace,
|
||||||
|
body=service
|
||||||
)
|
)
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print("Service created:")
|
print("Service created:")
|
||||||
print(f"{service_resp}")
|
print(f"{service_resp}")
|
||||||
|
|
||||||
def _create_jobs(self):
|
|
||||||
# Process job compose files into k8s Jobs
|
|
||||||
jobs = self.cluster_info.get_jobs(
|
|
||||||
image_pull_policy=None if self.is_kind() else "Always"
|
|
||||||
)
|
|
||||||
for job in jobs:
|
|
||||||
if opts.o.debug:
|
|
||||||
print(f"Sending this job: {job}")
|
|
||||||
if not opts.o.dry_run:
|
|
||||||
job_resp = self.batch_api.create_namespaced_job(
|
|
||||||
body=job, namespace=self.k8s_namespace
|
|
||||||
)
|
|
||||||
if opts.o.debug:
|
|
||||||
print("Job created:")
|
|
||||||
if job_resp.metadata:
|
|
||||||
print(
|
|
||||||
f" {job_resp.metadata.namespace} "
|
|
||||||
f"{job_resp.metadata.name}"
|
|
||||||
)
|
|
||||||
|
|
||||||
def _find_certificate_for_host_name(self, host_name):
|
def _find_certificate_for_host_name(self, host_name):
|
||||||
all_certificates = self.custom_obj_api.list_namespaced_custom_object(
|
all_certificates = self.custom_obj_api.list_namespaced_custom_object(
|
||||||
group="cert-manager.io",
|
group="cert-manager.io",
|
||||||
version="v1",
|
version="v1",
|
||||||
namespace=self.k8s_namespace,
|
namespace=self.k8s_namespace,
|
||||||
plural="certificates",
|
plural="certificates"
|
||||||
)
|
)
|
||||||
|
|
||||||
host_parts = host_name.split(".", 1)
|
host_parts = host_name.split(".", 1)
|
||||||
@ -430,7 +182,6 @@ class K8sDeployer(Deployer):
|
|||||||
if len(host_parts) == 2:
|
if len(host_parts) == 2:
|
||||||
host_as_wild = f"*.{host_parts[1]}"
|
host_as_wild = f"*.{host_parts[1]}"
|
||||||
|
|
||||||
# TODO: resolve method deprecation below
|
|
||||||
now = datetime.utcnow().replace(tzinfo=timezone.utc)
|
now = datetime.utcnow().replace(tzinfo=timezone.utc)
|
||||||
fmt = "%Y-%m-%dT%H:%M:%S%z"
|
fmt = "%Y-%m-%dT%H:%M:%S%z"
|
||||||
|
|
||||||
@ -447,90 +198,46 @@ class K8sDeployer(Deployer):
|
|||||||
if before < now < after:
|
if before < now < after:
|
||||||
# Check the status is Ready
|
# Check the status is Ready
|
||||||
for condition in status.get("conditions", []):
|
for condition in status.get("conditions", []):
|
||||||
if "True" == condition.get(
|
if "True" == condition.get("status") and "Ready" == condition.get("type"):
|
||||||
"status"
|
|
||||||
) and "Ready" == condition.get("type"):
|
|
||||||
return cert
|
return cert
|
||||||
return None
|
return None
|
||||||
|
|
||||||
def up(self, detach, skip_cluster_management, services):
|
def up(self, detach, services):
|
||||||
self.skip_cluster_management = skip_cluster_management
|
|
||||||
if not opts.o.dry_run:
|
if not opts.o.dry_run:
|
||||||
if self.is_kind() and not self.skip_cluster_management:
|
if self.is_kind():
|
||||||
# Create the kind cluster (or reuse existing one)
|
# Create the kind cluster
|
||||||
kind_config = str(
|
create_cluster(self.kind_cluster_name, self.deployment_dir.joinpath(constants.kind_config_filename))
|
||||||
self.deployment_dir.joinpath(constants.kind_config_filename)
|
# Ensure the referenced containers are copied into kind
|
||||||
)
|
load_images_into_kind(self.kind_cluster_name, self.cluster_info.image_set)
|
||||||
actual_cluster = create_cluster(self.kind_cluster_name, kind_config)
|
|
||||||
if actual_cluster != self.kind_cluster_name:
|
|
||||||
# An existing cluster was found, use it instead
|
|
||||||
self.kind_cluster_name = actual_cluster
|
|
||||||
# Only load locally-built images into kind
|
|
||||||
# Registry images (docker.io, ghcr.io, etc.) will be pulled by k8s
|
|
||||||
local_containers = self.deployment_context.stack.obj.get(
|
|
||||||
"containers", []
|
|
||||||
)
|
|
||||||
if local_containers:
|
|
||||||
# Filter image_set to only images matching local containers
|
|
||||||
local_images = {
|
|
||||||
img
|
|
||||||
for img in self.cluster_info.image_set
|
|
||||||
if any(c in img for c in local_containers)
|
|
||||||
}
|
|
||||||
if local_images:
|
|
||||||
load_images_into_kind(self.kind_cluster_name, local_images)
|
|
||||||
# Note: if no local containers defined, all images come from registries
|
|
||||||
self.connect_api()
|
self.connect_api()
|
||||||
# Create deployment-specific namespace for resource isolation
|
if self.is_kind():
|
||||||
self._ensure_namespace()
|
# Now configure an ingress controller (not installed by default in kind)
|
||||||
if self.is_kind() and not self.skip_cluster_management:
|
install_ingress_for_kind()
|
||||||
# Configure ingress controller (not installed by default in kind)
|
# Wait for ingress to start (deployment provisioning will fail unless this is done)
|
||||||
# Skip if already running (idempotent for shared cluster)
|
|
||||||
if not is_ingress_running():
|
|
||||||
install_ingress_for_kind(self.cluster_info.spec.get_acme_email())
|
|
||||||
# Wait for ingress to start
|
|
||||||
# (deployment provisioning will fail unless this is done)
|
|
||||||
wait_for_ingress_in_kind()
|
wait_for_ingress_in_kind()
|
||||||
# Create RuntimeClass if unlimited_memlock is enabled
|
|
||||||
if self.cluster_info.spec.get_unlimited_memlock():
|
|
||||||
_create_runtime_class(
|
|
||||||
constants.high_memlock_runtime,
|
|
||||||
constants.high_memlock_runtime,
|
|
||||||
)
|
|
||||||
|
|
||||||
else:
|
else:
|
||||||
print("Dry run mode enabled, skipping k8s API connect")
|
print("Dry run mode enabled, skipping k8s API connect")
|
||||||
|
|
||||||
# Create registry secret if configured
|
|
||||||
from stack_orchestrator.deploy.deployment_create import create_registry_secret
|
|
||||||
|
|
||||||
create_registry_secret(self.cluster_info.spec, self.cluster_info.app_name, self.k8s_namespace)
|
|
||||||
|
|
||||||
self._create_volume_data()
|
self._create_volume_data()
|
||||||
self._create_deployment()
|
self._create_deployment()
|
||||||
self._create_jobs()
|
|
||||||
|
|
||||||
http_proxy_info = self.cluster_info.spec.get_http_proxy()
|
http_proxy_info = self.cluster_info.spec.get_http_proxy()
|
||||||
# Note: we don't support tls for kind (enabling tls causes errors)
|
# Note: at present we don't support tls for kind (and enabling tls causes errors)
|
||||||
use_tls = http_proxy_info and not self.is_kind()
|
use_tls = http_proxy_info and not self.is_kind()
|
||||||
certificate = (
|
certificate = self._find_certificate_for_host_name(http_proxy_info[0]["host-name"]) if use_tls else None
|
||||||
self._find_certificate_for_host_name(http_proxy_info[0]["host-name"])
|
|
||||||
if use_tls
|
|
||||||
else None
|
|
||||||
)
|
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
if certificate:
|
if certificate:
|
||||||
print(f"Using existing certificate: {certificate}")
|
print(f"Using existing certificate: {certificate}")
|
||||||
|
|
||||||
ingress = self.cluster_info.get_ingress(
|
ingress: client.V1Ingress = self.cluster_info.get_ingress(use_tls=use_tls, certificate=certificate)
|
||||||
use_tls=use_tls, certificate=certificate
|
|
||||||
)
|
|
||||||
if ingress:
|
if ingress:
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"Sending this ingress: {ingress}")
|
print(f"Sending this ingress: {ingress}")
|
||||||
if not opts.o.dry_run:
|
if not opts.o.dry_run:
|
||||||
ingress_resp = self.networking_api.create_namespaced_ingress(
|
ingress_resp = self.networking_api.create_namespaced_ingress(
|
||||||
namespace=self.k8s_namespace, body=ingress
|
namespace=self.k8s_namespace,
|
||||||
|
body=ingress
|
||||||
)
|
)
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print("Ingress created:")
|
print("Ingress created:")
|
||||||
@ -539,56 +246,118 @@ class K8sDeployer(Deployer):
|
|||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print("No ingress configured")
|
print("No ingress configured")
|
||||||
|
|
||||||
nodeports: List[client.V1Service] = self.cluster_info.get_nodeports()
|
nodeport: client.V1Service = self.cluster_info.get_nodeport()
|
||||||
for nodeport in nodeports:
|
if nodeport:
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"Sending this nodeport: {nodeport}")
|
print(f"Sending this nodeport: {nodeport}")
|
||||||
if not opts.o.dry_run:
|
if not opts.o.dry_run:
|
||||||
nodeport_resp = self.core_api.create_namespaced_service(
|
nodeport_resp = self.core_api.create_namespaced_service(
|
||||||
namespace=self.k8s_namespace, body=nodeport
|
namespace=self.k8s_namespace,
|
||||||
|
body=nodeport
|
||||||
)
|
)
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print("NodePort created:")
|
print("NodePort created:")
|
||||||
print(f"{nodeport_resp}")
|
print(f"{nodeport_resp}")
|
||||||
|
|
||||||
# Call start() hooks — stacks can create additional k8s resources
|
def down(self, timeout, volumes): # noqa: C901
|
||||||
if self.deployment_context:
|
|
||||||
from stack_orchestrator.deploy.deployment_create import call_stack_deploy_start
|
|
||||||
call_stack_deploy_start(self.deployment_context)
|
|
||||||
|
|
||||||
def down(self, timeout, volumes, skip_cluster_management):
|
|
||||||
self.skip_cluster_management = skip_cluster_management
|
|
||||||
self.connect_api()
|
self.connect_api()
|
||||||
|
# Delete the k8s objects
|
||||||
|
|
||||||
app_label = f"app={self.cluster_info.app_name}"
|
|
||||||
|
|
||||||
# PersistentVolumes are cluster-scoped (not namespaced), so delete by label
|
|
||||||
if volumes:
|
if volumes:
|
||||||
try:
|
# Create the host-path-mounted PVs for this deployment
|
||||||
pvs = self.core_api.list_persistent_volume(
|
pvs = self.cluster_info.get_pvs()
|
||||||
label_selector=app_label
|
for pv in pvs:
|
||||||
)
|
|
||||||
for pv in pvs.items:
|
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"Deleting PV: {pv.metadata.name}")
|
print(f"Deleting this pv: {pv}")
|
||||||
try:
|
try:
|
||||||
self.core_api.delete_persistent_volume(name=pv.metadata.name)
|
pv_resp = self.core_api.delete_persistent_volume(name=pv.metadata.name)
|
||||||
except ApiException as e:
|
if opts.o.debug:
|
||||||
|
print("PV deleted:")
|
||||||
|
print(f"{pv_resp}")
|
||||||
|
except client.exceptions.ApiException as e:
|
||||||
_check_delete_exception(e)
|
_check_delete_exception(e)
|
||||||
except ApiException as e:
|
|
||||||
|
# Figure out the PVCs for this deployment
|
||||||
|
pvcs = self.cluster_info.get_pvcs()
|
||||||
|
for pvc in pvcs:
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"Error listing PVs: {e}")
|
print(f"Deleting this pvc: {pvc}")
|
||||||
|
try:
|
||||||
|
pvc_resp = self.core_api.delete_namespaced_persistent_volume_claim(
|
||||||
|
name=pvc.metadata.name, namespace=self.k8s_namespace
|
||||||
|
)
|
||||||
|
if opts.o.debug:
|
||||||
|
print("PVCs deleted:")
|
||||||
|
print(f"{pvc_resp}")
|
||||||
|
except client.exceptions.ApiException as e:
|
||||||
|
_check_delete_exception(e)
|
||||||
|
|
||||||
# When namespace is explicitly set in the spec, it may be shared with
|
# Figure out the ConfigMaps for this deployment
|
||||||
# other stacks — delete only this stack's resources by label.
|
cfg_maps = self.cluster_info.get_configmaps()
|
||||||
# Otherwise the namespace is owned by this deployment, delete it entirely.
|
for cfg_map in cfg_maps:
|
||||||
shared_namespace = self.deployment_context.spec.get_namespace() is not None
|
if opts.o.debug:
|
||||||
if shared_namespace:
|
print(f"Deleting this ConfigMap: {cfg_map}")
|
||||||
self._delete_resources_by_label(app_label, volumes)
|
try:
|
||||||
|
cfg_map_resp = self.core_api.delete_namespaced_config_map(
|
||||||
|
name=cfg_map.metadata.name, namespace=self.k8s_namespace
|
||||||
|
)
|
||||||
|
if opts.o.debug:
|
||||||
|
print("ConfigMap deleted:")
|
||||||
|
print(f"{cfg_map_resp}")
|
||||||
|
except client.exceptions.ApiException as e:
|
||||||
|
_check_delete_exception(e)
|
||||||
|
|
||||||
|
deployment = self.cluster_info.get_deployment()
|
||||||
|
if opts.o.debug:
|
||||||
|
print(f"Deleting this deployment: {deployment}")
|
||||||
|
try:
|
||||||
|
self.apps_api.delete_namespaced_deployment(
|
||||||
|
name=deployment.metadata.name, namespace=self.k8s_namespace
|
||||||
|
)
|
||||||
|
except client.exceptions.ApiException as e:
|
||||||
|
_check_delete_exception(e)
|
||||||
|
|
||||||
|
service: client.V1Service = self.cluster_info.get_service()
|
||||||
|
if opts.o.debug:
|
||||||
|
print(f"Deleting service: {service}")
|
||||||
|
try:
|
||||||
|
self.core_api.delete_namespaced_service(
|
||||||
|
namespace=self.k8s_namespace,
|
||||||
|
name=service.metadata.name
|
||||||
|
)
|
||||||
|
except client.exceptions.ApiException as e:
|
||||||
|
_check_delete_exception(e)
|
||||||
|
|
||||||
|
ingress: client.V1Ingress = self.cluster_info.get_ingress(use_tls=not self.is_kind())
|
||||||
|
if ingress:
|
||||||
|
if opts.o.debug:
|
||||||
|
print(f"Deleting this ingress: {ingress}")
|
||||||
|
try:
|
||||||
|
self.networking_api.delete_namespaced_ingress(
|
||||||
|
name=ingress.metadata.name, namespace=self.k8s_namespace
|
||||||
|
)
|
||||||
|
except client.exceptions.ApiException as e:
|
||||||
|
_check_delete_exception(e)
|
||||||
else:
|
else:
|
||||||
self._delete_namespace()
|
if opts.o.debug:
|
||||||
|
print("No ingress to delete")
|
||||||
|
|
||||||
if self.is_kind() and not self.skip_cluster_management:
|
nodeport: client.V1Service = self.cluster_info.get_nodeport()
|
||||||
|
if nodeport:
|
||||||
|
if opts.o.debug:
|
||||||
|
print(f"Deleting this nodeport: {ingress}")
|
||||||
|
try:
|
||||||
|
self.core_api.delete_namespaced_service(
|
||||||
|
namespace=self.k8s_namespace,
|
||||||
|
name=nodeport.metadata.name
|
||||||
|
)
|
||||||
|
except client.exceptions.ApiException as e:
|
||||||
|
_check_delete_exception(e)
|
||||||
|
else:
|
||||||
|
if opts.o.debug:
|
||||||
|
print("No nodeport to delete")
|
||||||
|
|
||||||
|
if self.is_kind():
|
||||||
# Destroy the kind cluster
|
# Destroy the kind cluster
|
||||||
destroy_cluster(self.kind_cluster_name)
|
destroy_cluster(self.kind_cluster_name)
|
||||||
|
|
||||||
@ -600,7 +369,6 @@ class K8sDeployer(Deployer):
|
|||||||
|
|
||||||
if all_pods.items:
|
if all_pods.items:
|
||||||
for p in all_pods.items:
|
for p in all_pods.items:
|
||||||
if p.metadata and p.metadata.name:
|
|
||||||
if f"{self.cluster_info.app_name}-deployment" in p.metadata.name:
|
if f"{self.cluster_info.app_name}-deployment" in p.metadata.name:
|
||||||
pods.append(p)
|
pods.append(p)
|
||||||
|
|
||||||
@ -611,40 +379,21 @@ class K8sDeployer(Deployer):
|
|||||||
ip = "?"
|
ip = "?"
|
||||||
tls = "?"
|
tls = "?"
|
||||||
try:
|
try:
|
||||||
cluster_ingress = self.cluster_info.get_ingress()
|
ingress = self.networking_api.read_namespaced_ingress(namespace=self.k8s_namespace,
|
||||||
if cluster_ingress is None or cluster_ingress.metadata is None:
|
name=self.cluster_info.get_ingress().metadata.name)
|
||||||
return
|
|
||||||
ingress = cast(
|
|
||||||
client.V1Ingress,
|
|
||||||
self.networking_api.read_namespaced_ingress(
|
|
||||||
namespace=self.k8s_namespace,
|
|
||||||
name=cluster_ingress.metadata.name,
|
|
||||||
),
|
|
||||||
)
|
|
||||||
if not ingress.spec or not ingress.spec.tls or not ingress.spec.rules:
|
|
||||||
return
|
|
||||||
|
|
||||||
cert = cast(
|
cert = self.custom_obj_api.get_namespaced_custom_object(
|
||||||
Dict[str, Any],
|
|
||||||
self.custom_obj_api.get_namespaced_custom_object(
|
|
||||||
group="cert-manager.io",
|
group="cert-manager.io",
|
||||||
version="v1",
|
version="v1",
|
||||||
namespace=self.k8s_namespace,
|
namespace=self.k8s_namespace,
|
||||||
plural="certificates",
|
plural="certificates",
|
||||||
name=ingress.spec.tls[0].secret_name,
|
name=ingress.spec.tls[0].secret_name
|
||||||
),
|
|
||||||
)
|
)
|
||||||
|
|
||||||
hostname = ingress.spec.rules[0].host
|
hostname = ingress.spec.rules[0].host
|
||||||
if ingress.status and ingress.status.load_balancer:
|
ip = ingress.status.load_balancer.ingress[0].ip
|
||||||
lb_ingress = ingress.status.load_balancer.ingress
|
|
||||||
if lb_ingress:
|
|
||||||
ip = lb_ingress[0].ip or "?"
|
|
||||||
cert_status = cert.get("status", {})
|
|
||||||
tls = "notBefore: %s; notAfter: %s; names: %s" % (
|
tls = "notBefore: %s; notAfter: %s; names: %s" % (
|
||||||
cert_status.get("notBefore", "?"),
|
cert["status"]["notBefore"], cert["status"]["notAfter"], ingress.spec.tls[0].hosts
|
||||||
cert_status.get("notAfter", "?"),
|
|
||||||
ingress.spec.tls[0].hosts,
|
|
||||||
)
|
)
|
||||||
except: # noqa: E722
|
except: # noqa: E722
|
||||||
pass
|
pass
|
||||||
@ -657,16 +406,10 @@ class K8sDeployer(Deployer):
|
|||||||
print("Pods:")
|
print("Pods:")
|
||||||
|
|
||||||
for p in pods:
|
for p in pods:
|
||||||
if not p.metadata:
|
|
||||||
continue
|
|
||||||
ns = p.metadata.namespace
|
|
||||||
name = p.metadata.name
|
|
||||||
if p.metadata.deletion_timestamp:
|
if p.metadata.deletion_timestamp:
|
||||||
ts = p.metadata.deletion_timestamp
|
print(f"\t{p.metadata.namespace}/{p.metadata.name}: Terminating ({p.metadata.deletion_timestamp})")
|
||||||
print(f"\t{ns}/{name}: Terminating ({ts})")
|
|
||||||
else:
|
else:
|
||||||
ts = p.metadata.creation_timestamp
|
print(f"\t{p.metadata.namespace}/{p.metadata.name}: Running ({p.metadata.creation_timestamp})")
|
||||||
print(f"\t{ns}/{name}: Running ({ts})")
|
|
||||||
|
|
||||||
def ps(self):
|
def ps(self):
|
||||||
self.connect_api()
|
self.connect_api()
|
||||||
@ -681,22 +424,19 @@ class K8sDeployer(Deployer):
|
|||||||
for c in p.spec.containers:
|
for c in p.spec.containers:
|
||||||
if c.ports:
|
if c.ports:
|
||||||
for prt in c.ports:
|
for prt in c.ports:
|
||||||
ports[str(prt.container_port)] = [
|
ports[str(prt.container_port)] = [AttrDict({
|
||||||
AttrDict(
|
"HostIp": pod_ip,
|
||||||
{"HostIp": pod_ip, "HostPort": prt.container_port}
|
"HostPort": prt.container_port
|
||||||
)
|
})]
|
||||||
]
|
|
||||||
|
|
||||||
ret.append(
|
ret.append(AttrDict({
|
||||||
AttrDict(
|
|
||||||
{
|
|
||||||
"id": f"{p.metadata.namespace}/{p.metadata.name}",
|
"id": f"{p.metadata.namespace}/{p.metadata.name}",
|
||||||
"name": p.metadata.name,
|
"name": p.metadata.name,
|
||||||
"namespace": p.metadata.namespace,
|
"namespace": p.metadata.namespace,
|
||||||
"network_settings": AttrDict({"ports": ports}),
|
"network_settings": AttrDict({
|
||||||
}
|
"ports": ports
|
||||||
)
|
})
|
||||||
)
|
}))
|
||||||
|
|
||||||
return ret
|
return ret
|
||||||
|
|
||||||
@ -711,133 +451,59 @@ class K8sDeployer(Deployer):
|
|||||||
|
|
||||||
def logs(self, services, tail, follow, stream):
|
def logs(self, services, tail, follow, stream):
|
||||||
self.connect_api()
|
self.connect_api()
|
||||||
pods = pods_in_deployment(self.core_api, self.cluster_info.app_name, namespace=self.k8s_namespace)
|
pods = pods_in_deployment(self.core_api, self.cluster_info.app_name)
|
||||||
if len(pods) > 1:
|
if len(pods) > 1:
|
||||||
print("Warning: more than one pod in the deployment")
|
print("Warning: more than one pod in the deployment")
|
||||||
if len(pods) == 0:
|
if len(pods) == 0:
|
||||||
log_data = "******* Pods not running ********\n"
|
log_data = "******* Pods not running ********\n"
|
||||||
else:
|
else:
|
||||||
k8s_pod_name = pods[0]
|
k8s_pod_name = pods[0]
|
||||||
containers = containers_in_pod(self.core_api, k8s_pod_name, namespace=self.k8s_namespace)
|
containers = containers_in_pod(self.core_api, k8s_pod_name)
|
||||||
# If pod not started, logs request below will throw an exception
|
# If the pod is not yet started, the logs request below will throw an exception
|
||||||
try:
|
try:
|
||||||
log_data = ""
|
log_data = ""
|
||||||
for container in containers:
|
for container in containers:
|
||||||
container_log = self.core_api.read_namespaced_pod_log(
|
container_log = self.core_api.read_namespaced_pod_log(k8s_pod_name, namespace="default", container=container)
|
||||||
k8s_pod_name, namespace=self.k8s_namespace, container=container
|
|
||||||
)
|
|
||||||
container_log_lines = container_log.splitlines()
|
container_log_lines = container_log.splitlines()
|
||||||
for line in container_log_lines:
|
for line in container_log_lines:
|
||||||
log_data += f"{container}: {line}\n"
|
log_data += f"{container}: {line}\n"
|
||||||
except ApiException as e:
|
except client.exceptions.ApiException as e:
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"Error from read_namespaced_pod_log: {e}")
|
print(f"Error from read_namespaced_pod_log: {e}")
|
||||||
log_data = "******* No logs available ********\n"
|
log_data = "******* No logs available ********\n"
|
||||||
return log_stream_from_string(log_data)
|
return log_stream_from_string(log_data)
|
||||||
|
|
||||||
def update(self):
|
def update(self):
|
||||||
if not self.cluster_info.parsed_pod_yaml_map:
|
|
||||||
if opts.o.debug:
|
|
||||||
print("No pods defined, skipping update")
|
|
||||||
return
|
|
||||||
self.connect_api()
|
self.connect_api()
|
||||||
ref_deployment = self.cluster_info.get_deployment()
|
ref_deployment = self.cluster_info.get_deployment()
|
||||||
if not ref_deployment or not ref_deployment.metadata:
|
|
||||||
return
|
|
||||||
ref_name = ref_deployment.metadata.name
|
|
||||||
if not ref_name:
|
|
||||||
return
|
|
||||||
|
|
||||||
deployment = cast(
|
deployment = self.apps_api.read_namespaced_deployment(
|
||||||
client.V1Deployment,
|
name=ref_deployment.metadata.name,
|
||||||
self.apps_api.read_namespaced_deployment(
|
namespace=self.k8s_namespace
|
||||||
name=ref_name, namespace=self.k8s_namespace
|
|
||||||
),
|
|
||||||
)
|
)
|
||||||
if not deployment.spec or not deployment.spec.template:
|
|
||||||
return
|
|
||||||
template_spec = deployment.spec.template.spec
|
|
||||||
if not template_spec or not template_spec.containers:
|
|
||||||
return
|
|
||||||
|
|
||||||
ref_spec = ref_deployment.spec
|
new_env = ref_deployment.spec.template.spec.containers[0].env
|
||||||
if ref_spec and ref_spec.template and ref_spec.template.spec:
|
for container in deployment.spec.template.spec.containers:
|
||||||
ref_containers = ref_spec.template.spec.containers
|
|
||||||
if ref_containers:
|
|
||||||
new_env = ref_containers[0].env
|
|
||||||
for container in template_spec.containers:
|
|
||||||
old_env = container.env
|
old_env = container.env
|
||||||
if old_env != new_env:
|
if old_env != new_env:
|
||||||
container.env = new_env
|
container.env = new_env
|
||||||
|
|
||||||
template_meta = deployment.spec.template.metadata
|
deployment.spec.template.metadata.annotations = {
|
||||||
if template_meta:
|
|
||||||
template_meta.annotations = {
|
|
||||||
"kubectl.kubernetes.io/restartedAt": datetime.utcnow()
|
"kubectl.kubernetes.io/restartedAt": datetime.utcnow()
|
||||||
.replace(tzinfo=timezone.utc)
|
.replace(tzinfo=timezone.utc)
|
||||||
.isoformat()
|
.isoformat()
|
||||||
}
|
}
|
||||||
|
|
||||||
self.apps_api.patch_namespaced_deployment(
|
self.apps_api.patch_namespaced_deployment(
|
||||||
name=ref_name,
|
name=ref_deployment.metadata.name,
|
||||||
namespace=self.k8s_namespace,
|
namespace=self.k8s_namespace,
|
||||||
body=deployment,
|
body=deployment
|
||||||
)
|
)
|
||||||
|
|
||||||
def run(
|
def run(self, image: str, command=None, user=None, volumes=None, entrypoint=None, env={}, ports=[], detach=False):
|
||||||
self,
|
|
||||||
image: str,
|
|
||||||
command=None,
|
|
||||||
user=None,
|
|
||||||
volumes=None,
|
|
||||||
entrypoint=None,
|
|
||||||
env={},
|
|
||||||
ports=[],
|
|
||||||
detach=False,
|
|
||||||
):
|
|
||||||
# We need to figure out how to do this -- check why we're being called first
|
# We need to figure out how to do this -- check why we're being called first
|
||||||
pass
|
pass
|
||||||
|
|
||||||
def run_job(self, job_name: str, helm_release: Optional[str] = None):
|
|
||||||
if not opts.o.dry_run:
|
|
||||||
# Check if this is a helm-based deployment
|
|
||||||
chart_dir = self.deployment_dir / "chart"
|
|
||||||
if chart_dir.exists():
|
|
||||||
from stack_orchestrator.deploy.k8s.helm.job_runner import run_helm_job
|
|
||||||
|
|
||||||
# Run the job using the helm job runner
|
|
||||||
run_helm_job(
|
|
||||||
chart_dir=chart_dir,
|
|
||||||
job_name=job_name,
|
|
||||||
release=helm_release,
|
|
||||||
namespace=self.k8s_namespace,
|
|
||||||
timeout=600,
|
|
||||||
verbose=opts.o.verbose,
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
# Non-Helm path: create job from ClusterInfo
|
|
||||||
self.connect_api()
|
|
||||||
jobs = self.cluster_info.get_jobs(
|
|
||||||
image_pull_policy=None if self.is_kind() else "Always"
|
|
||||||
)
|
|
||||||
# Find the matching job by name
|
|
||||||
target_name = f"{self.cluster_info.app_name}-job-{job_name}"
|
|
||||||
matched_job = None
|
|
||||||
for job in jobs:
|
|
||||||
if job.metadata and job.metadata.name == target_name:
|
|
||||||
matched_job = job
|
|
||||||
break
|
|
||||||
if matched_job is None:
|
|
||||||
raise Exception(
|
|
||||||
f"Job '{job_name}' not found. Available jobs: "
|
|
||||||
f"{[j.metadata.name for j in jobs if j.metadata]}"
|
|
||||||
)
|
|
||||||
if opts.o.debug:
|
|
||||||
print(f"Creating job: {target_name}")
|
|
||||||
self.batch_api.create_namespaced_job(
|
|
||||||
body=matched_job, namespace=self.k8s_namespace
|
|
||||||
)
|
|
||||||
|
|
||||||
def is_kind(self):
|
def is_kind(self):
|
||||||
return self.type == "k8s-kind"
|
return self.type == "k8s-kind"
|
||||||
|
|
||||||
@ -853,20 +519,6 @@ class K8sDeployerConfigGenerator(DeployerConfigGenerator):
|
|||||||
def generate(self, deployment_dir: Path):
|
def generate(self, deployment_dir: Path):
|
||||||
# No need to do this for the remote k8s case
|
# No need to do this for the remote k8s case
|
||||||
if self.type == "k8s-kind":
|
if self.type == "k8s-kind":
|
||||||
# Generate high-memlock-spec.json if unlimited_memlock is enabled.
|
|
||||||
# Must be done before generate_kind_config() which references it.
|
|
||||||
if self.deployment_context.spec.get_unlimited_memlock():
|
|
||||||
spec_content = generate_high_memlock_spec_json()
|
|
||||||
spec_file = deployment_dir.joinpath(
|
|
||||||
constants.high_memlock_spec_filename
|
|
||||||
)
|
|
||||||
if opts.o.debug:
|
|
||||||
print(
|
|
||||||
f"Creating high-memlock spec for unlimited memlock: {spec_file}"
|
|
||||||
)
|
|
||||||
with open(spec_file, "w") as output_file:
|
|
||||||
output_file.write(spec_content)
|
|
||||||
|
|
||||||
# Check the file isn't already there
|
# Check the file isn't already there
|
||||||
# Get the config file contents
|
# Get the config file contents
|
||||||
content = generate_kind_config(deployment_dir, self.deployment_context)
|
content = generate_kind_config(deployment_dir, self.deployment_context)
|
||||||
|
|||||||
@ -1,14 +0,0 @@
|
|||||||
# Copyright © 2025 Vulcanize
|
|
||||||
|
|
||||||
# This program is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Affero General Public License as published by
|
|
||||||
# the Free Software Foundation, either version 3 of the License, or
|
|
||||||
# (at your option) any later version.
|
|
||||||
|
|
||||||
# This program is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Affero General Public License for more details.
|
|
||||||
|
|
||||||
# You should have received a copy of the GNU Affero General Public License
|
|
||||||
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user