forked from cerc-io/stack-orchestrator
Compare commits
41 Commits
| SHA1 |
|---|
| 55b76b9b57 |
| d07a3afd27 |
| a5b373da26 |
| 99db75da19 |
| d4e935484f |
| 4f01054781 |
| 811bbd9db4 |
| 8d9682eb47 |
| 638435873c |
| 97a85359ff |
| ffa00767d4 |
| 86462c940f |
| 87db167d7f |
| dd856af2d3 |
| cd3d908d0d |
| 03f9acf869 |
| ba1aad9fa6 |
| dc36a6564a |
| c5c3fc1618 |
| 2e384b7179 |
| b708836aa9 |
| d8da9b6515 |
| 5a1399f2b2 |
| 89db6e1e92 |
| 9bd59f29d9 |
| 55d6c5b495 |
| f3ef3e9a1f |
| 1768bd0fe1 |
| 8afae1904b |
| 7acabb0743 |
| ccccd9f957 |
| 34f3b719e4 |
| 0e814bd4da |
| 873a6d472c |
| 39df4683ac |
| 23ca4c4341 |
| f64ef5d128 |
| 5f8e809b2d |
| 4a7df2de33 |
| 0c47da42fe |
| e290c62aca |
@@ -39,7 +39,7 @@ jobs:
      - name: "Print Python version"
        run: python3 --version
      - name: "Install shiv"
-       run: pip install shiv
+       run: pip install shiv==1.0.6
      - name: "Generate build version file"
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
@@ -35,7 +35,7 @@ jobs:
      - name: "Print Python version"
        run: python3 --version
      - name: "Install shiv"
-       run: pip install shiv
+       run: pip install shiv==1.0.6
      - name: "Build local shiv package"
        id: build
        run: |
@@ -33,7 +33,7 @@ jobs:
      - name: "Print Python version"
        run: python3 --version
      - name: "Install shiv"
-       run: pip install shiv
+       run: pip install shiv==1.0.6
      - name: "Generate build version file"
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
@@ -33,7 +33,7 @@ jobs:
      - name: "Print Python version"
        run: python3 --version
      - name: "Install shiv"
-       run: pip install shiv
+       run: pip install shiv==1.0.6
      - name: "Generate build version file"
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
@@ -2,7 +2,8 @@ name: Deploy Test

on:
  pull_request:
-   branches: '*'
+   branches:
+     - main
  push:
    branches:
      - main
@@ -33,7 +34,7 @@ jobs:
      - name: "Print Python version"
        run: python3 --version
      - name: "Install shiv"
-       run: pip install shiv
+       run: pip install shiv==1.0.6
      - name: "Generate build version file"
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
@@ -33,7 +33,7 @@ jobs:
      - name: "Print Python version"
        run: python3 --version
      - name: "Install shiv"
-       run: pip install shiv
+       run: pip install shiv==1.0.6
      - name: "Generate build version file"
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
@@ -2,7 +2,8 @@ name: K8s Deploy Test

on:
  pull_request:
-   branches: '*'
+   branches:
+     - main
  push:
    branches: '*'
    paths:
@@ -35,7 +36,7 @@ jobs:
      - name: "Print Python version"
        run: python3 --version
      - name: "Install shiv"
-       run: pip install shiv
+       run: pip install shiv==1.0.6
      - name: "Generate build version file"
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
@@ -2,7 +2,8 @@ name: K8s Deployment Control Test

on:
  pull_request:
-   branches: '*'
+   branches:
+     - main
  push:
    branches: '*'
    paths:
@@ -35,7 +36,7 @@ jobs:
      - name: "Print Python version"
        run: python3 --version
      - name: "Install shiv"
-       run: pip install shiv
+       run: pip install shiv==1.0.6
      - name: "Generate build version file"
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
@@ -2,7 +2,8 @@ name: Webapp Test

on:
  pull_request:
-   branches: '*'
+   branches:
+     - main
  push:
    branches:
      - main
@@ -32,7 +33,7 @@ jobs:
      - name: "Print Python version"
        run: python3 --version
      - name: "Install shiv"
-       run: pip install shiv
+       run: pip install shiv==1.0.6
      - name: "Generate build version file"
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
@@ -33,7 +33,7 @@ jobs:
      - name: "Print Python version"
        run: python3 --version
      - name: "Install shiv"
-       run: pip install shiv
+       run: pip install shiv==1.0.6
      - name: "Generate build version file"
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
@@ -1 +1,3 @@
Change this file to trigger running the test-container-registry CI job
+Triggered: 2026-01-21
+Triggered: 2026-01-21 19:28:29
@@ -1,2 +1 @@
Change this file to trigger running the fixturenet-eth-test CI job
-
.pre-commit-config.yaml (new file, 34 lines)
@@ -0,0 +1,34 @@
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: check-yaml
        args: ['--allow-multiple-documents']
      - id: check-json
      - id: check-merge-conflict
      - id: check-added-large-files

  - repo: https://github.com/psf/black
    rev: 23.12.1
    hooks:
      - id: black
        language_version: python3

  - repo: https://github.com/PyCQA/flake8
    rev: 7.1.1
    hooks:
      - id: flake8
        args: ['--max-line-length=88', '--extend-ignore=E203,W503,E402']

  - repo: https://github.com/RobertCraigie/pyright-python
    rev: v1.1.345
    hooks:
      - id: pyright

  - repo: https://github.com/adrienverge/yamllint
    rev: v1.35.1
    hooks:
      - id: yamllint
        args: [-d, relaxed]
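With this config in place, the hooks run through the standard pre-commit workflow (shown here for orientation):

```bash
# One-time setup in a clone of the repo
pip install pre-commit
pre-commit install            # registers the git hook

# Run every configured hook against the whole tree
pre-commit run --all-files
```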
AI-FRIENDLY-PLAN.md (new file, 151 lines)
@@ -0,0 +1,151 @@
# Plan: Make Stack-Orchestrator AI-Friendly

## Goal

Make the stack-orchestrator repository easier for AI tools (Claude Code, Cursor, Copilot) to understand and use for generating stacks, including adding a `create-stack` command.

---

## Part 1: Documentation & Context Files

### 1.1 Add CLAUDE.md

Create a root-level context file for AI assistants.

**File:** `CLAUDE.md`

Contents:
- Project overview (what stack-orchestrator does)
- Stack creation workflow (step-by-step)
- File naming conventions
- Required vs optional fields in stack.yml
- Common patterns and anti-patterns
- Links to example stacks (simple, medium, complex)

### 1.2 Add JSON Schema for stack.yml

Create formal validation schema.

**File:** `schemas/stack-schema.json`
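A minimal sketch of what this schema could contain, based on the fields in the template in section 1.3 (field names and constraints here are illustrative, not final):

```json
{
  "$schema": "http://json-schema.org/draft-07/schema#",
  "title": "stack.yml",
  "type": "object",
  "required": ["version", "name"],
  "properties": {
    "version": { "enum": ["1.0", "1.1", "1.2"] },
    "name": { "type": "string", "pattern": "^[a-z0-9][a-z0-9-]*$" },
    "description": { "type": "string" },
    "repos": { "type": "array", "items": { "type": "string" } },
    "containers": { "type": "array", "items": { "type": "string", "pattern": "^cerc/" } },
    "pods": { "type": "array", "items": { "type": "string" } }
  }
}
```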
Benefits:
- AI tools can validate generated stacks
- IDEs provide autocomplete
- CI can catch errors early

### 1.3 Add Template Stack with Comments

Create an annotated template for reference.

**File:** `stack_orchestrator/data/stacks/_template/stack.yml`

```yaml
# Stack definition template - copy this directory to create a new stack
version: "1.2"                              # Required: 1.0, 1.1, or 1.2
name: my-stack                              # Required: lowercase, hyphens only
description: "Human-readable description"   # Optional
repos:                                      # Git repositories to clone
  - github.com/org/repo
containers:                                 # Container images to build (must have matching container-build/)
  - cerc/my-container
pods:                                       # Deployment units (must have matching docker-compose-{pod}.yml)
  - my-pod
```

### 1.4 Document Validation Rules

Create explicit documentation of constraints currently scattered in code.

**File:** `docs/stack-format.md`

Contents:
- Container names must start with `cerc/`
- Pod names must match compose file: `docker-compose-{pod}.yml`
- Repository format: `host/org/repo[@ref]`
- Stack directory name should match `name` field
- Version field options and differences
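To make these rules concrete, a hypothetical stack fragment that satisfies all of them (names and the repo ref are illustrative only):

```yaml
name: my-app                               # matches the stacks/my-app/ directory name
repos:
  - github.com/example-org/my-app@v1.0.0   # host/org/repo[@ref]
containers:
  - cerc/my-app                            # container names start with cerc/
pods:
  - my-app                                 # requires compose/docker-compose-my-app.yml
```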
---

## Part 2: Add `create-stack` Command

### 2.1 Command Overview

```bash
laconic-so create-stack --repo github.com/org/my-app [--name my-app] [--type webapp]
```

**Behavior:**
1. Parse repo URL to extract app name (if --name not provided)
2. Create `stacks/{name}/stack.yml`
3. Create `container-build/cerc-{name}/Dockerfile` and `build.sh`
4. Create `compose/docker-compose-{name}.yml`
5. Update list files (repository-list.txt, container-image-list.txt, pod-list.txt)

### 2.2 Files to Create

| File | Purpose |
|------|---------|
| `stack_orchestrator/create/__init__.py` | Package init |
| `stack_orchestrator/create/create_stack.py` | Command implementation |

### 2.3 Files to Modify

| File | Change |
|------|--------|
| `stack_orchestrator/main.py` | Add import and `cli.add_command()` |

### 2.4 Command Options

| Option | Required | Description |
|--------|----------|-------------|
| `--repo` | Yes | Git repository URL (e.g., github.com/org/repo) |
| `--name` | No | Stack name (defaults to repo name) |
| `--type` | No | Template type: webapp, service, empty (default: webapp) |
| `--force` | No | Overwrite existing files |

### 2.5 Template Types

| Type | Base Image | Port | Use Case |
|------|------------|------|----------|
| webapp | node:20-bullseye-slim | 3000 | React/Vue/Next.js apps |
| service | python:3.11-slim | 8080 | Python backend services |
| empty | none | none | Custom from scratch |

---

## Part 3: Implementation Summary

### New Files (6)

1. `CLAUDE.md` - AI assistant context
2. `schemas/stack-schema.json` - Validation schema
3. `stack_orchestrator/data/stacks/_template/stack.yml` - Annotated template
4. `docs/stack-format.md` - Stack format documentation
5. `stack_orchestrator/create/__init__.py` - Package init
6. `stack_orchestrator/create/create_stack.py` - Command implementation

### Modified Files (1)

1. `stack_orchestrator/main.py` - Register create-stack command

---

## Verification

```bash
# 1. Command appears in help
laconic-so --help | grep create-stack

# 2. Dry run works
laconic-so --dry-run create-stack --repo github.com/org/test-app

# 3. Creates all expected files
laconic-so create-stack --repo github.com/org/test-app
ls stack_orchestrator/data/stacks/test-app/
ls stack_orchestrator/data/container-build/cerc-test-app/
ls stack_orchestrator/data/compose/docker-compose-test-app.yml

# 4. Build works with generated stack
laconic-so --stack test-app build-containers
```
CLAUDE.md (new file, 50 lines)
@@ -0,0 +1,50 @@
# CLAUDE.md

This file provides guidance to Claude Code when working with the stack-orchestrator project.

## Some rules to follow
NEVER speculate about the cause of something
NEVER assume your hypotheses are true without evidence

ALWAYS clearly state when something is a hypothesis
ALWAYS use evidence from the systems you're interacting with to support your claims and hypotheses

## Key Principles

### Development Guidelines
- **Single responsibility** - Each component has one clear purpose
- **Fail fast** - Let errors propagate, don't hide failures
- **DRY/KISS** - Minimize duplication and complexity

## Development Philosophy: Conversational Literate Programming

### Approach
This project follows principles inspired by literate programming, where development happens through explanatory conversation rather than code-first implementation.

### Core Principles
- **Documentation-First**: All changes begin with discussion of intent and reasoning
- **Narrative-Driven**: Complex systems are explained through conversational exploration
- **Justification Required**: Every coding task must have a corresponding TODO.md item explaining the "why"
- **Iterative Understanding**: Architecture and implementation evolve through dialogue

### Working Method
1. **Explore and Understand**: Read existing code to understand current state
2. **Discuss Architecture**: Workshop complex design decisions through conversation
3. **Document Intent**: Update TODO.md with clear justification before coding
4. **Explain Changes**: Each modification includes reasoning and context
5. **Maintain Narrative**: Conversations serve as living documentation of design evolution

### Implementation Guidelines
- Treat conversations as primary documentation
- Explain architectural decisions before implementing
- Use TODO.md as the "literate document" that justifies all work
- Maintain clear narrative threads across sessions
- Workshop complex ideas before coding

This approach treats the human-AI collaboration as a form of **conversational literate programming** where understanding emerges through dialogue before code implementation.

## Insights and Observations

### Design Principles
- **When something times out, that doesn't mean it needs a longer timeout; it means something that was expected never happened, not that we need to wait longer for it.**
- **NEVER change a timeout because you believe something was truncated; you don't understand timeouts, so don't edit them unless told to explicitly by the user.**
@@ -78,5 +78,3 @@ See the [CONTRIBUTING.md](/docs/CONTRIBUTING.md) for developer mode install.
## Platform Support

Native aarm64 is _not_ currently supported. x64 emulation on ARM64 macos should work (not yet tested).
-
-
STACK-CREATION-GUIDE.md (new file, 413 lines)
@@ -0,0 +1,413 @@
# Implementing `laconic-so create-stack` Command

A plan for adding a new CLI command to scaffold stack files automatically.

---

## Overview

Add a `create-stack` command that generates all required files for a new stack:

```bash
laconic-so create-stack --name my-stack --type webapp
```

**Output:**
```
stack_orchestrator/data/
├── stacks/my-stack/stack.yml
├── container-build/cerc-my-stack/
│   ├── Dockerfile
│   └── build.sh
└── compose/docker-compose-my-stack.yml

Updated: repository-list.txt, container-image-list.txt, pod-list.txt
```

---

## CLI Architecture Summary

### Command Registration Pattern

Commands are Click functions registered in `main.py`:

```python
# main.py (line ~70)
from stack_orchestrator.create import create_stack
cli.add_command(create_stack.command, "create-stack")
```

### Global Options Access

```python
from stack_orchestrator.opts import opts

if not opts.o.quiet:
    print("message")
if opts.o.dry_run:
    print("(would create files)")
```

### Key Utilities

| Function | Location | Purpose |
|----------|----------|---------|
| `get_yaml()` | `util.py` | YAML parser (ruamel.yaml) |
| `get_stack_path(stack)` | `util.py` | Resolve stack directory path |
| `error_exit(msg)` | `util.py` | Print error and exit(1) |

---

## Files to Create

### 1. Command Module

**`stack_orchestrator/create/__init__.py`**
```python
# Empty file to make this a package
```
**`stack_orchestrator/create/create_stack.py`**
```python
import click
import os
import re
from pathlib import Path
from stack_orchestrator.opts import opts
from stack_orchestrator.util import error_exit, get_yaml

# Template types
STACK_TEMPLATES = {
    "webapp": {
        "description": "Web application with Node.js",
        "base_image": "node:20-bullseye-slim",
        "port": 3000,
    },
    "service": {
        "description": "Backend service",
        "base_image": "python:3.11-slim",
        "port": 8080,
    },
    "empty": {
        "description": "Minimal stack with no defaults",
        "base_image": None,
        "port": None,
    },
}


def get_data_dir() -> Path:
    """Get path to stack_orchestrator/data directory"""
    return Path(__file__).absolute().parent.parent.joinpath("data")


def validate_stack_name(name: str) -> None:
    """Validate stack name follows conventions"""
    # Lowercase alphanumerics and hyphens, starting and ending with an alphanumeric
    if not re.match(r'^[a-z0-9]([a-z0-9-]*[a-z0-9])?$', name):
        error_exit(f"Invalid stack name '{name}'. Use lowercase alphanumeric with hyphens.")
    if name.startswith("cerc-"):
        error_exit("Stack name should not start with 'cerc-' (container names will add this prefix)")


def create_stack_yml(stack_dir: Path, name: str, template: dict, repo_url: str) -> None:
    """Create stack.yml file"""
    config = {
        "version": "1.2",
        "name": name,
        "description": template.get("description", f"Stack: {name}"),
        "repos": [repo_url] if repo_url else [],
        "containers": [f"cerc/{name}"],
        "pods": [name],
    }

    stack_dir.mkdir(parents=True, exist_ok=True)
    with open(stack_dir / "stack.yml", "w") as f:
        get_yaml().dump(config, f)


def create_dockerfile(container_dir: Path, name: str, template: dict) -> None:
    """Create Dockerfile"""
    base_image = template.get("base_image", "node:20-bullseye-slim")
    port = template.get("port", 3000)

    dockerfile_content = f'''# Build stage
FROM {base_image} AS builder

WORKDIR /app
COPY package*.json ./
RUN npm ci
COPY . .
RUN npm run build

# Production stage
FROM {base_image}

WORKDIR /app
COPY package*.json ./
RUN npm ci --only=production
COPY --from=builder /app/dist ./dist

EXPOSE {port}
CMD ["npm", "run", "start"]
'''

    container_dir.mkdir(parents=True, exist_ok=True)
    with open(container_dir / "Dockerfile", "w") as f:
        f.write(dockerfile_content)


def create_build_script(container_dir: Path, name: str) -> None:
    """Create build.sh script"""
    build_script = f'''#!/usr/bin/env bash
# Build cerc/{name}

source ${{CERC_CONTAINER_BASE_DIR}}/build-base.sh

SCRIPT_DIR=$( cd -- "$( dirname -- "${{BASH_SOURCE[0]}}" )" &> /dev/null && pwd )

docker build -t cerc/{name}:local \\
  -f ${{SCRIPT_DIR}}/Dockerfile \\
  ${{build_command_args}} \\
  ${{CERC_REPO_BASE_DIR}}/{name}
'''

    build_path = container_dir / "build.sh"
    with open(build_path, "w") as f:
        f.write(build_script)

    # Make executable
    os.chmod(build_path, 0o755)


def create_compose_file(compose_dir: Path, name: str, template: dict) -> None:
    """Create docker-compose file"""
    port = template.get("port", 3000)

    compose_content = {
        "version": "3.8",
        "services": {
            name: {
                "image": f"cerc/{name}:local",
                "restart": "unless-stopped",
                "ports": [f"${{HOST_PORT:-{port}}}:{port}"],
                "environment": {
                    "NODE_ENV": "${NODE_ENV:-production}",
                },
            }
        }
    }

    with open(compose_dir / f"docker-compose-{name}.yml", "w") as f:
        get_yaml().dump(compose_content, f)


def update_list_file(data_dir: Path, filename: str, entry: str) -> None:
    """Add entry to a list file if not already present"""
    list_path = data_dir / filename

    # Read existing entries
    existing = set()
    if list_path.exists():
        with open(list_path, "r") as f:
            existing = set(line.strip() for line in f if line.strip())

    # Add new entry
    if entry not in existing:
        with open(list_path, "a") as f:
            f.write(f"{entry}\n")


@click.command()
@click.option("--name", required=True, help="Name of the new stack (lowercase, hyphens)")
@click.option("--type", "stack_type", default="webapp",
              type=click.Choice(list(STACK_TEMPLATES.keys())),
              help="Stack template type")
@click.option("--repo", help="Git repository URL (e.g., github.com/org/repo)")
@click.option("--force", is_flag=True, help="Overwrite existing files")
@click.pass_context
def command(ctx, name: str, stack_type: str, repo: str, force: bool):
    """Create a new stack with all required files.

    Examples:

      laconic-so create-stack --name my-app --type webapp

      laconic-so create-stack --name my-service --type service --repo github.com/org/repo
    """
    # Validate
    validate_stack_name(name)

    template = STACK_TEMPLATES[stack_type]
    data_dir = get_data_dir()

    # Define paths
    stack_dir = data_dir / "stacks" / name
    container_dir = data_dir / "container-build" / f"cerc-{name}"
    compose_dir = data_dir / "compose"

    # Check for existing files
    if not force:
        if stack_dir.exists():
            error_exit(f"Stack already exists: {stack_dir}\nUse --force to overwrite")
        if container_dir.exists():
            error_exit(f"Container build dir exists: {container_dir}\nUse --force to overwrite")

    # Dry run check
    if opts.o.dry_run:
        print(f"Would create stack '{name}' with template '{stack_type}':")
        print(f"  - {stack_dir}/stack.yml")
        print(f"  - {container_dir}/Dockerfile")
        print(f"  - {container_dir}/build.sh")
        print(f"  - {compose_dir}/docker-compose-{name}.yml")
        print("  - Update repository-list.txt")
        print("  - Update container-image-list.txt")
        print("  - Update pod-list.txt")
        return

    # Create files
    if not opts.o.quiet:
        print(f"Creating stack '{name}' with template '{stack_type}'...")

    create_stack_yml(stack_dir, name, template, repo)
    if opts.o.verbose:
        print(f"  Created {stack_dir}/stack.yml")

    create_dockerfile(container_dir, name, template)
    if opts.o.verbose:
        print(f"  Created {container_dir}/Dockerfile")

    create_build_script(container_dir, name)
    if opts.o.verbose:
        print(f"  Created {container_dir}/build.sh")

    create_compose_file(compose_dir, name, template)
    if opts.o.verbose:
        print(f"  Created {compose_dir}/docker-compose-{name}.yml")

    # Update list files
    if repo:
        update_list_file(data_dir, "repository-list.txt", repo)
        if opts.o.verbose:
            print(f"  Added {repo} to repository-list.txt")

    update_list_file(data_dir, "container-image-list.txt", f"cerc/{name}")
    if opts.o.verbose:
        print(f"  Added cerc/{name} to container-image-list.txt")

    update_list_file(data_dir, "pod-list.txt", name)
    if opts.o.verbose:
        print(f"  Added {name} to pod-list.txt")

    # Summary
    if not opts.o.quiet:
        print(f"\nStack '{name}' created successfully!")
        print("\nNext steps:")
        print(f"  1. Edit {stack_dir}/stack.yml")
        print(f"  2. Customize {container_dir}/Dockerfile")
        print(f"  3. Run: laconic-so --stack {name} build-containers")
        print(f"  4. Run: laconic-so --stack {name} deploy-system up")
```
### 2. Register Command in main.py

**Edit `stack_orchestrator/main.py`**

Add import:
```python
from stack_orchestrator.create import create_stack
```

Add command registration (after line ~78):
```python
cli.add_command(create_stack.command, "create-stack")
```

---

## Implementation Steps

### Step 1: Create module structure
```bash
mkdir -p stack_orchestrator/create
touch stack_orchestrator/create/__init__.py
```

### Step 2: Create the command file
Create `stack_orchestrator/create/create_stack.py` with the code above.

### Step 3: Register in main.py
Add the import and `cli.add_command()` line.

### Step 4: Test the command
```bash
# Show help
laconic-so create-stack --help

# Dry run
laconic-so --dry-run create-stack --name test-app --type webapp

# Create a stack
laconic-so create-stack --name test-app --type webapp --repo github.com/org/test-app

# Verify
ls -la stack_orchestrator/data/stacks/test-app/
cat stack_orchestrator/data/stacks/test-app/stack.yml
```

---

## Template Types

| Type | Base Image | Port | Use Case |
|------|------------|------|----------|
| `webapp` | node:20-bullseye-slim | 3000 | React/Vue/Next.js apps |
| `service` | python:3.11-slim | 8080 | Python backend services |
| `empty` | none | none | Custom from scratch |

---

## Future Enhancements

1. **Interactive mode** - Prompt for values if not provided
2. **More templates** - Go, Rust, database stacks
3. **Template from existing** - `--from-stack existing-stack`
4. **External stack support** - Create in custom directory
5. **Validation command** - `laconic-so validate-stack --name my-stack`

---

## Files Modified

| File | Change |
|------|--------|
| `stack_orchestrator/create/__init__.py` | New (empty) |
| `stack_orchestrator/create/create_stack.py` | New (command implementation) |
| `stack_orchestrator/main.py` | Add import and `cli.add_command()` |

---

## Verification

```bash
# 1. Command appears in help
laconic-so --help | grep create-stack

# 2. Dry run works
laconic-so --dry-run create-stack --name verify-test --type webapp

# 3. Full creation works
laconic-so create-stack --name verify-test --type webapp
ls stack_orchestrator/data/stacks/verify-test/
ls stack_orchestrator/data/container-build/cerc-verify-test/
ls stack_orchestrator/data/compose/docker-compose-verify-test.yml

# 4. Build works
laconic-so --stack verify-test build-containers

# 5. Cleanup
rm -rf stack_orchestrator/data/stacks/verify-test
rm -rf stack_orchestrator/data/container-build/cerc-verify-test
rm stack_orchestrator/data/compose/docker-compose-verify-test.yml
```
TODO.md (new file, 16 lines)
@@ -0,0 +1,16 @@
# TODO

## Features Needed

### Update Stack Command
We need an "update stack" command in stack orchestrator and cleaner documentation regarding how to do continuous deployment with and without payments.

**Context**: Currently, `deploy init` generates a spec file and `deploy create` creates a deployment directory. The `deployment update` command (added by Thomas Lackey) only syncs env vars and restarts - it doesn't regenerate configurations. There's a gap in the workflow for updating stack configurations after initial deployment.
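To make the gap concrete, here is the flow as it stands (commands as documented in `docs/docker-compose-deployment.md`); the missing piece is a step that regenerates an existing deployment from an updated spec or stack definition:

```bash
# Today: spec and deployment are generated once...
laconic-so --stack test deploy init --output spec.yml
laconic-so --stack test deploy create --spec-file spec.yml --deployment-dir test-deployment

# ...and `update` only re-syncs env vars and restarts; it does not
# regenerate compose files or other configuration from a changed spec.
laconic-so deployment --dir test-deployment update
```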
## Architecture Refactoring

### Separate Deployer from Stack Orchestrator CLI
The deployer logic should be decoupled from the CLI tool to allow independent development and reuse.

### Separate Stacks from Stack Orchestrator Repo
Stacks should live in their own repositories, not bundled with the orchestrator tool. This allows stacks to evolve independently and be maintained by different teams.
docs/docker-compose-deployment.md (new file, 550 lines)
@@ -0,0 +1,550 @@
# Docker Compose Deployment Guide

## Introduction

### What is a Deployer?

In stack-orchestrator, a **deployer** provides a uniform interface for orchestrating containerized applications. This guide focuses on Docker Compose, the default and recommended deployment mode.

While stack-orchestrator also supports Kubernetes (`k8s`) and Kind (`k8s-kind`) deployments, those are out of scope for this guide. See the [Kubernetes Enhancements](./k8s-deployment-enhancements.md) documentation for advanced deployment options.

## Prerequisites

To deploy stacks using Docker Compose, you need:

- Docker Engine (20.10+)
- Docker Compose plugin (v2.0+)
- Python 3.8+
- stack-orchestrator installed (`laconic-so`)

**That's it!** No additional infrastructure is required. If you have Docker installed, you're ready to deploy.

## Deployment Workflow

The typical deployment workflow consists of four main steps:

1. **Setup repositories and build containers** (first time only)
2. **Initialize deployment specification**
3. **Create deployment directory**
4. **Start and manage services**

## Quick Start Example

Here's a complete example using the built-in `test` stack:

```bash
# Step 1: Setup (first time only)
laconic-so --stack test setup-repositories
laconic-so --stack test build-containers

# Step 2: Initialize deployment spec
laconic-so --stack test deploy init --output test-spec.yml

# Step 3: Create deployment directory
laconic-so --stack test deploy create \
  --spec-file test-spec.yml \
  --deployment-dir test-deployment

# Step 4: Start services
laconic-so deployment --dir test-deployment start

# View running services
laconic-so deployment --dir test-deployment ps

# View logs
laconic-so deployment --dir test-deployment logs

# Stop services (preserves data)
laconic-so deployment --dir test-deployment stop
```

## Deployment Workflows

Stack-orchestrator supports two deployment workflows:

### 1. Deployment Directory Workflow (Recommended)

This workflow creates a persistent deployment directory that contains all configuration and data.

**When to use:**
- Production deployments
- When you need to preserve configuration
- When you want to manage multiple deployments
- When you need persistent volume data

**Example:**

```bash
# Initialize deployment spec
laconic-so --stack fixturenet-eth deploy init --output eth-spec.yml

# Optionally edit eth-spec.yml to customize configuration

# Create deployment directory
laconic-so --stack fixturenet-eth deploy create \
  --spec-file eth-spec.yml \
  --deployment-dir my-eth-deployment

# Start the deployment
laconic-so deployment --dir my-eth-deployment start

# Manage the deployment
laconic-so deployment --dir my-eth-deployment ps
laconic-so deployment --dir my-eth-deployment logs
laconic-so deployment --dir my-eth-deployment stop
```

### 2. Quick Deploy Workflow

This workflow deploys directly without creating a persistent deployment directory.

**When to use:**
- Quick testing
- Temporary deployments
- Simple stacks that don't require customization

**Example:**

```bash
# Start the stack directly
laconic-so --stack test deploy up

# Check service status
laconic-so --stack test deploy port test 80

# View logs
laconic-so --stack test deploy logs

# Stop (preserves volumes)
laconic-so --stack test deploy down

# Stop and remove volumes
laconic-so --stack test deploy down --delete-volumes
```

## Real-World Example: Ethereum Fixturenet

Deploy a local Ethereum testnet with Geth and Lighthouse:

```bash
# Setup (first time only)
laconic-so --stack fixturenet-eth setup-repositories
laconic-so --stack fixturenet-eth build-containers

# Initialize with default configuration
laconic-so --stack fixturenet-eth deploy init --output eth-spec.yml

# Create deployment
laconic-so --stack fixturenet-eth deploy create \
  --spec-file eth-spec.yml \
  --deployment-dir fixturenet-eth-deployment

# Start the network
laconic-so deployment --dir fixturenet-eth-deployment start

# Check status
laconic-so deployment --dir fixturenet-eth-deployment ps

# Access logs from specific service
laconic-so deployment --dir fixturenet-eth-deployment logs fixturenet-eth-geth-1

# Stop the network (preserves blockchain data)
laconic-so deployment --dir fixturenet-eth-deployment stop

# Start again - blockchain data is preserved
laconic-so deployment --dir fixturenet-eth-deployment start

# Clean up everything including data
laconic-so deployment --dir fixturenet-eth-deployment stop --delete-volumes
```

## Configuration

### Passing Configuration Parameters

Configuration can be passed in three ways:

**1. At init time via `--config` flag:**

```bash
laconic-so --stack test deploy init --output spec.yml \
  --config PARAM1=value1,PARAM2=value2
```

**2. Edit the spec file after init:**

```bash
# Initialize
laconic-so --stack test deploy init --output spec.yml

# Edit spec.yml
vim spec.yml
```

Example spec.yml:
```yaml
stack: test
config:
  PARAM1: value1
  PARAM2: value2
```

**3. Docker Compose defaults:**

Environment variables defined in the stack's `docker-compose-*.yml` files are used as defaults. Configuration from the spec file overrides these defaults.
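For example (illustrative service and variable names), a default set in the compose file is overridden by the spec:

```yaml
# compose/docker-compose-test.yml (stack default)
services:
  test:
    environment:
      PARAM1: ${PARAM1:-default-value}

# spec.yml (overrides the default at deploy time)
config:
  PARAM1: value1
```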
### Port Mapping

By default, services are accessible on randomly assigned host ports. To find the mapped port:

```bash
# Find the host port for container port 80 on service 'webapp'
laconic-so deployment --dir my-deployment port webapp 80

# Output example: 0.0.0.0:32768
```

To configure fixed ports, edit the spec file before creating the deployment:

```yaml
network:
  ports:
    webapp:
      - '8080:80'   # Maps host port 8080 to container port 80
    api:
      - '3000:3000'
```

Then create the deployment:

```bash
laconic-so --stack my-stack deploy create \
  --spec-file spec.yml \
  --deployment-dir my-deployment
```

### Volume Persistence

Volumes are preserved between stop/start cycles by default:

```bash
# Stop but keep data
laconic-so deployment --dir my-deployment stop

# Start again - data is still there
laconic-so deployment --dir my-deployment start
```

To completely remove all data:

```bash
# Stop and delete all volumes
laconic-so deployment --dir my-deployment stop --delete-volumes
```

Volume data is stored in `<deployment-dir>/data/`.

## Common Operations

### Viewing Logs

```bash
# All services, continuous follow
laconic-so deployment --dir my-deployment logs --follow

# Last 100 lines from all services
laconic-so deployment --dir my-deployment logs --tail 100

# Specific service only
laconic-so deployment --dir my-deployment logs webapp

# Combine options
laconic-so deployment --dir my-deployment logs --tail 50 --follow webapp
```

### Executing Commands in Containers

```bash
# Execute a command in a running service
laconic-so deployment --dir my-deployment exec webapp ls -la

# Interactive shell
laconic-so deployment --dir my-deployment exec webapp /bin/bash

# Run command with specific environment variables
laconic-so deployment --dir my-deployment exec webapp env VAR=value command
```

### Checking Service Status

```bash
# List all running services
laconic-so deployment --dir my-deployment ps

# Check using Docker directly
docker ps
```

### Updating a Running Deployment

If you need to change configuration after deployment:

```bash
# 1. Edit the spec file
vim my-deployment/spec.yml

# 2. Regenerate configuration
laconic-so deployment --dir my-deployment update

# 3. Restart services to apply changes
laconic-so deployment --dir my-deployment stop
laconic-so deployment --dir my-deployment start
```

## Multi-Service Deployments

Many stacks deploy multiple services that work together:

```bash
# Deploy a stack with multiple services
laconic-so --stack laconicd-with-console deploy init --output spec.yml
laconic-so --stack laconicd-with-console deploy create \
  --spec-file spec.yml \
  --deployment-dir laconicd-deployment

laconic-so deployment --dir laconicd-deployment start

# View all services
laconic-so deployment --dir laconicd-deployment ps

# View logs from specific services
laconic-so deployment --dir laconicd-deployment logs laconicd
laconic-so deployment --dir laconicd-deployment logs console
```

## ConfigMaps

ConfigMaps allow you to mount configuration files into containers:

```bash
# 1. Create the config directory in your deployment
mkdir -p my-deployment/data/my-config
echo "database_url=postgres://localhost" > my-deployment/data/my-config/app.conf

# 2. Reference in spec file
vim my-deployment/spec.yml
```

Add to spec.yml:
```yaml
configmaps:
  my-config: ./data/my-config
```

```bash
# 3. Restart to apply
laconic-so deployment --dir my-deployment stop
laconic-so deployment --dir my-deployment start
```

The files will be mounted in the container at `/config/` (or as specified by the stack).

## Deployment Directory Structure

A typical deployment directory contains:

```
my-deployment/
├── compose/
│   └── docker-compose-*.yml   # Generated compose files
├── config.env                 # Environment variables
├── deployment.yml             # Deployment metadata
├── spec.yml                   # Deployment specification
└── data/                      # Volume mounts and configs
    ├── service-data/          # Persistent service data
    └── config-maps/           # ConfigMap files
```

## Troubleshooting

### Common Issues

**Problem: "Cannot connect to Docker daemon"**

```bash
# Ensure Docker is running
docker ps

# Start Docker if needed (macOS)
open -a Docker

# Start Docker (Linux)
sudo systemctl start docker
```

**Problem: "Port already in use"**

```bash
# Either stop the conflicting service or use different ports
# Edit spec.yml before creating deployment:

network:
  ports:
    webapp:
      - '8081:80'   # Use 8081 instead of 8080
```

**Problem: "Image not found"**

```bash
# Build containers first
laconic-so --stack your-stack build-containers
```

**Problem: Volumes not persisting**

```bash
# Check if you used --delete-volumes when stopping
# Volume data is in: <deployment-dir>/data/

# Don't use --delete-volumes if you want to keep data:
laconic-so deployment --dir my-deployment stop

# Only use --delete-volumes when you want to reset completely:
laconic-so deployment --dir my-deployment stop --delete-volumes
```

**Problem: Services not starting**

```bash
# Check logs for errors
laconic-so deployment --dir my-deployment logs

# Check Docker container status
docker ps -a

# Try stopping and starting again
laconic-so deployment --dir my-deployment stop
laconic-so deployment --dir my-deployment start
```

### Inspecting Deployment State

```bash
# Check deployment directory structure
ls -la my-deployment/

# Check running containers
docker ps

# Check container details
docker inspect <container-name>

# Check networks
docker network ls

# Check volumes
docker volume ls
```

## CLI Commands Reference

### Stack Operations

```bash
# Clone required repositories
laconic-so --stack <name> setup-repositories

# Build container images
laconic-so --stack <name> build-containers
```

### Deployment Initialization

```bash
# Initialize deployment spec with defaults
laconic-so --stack <name> deploy init --output <spec-file>

# Initialize with configuration
laconic-so --stack <name> deploy init --output <spec-file> \
  --config PARAM1=value1,PARAM2=value2
```

### Deployment Creation

```bash
# Create deployment directory from spec
laconic-so --stack <name> deploy create \
  --spec-file <spec-file> \
  --deployment-dir <dir>
```

### Deployment Management

```bash
# Start all services
laconic-so deployment --dir <dir> start

# Stop services (preserves volumes)
laconic-so deployment --dir <dir> stop

# Stop and remove volumes
laconic-so deployment --dir <dir> stop --delete-volumes

# List running services
laconic-so deployment --dir <dir> ps

# View logs
laconic-so deployment --dir <dir> logs [--tail N] [--follow] [service]

# Show mapped port
laconic-so deployment --dir <dir> port <service> <private-port>

# Execute command in service
laconic-so deployment --dir <dir> exec <service> <command>

# Update configuration
laconic-so deployment --dir <dir> update
```

### Quick Deploy Commands

```bash
# Start stack directly
laconic-so --stack <name> deploy up

# Stop stack
laconic-so --stack <name> deploy down [--delete-volumes]

# View logs
laconic-so --stack <name> deploy logs

# Show port mapping
laconic-so --stack <name> deploy port <service> <port>
```

## Related Documentation

- [CLI Reference](./cli.md) - Complete CLI command documentation
- [Adding a New Stack](./adding-a-new-stack.md) - Creating custom stacks
- [Specification](./spec.md) - Internal structure and design
- [Kubernetes Enhancements](./k8s-deployment-enhancements.md) - Advanced K8s deployment options
- [Web App Deployment](./webapp.md) - Deploying web applications

## Examples

For more examples, see the test scripts:
- `scripts/quick-deploy-test.sh` - Quick deployment example
- `tests/deploy/run-deploy-test.sh` - Comprehensive test showing all features

## Summary

- Docker Compose is the default and recommended deployment mode
- Two workflows: deployment directory (recommended) or quick deploy
- The standard workflow is: setup → build → init → create → start
- Configuration is flexible with multiple override layers
- Volume persistence is automatic unless explicitly deleted
- All deployment state is contained in the deployment directory
- For Kubernetes deployments, see separate K8s documentation

You're now ready to deploy stacks using stack-orchestrator with Docker Compose!
113
docs/helm-chart-generation.md
Normal file
113
docs/helm-chart-generation.md
Normal file
@ -0,0 +1,113 @@
|
|||||||
|
# Helm Chart Generation
|
||||||
|
|
||||||
|
Generate Kubernetes Helm charts from stack compose files using Kompose.
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
Install Kompose:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Linux
|
||||||
|
curl -L https://github.com/kubernetes/kompose/releases/download/v1.34.0/kompose-linux-amd64 -o kompose
|
||||||
|
chmod +x kompose
|
||||||
|
sudo mv kompose /usr/local/bin/
|
||||||
|
|
||||||
|
# macOS
|
||||||
|
brew install kompose
|
||||||
|
|
||||||
|
# Verify
|
||||||
|
kompose version
|
||||||
|
```
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### 1. Create spec file
|
||||||
|
|
||||||
|
```bash
|
||||||
|
laconic-so --stack <stack-name> deploy --deploy-to k8s init \
|
||||||
|
--kube-config ~/.kube/config \
|
||||||
|
--output spec.yml
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Generate Helm chart
|
||||||
|
|
||||||
|
```bash
|
||||||
|
laconic-so --stack <stack-name> deploy create \
|
||||||
|
--spec-file spec.yml \
|
||||||
|
--deployment-dir my-deployment \
|
||||||
|
--helm-chart
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Deploy to Kubernetes
|
||||||
|
|
||||||
|
```bash
|
||||||
|
helm install my-release my-deployment/chart
|
||||||
|
kubectl get pods -n zenith
|
||||||
|
```
|
||||||
|
|
||||||
|
## Output Structure
|
||||||
|
|
||||||
|
```bash
|
||||||
|
my-deployment/
|
||||||
|
├── spec.yml # Reference
|
||||||
|
├── stack.yml # Reference
|
||||||
|
└── chart/ # Helm chart
|
||||||
|
├── Chart.yaml
|
||||||
|
├── README.md
|
||||||
|
└── templates/
|
||||||
|
└── *.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
## Example
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Generate chart for stage1-zenithd
|
||||||
|
laconic-so --stack stage1-zenithd deploy --deploy-to k8s init \
|
||||||
|
--kube-config ~/.kube/config \
|
||||||
|
--output stage1-spec.yml
|
||||||
|
|
||||||
|
laconic-so --stack stage1-zenithd deploy create \
|
||||||
|
--spec-file stage1-spec.yml \
|
||||||
|
--deployment-dir stage1-deployment \
|
||||||
|
--helm-chart
|
||||||
|
|
||||||
|
# Deploy
|
||||||
|
helm install stage1-zenithd stage1-deployment/chart
|
||||||
|
```

## Production Deployment (TODO)

### Local Development

```bash
# Access services using port-forward
kubectl port-forward service/zenithd 26657:26657
kubectl port-forward service/nginx-api-proxy 1317:80
kubectl port-forward service/cosmos-explorer 4173:4173
```

### Production Access Options

- Option 1: Ingress + cert-manager (Recommended; see the sketch after this list)
  - Install ingress-nginx + cert-manager
  - Point DNS to cluster LoadBalancer IP
  - Auto-provisions Let's Encrypt TLS certs
  - Access: `https://api.zenith.example.com`
- Option 2: Cloud LoadBalancer
  - Use cloud provider's LoadBalancer service type
  - Point DNS to assigned external IP
  - Manual TLS cert management
- Option 3: Bare Metal (MetalLB + Ingress)
  - MetalLB provides LoadBalancer IPs from local network
  - Same Ingress setup as cloud
- Option 4: NodePort + External Proxy
  - Expose services on 30000-32767 range
  - External nginx/Caddy proxies 80/443 → NodePort
  - Manual cert management
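For Option 1, the ingress controller and cert-manager are typically installed from their upstream Helm charts. This is a sketch only: the repositories, namespaces, and `installCRDs` value are the upstream defaults, not something this stack configures, and may differ between chart versions.

```bash
# Ingress controller
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm install ingress-nginx ingress-nginx/ingress-nginx \
  --namespace ingress-nginx --create-namespace

# cert-manager (provisions the Let's Encrypt certificates)
helm repo add jetstack https://charts.jetstack.io
helm install cert-manager jetstack/cert-manager \
  --namespace cert-manager --create-namespace --set installCRDs=true
```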

### Changes Needed

- Add Ingress template to charts
- Add TLS configuration to values.yaml
- Document cert-manager setup
- Add production deployment guide
@ -24,4 +24,3 @@ node-tolerations:
    value: typeb
```
This example denotes that the stack's pods will tolerate a taint: `nodetype=typeb`
@ -26,4 +26,3 @@ $ ./scripts/tag_new_release.sh 1 0 17
$ ./scripts/build_shiv_package.sh
$ ./scripts/publish_shiv_package_github.sh 1 0 17
```
128 laconic-network-deployment.md Normal file
@ -0,0 +1,128 @@
# Deploying to the Laconic Network

## Overview

The Laconic network uses a **registry-based deployment model** where everything is published as blockchain records.

## Key Documentation in stack-orchestrator

- `docs/laconicd-with-console.md` - Setting up a laconicd network
- `docs/webapp.md` - Webapp building/running
- `stack_orchestrator/deploy/webapp/` - Implementation (14 modules)

## Core Concepts

### LRN (Laconic Resource Name)

Format: `lrn://laconic/[namespace]/[name]`

Examples:

- `lrn://laconic/deployers/my-deployer-name`
- `lrn://laconic/dns/example.com`
- `lrn://laconic/deployments/example.com`

### Registry Record Types

| Record Type | Purpose |
|-------------|---------|
| `ApplicationRecord` | Published app metadata |
| `WebappDeployer` | Deployment service offering |
| `ApplicationDeploymentRequest` | User's request to deploy |
| `ApplicationDeploymentAuction` | Optional bidding for deployers |
| `ApplicationDeploymentRecord` | Completed deployment result |

## Deployment Workflows

### 1. Direct Deployment

```
User publishes ApplicationDeploymentRequest
  → targets specific WebappDeployer (by LRN)
  → includes payment TX hash
  → Deployer picks up request, builds, deploys, publishes result
```

### 2. Auction-Based Deployment

```
User publishes ApplicationDeploymentAuction
  → Deployers bid (commit/reveal phases)
  → Winner selected
  → User publishes request targeting winner
```
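For the direct path, the two sides of the exchange map onto the CLI commands documented below in this file; a sketch using those commands as-is (the app and deployer LRNs are placeholders):

```bash
# User side: publish a deployment request targeting a specific deployer
laconic-so request-webapp-deployment --laconic-config config.yml \
  --app lrn://laconic/apps/my-app \
  --deployer lrn://laconic/deployers/xyz \
  --make-payment auto

# Deployer side: watch the registry and process matching requests
laconic-so deploy-webapp-from-registry --laconic-config config.yml --discover
```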

## Key CLI Commands

### Publish a Deployer Service

```bash
laconic-so publish-webapp-deployer --laconic-config config.yml \
  --api-url https://deployer-api.example.com \
  --name my-deployer \
  --payment-address laconic1... \
  --minimum-payment 1000alnt
```

### Request Deployment (User Side)

```bash
laconic-so request-webapp-deployment --laconic-config config.yml \
  --app lrn://laconic/apps/my-app \
  --deployer lrn://laconic/deployers/xyz \
  --make-payment auto
```

### Run Deployer Service (Deployer Side)

```bash
laconic-so deploy-webapp-from-registry --laconic-config config.yml --discover
```

## Laconic Config File

All tools require a laconic config file (`laconic.toml`):

```toml
[cosmos]
address_prefix = "laconic"
chain_id = "laconic_9000-1"
endpoint = "http://localhost:26657"
key = "<account-name>"
password = "<account-password>"
```

## Setting Up a Local Laconicd Network

```bash
# Clone and build
laconic-so --stack fixturenet-laconic-loaded setup-repositories
laconic-so --stack fixturenet-laconic-loaded build-containers
laconic-so --stack fixturenet-laconic-loaded deploy create
laconic-so deployment --dir laconic-loaded-deployment start

# Check status
laconic-so deployment --dir laconic-loaded-deployment exec cli "laconic registry status"
```

## Key Implementation Files

| File | Purpose |
|------|---------|
| `publish_webapp_deployer.py` | Register deployment service on network |
| `publish_deployment_auction.py` | Create auction for deployers to bid on |
| `handle_deployment_auction.py` | Monitor and bid on auctions (deployer-side) |
| `request_webapp_deployment.py` | Create deployment request (user-side) |
| `deploy_webapp_from_registry.py` | Process requests and deploy (deployer-side) |
| `request_webapp_undeployment.py` | Request app removal |
| `undeploy_webapp_from_registry.py` | Process removal requests |
| `util.py` | LaconicRegistryClient - all registry interactions |

## Payment System

- **Token Denom**: `alnt` (Laconic network tokens)
- **Payment Options**:
  - `--make-payment`: Create new payment with amount (or "auto" for deployer's minimum)
  - `--use-payment`: Reference existing payment TX

## What's NOT Well-Documented

1. No end-to-end tutorial for full deployment workflow
2. Stack publishing (vs webapp) process unclear
3. LRN naming conventions not formally specified
4. Payment economics and token mechanics
110 pyproject.toml Normal file
@ -0,0 +1,110 @@
[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "laconic-stack-orchestrator"
version = "1.1.0"
description = "Orchestrates deployment of the Laconic stack"
readme = "README.md"
license = {text = "GNU Affero General Public License"}
authors = [
    {name = "Cerc", email = "info@cerc.io"}
]
requires-python = ">=3.8"
classifiers = [
    "Programming Language :: Python :: 3.8",
    "Operating System :: OS Independent",
]
dependencies = [
    "python-decouple>=3.8",
    "python-dotenv==1.0.0",
    "GitPython>=3.1.32",
    "tqdm>=4.65.0",
    "python-on-whales>=0.64.0",
    "click>=8.1.6",
    "PyYAML>=6.0.1",
    "ruamel.yaml>=0.17.32",
    "pydantic==1.10.9",
    "tomli==2.0.1",
    "validators==0.22.0",
    "kubernetes>=28.1.0",
    "humanfriendly>=10.0",
    "python-gnupg>=0.5.2",
    "requests>=2.3.2",
]

[project.optional-dependencies]
dev = [
    "pytest>=7.0.0",
    "pytest-cov>=4.0.0",
    "black>=22.0.0",
    "flake8>=5.0.0",
    "pyright>=1.1.0",
    "yamllint>=1.28.0",
    "pre-commit>=3.0.0",
]

[project.scripts]
laconic-so = "stack_orchestrator.main:cli"

[project.urls]
Homepage = "https://git.vdb.to/cerc-io/stack-orchestrator"

[tool.setuptools.packages.find]
where = ["."]

[tool.setuptools.package-data]
"*" = ["data/**"]

[tool.black]
line-length = 88
target-version = ['py38']

[tool.flake8]
max-line-length = 88
extend-ignore = ["E203", "W503", "E402"]

[tool.pyright]
pythonVersion = "3.9"
typeCheckingMode = "basic"
reportMissingImports = "none"
reportMissingModuleSource = "none"
reportUnusedImport = "error"
include = ["stack_orchestrator/**/*.py", "tests/**/*.py"]
exclude = ["**/build/**", "**/__pycache__/**"]

[tool.mypy]
python_version = "3.8"
warn_return_any = true
warn_unused_configs = true
disallow_untyped_defs = true

[tool.pytest.ini_options]
testpaths = ["tests"]
python_files = ["test_*.py"]
python_classes = ["Test*"]
python_functions = ["test_*"]
markers = [
    "slow: marks tests as slow (deselect with '-m \"not slow\"')",
    "e2e: marks tests as end-to-end (requires real infrastructure)",
]
addopts = [
    "--cov",
    "--cov-report=term-missing",
    "--cov-report=html",
    "--strict-markers",
]
asyncio_default_fixture_loop_scope = "function"

[tool.coverage.run]
source = ["stack_orchestrator"]
disable_warnings = ["couldnt-parse"]

[tool.coverage.report]
exclude_lines = [
    "pragma: no cover",
    "def __repr__",
    "raise AssertionError",
    "raise NotImplementedError",
]
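Given the `dev` extra and the pytest options above, a typical local setup and fast-test run looks like this (a sketch; the editable install assumes you are in the repository root):

```bash
# Install the package plus dev tooling
pip install -e ".[dev]"

# Run the test suite, skipping tests marked slow
pytest -m "not slow"
```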
9 pyrightconfig.json Normal file
@ -0,0 +1,9 @@
{
    "pythonVersion": "3.9",
    "typeCheckingMode": "basic",
    "reportMissingImports": "none",
    "reportMissingModuleSource": "none",
    "reportUnusedImport": "error",
    "include": ["stack_orchestrator/**/*.py", "tests/**/*.py"],
    "exclude": ["**/build/**", "**/__pycache__/**"]
}
26 setup.py
@ -1,5 +1,7 @@
# See https://medium.com/nerd-for-tech/how-to-build-and-distribute-a-cli-tool-with-python-537ae41d9d78
# See
# https://medium.com/nerd-for-tech/how-to-build-and-distribute-a-cli-tool-with-python-537ae41d9d78
from setuptools import setup, find_packages

with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
with open("requirements.txt", "r", encoding="utf-8") as fh:
@ -7,26 +9,26 @@ with open("requirements.txt", "r", encoding="utf-8") as fh:
with open("stack_orchestrator/data/version.txt", "r", encoding="utf-8") as fh:
    version = fh.readlines()[-1].strip(" \n")
setup(
    name='laconic-stack-orchestrator',
    name="laconic-stack-orchestrator",
    version=version,
    author='Cerc',
    author="Cerc",
    author_email='info@cerc.io',
    author_email="info@cerc.io",
    license='GNU Affero General Public License',
    license="GNU Affero General Public License",
    description='Orchestrates deployment of the Laconic stack',
    description="Orchestrates deployment of the Laconic stack",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url='https://git.vdb.to/cerc-io/stack-orchestrator',
    url="https://git.vdb.to/cerc-io/stack-orchestrator",
    py_modules=['stack_orchestrator'],
    py_modules=["stack_orchestrator"],
    packages=find_packages(),
    install_requires=[requirements],
    python_requires='>=3.7',
    python_requires=">=3.7",
    include_package_data=True,
    package_data={'': ['data/**']},
    package_data={"": ["data/**"]},
    classifiers=[
        "Programming Language :: Python :: 3.8",
        "Operating System :: OS Independent",
    ],
    entry_points={
        'console_scripts': ['laconic-so=stack_orchestrator.main:cli'],
        "console_scripts": ["laconic-so=stack_orchestrator.main:cli"],
    }
    },
)
@ -23,11 +23,10 @@ def get_stack(config, stack):
    if stack == "package-registry":
        return package_registry_stack(config, stack)
    else:
        return base_stack(config, stack)
        return default_stack(config, stack)


class base_stack(ABC):

    def __init__(self, config, stack):
        self.config = config
        self.stack = stack
@ -41,15 +40,27 @@ class base_stack(ABC):
        pass


class package_registry_stack(base_stack):
class default_stack(base_stack):
    """Default stack implementation for stacks without specific handling."""

    def ensure_available(self):
        return True

    def get_url(self):
        return None


class package_registry_stack(base_stack):
    def ensure_available(self):
        self.url = "<no registry url set>"
        # Check if we were given an external registry URL
        url_from_environment = os.environ.get("CERC_NPM_REGISTRY_URL")
        if url_from_environment:
            if self.config.verbose:
                print(f"Using package registry url from CERC_NPM_REGISTRY_URL: {url_from_environment}")
                print(
                    f"Using package registry url from CERC_NPM_REGISTRY_URL: "
                    f"{url_from_environment}"
                )
            self.url = url_from_environment
        else:
            # Otherwise we expect to use the local package-registry stack
@ -62,10 +73,16 @@ class package_registry_stack(base_stack):
                # TODO: get url from deploy-stack
                self.url = "http://gitea.local:3000/api/packages/cerc-io/npm/"
            else:
                # If not, print a message about how to start it and return fail to the caller
                # If not, print a message about how to start it and return fail to the
                # caller
                print("ERROR: The package-registry stack is not running, and no external registry "
                      "specified with CERC_NPM_REGISTRY_URL")
                print(
                    "ERROR: The package-registry stack is not running, "
                    "and no external registry specified with CERC_NPM_REGISTRY_URL"
                )
                print("ERROR: Start the local package registry with: laconic-so --stack package-registry deploy-system up")
                print(
                    "ERROR: Start the local package registry with: "
                    "laconic-so --stack package-registry deploy-system up"
                )
                return False
        return True

@ -76,7 +93,9 @@ class package_registry_stack(base_stack):
def get_npm_registry_url():
    # If an auth token is not defined, we assume the default should be the cerc registry
    # If an auth token is defined, we assume the local gitea should be used.
    default_npm_registry_url = "http://gitea.local:3000/api/packages/cerc-io/npm/" if config(
        "CERC_NPM_AUTH_TOKEN", default=None
    ) else "https://git.vdb.to/api/packages/cerc-io/npm/"
    default_npm_registry_url = (
        "http://gitea.local:3000/api/packages/cerc-io/npm/"
        if config("CERC_NPM_AUTH_TOKEN", default=None)
        else "https://git.vdb.to/api/packages/cerc-io/npm/"
    )
    return config("CERC_NPM_REGISTRY_URL", default=default_npm_registry_url)
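The registry URL resolution above can be steered from the shell; a minimal sketch using only the environment variable and command referenced in this code:

```bash
# Use an external npm registry instead of the local package-registry stack
export CERC_NPM_REGISTRY_URL=https://git.vdb.to/api/packages/cerc-io/npm/

# ...or bring up the local registry that the code falls back to
laconic-so --stack package-registry deploy-system up
```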
|||||||
@ -18,7 +18,8 @@
|
|||||||
# env vars:
|
# env vars:
|
||||||
# CERC_REPO_BASE_DIR defaults to ~/cerc
|
# CERC_REPO_BASE_DIR defaults to ~/cerc
|
||||||
|
|
||||||
# TODO: display the available list of containers; allow re-build of either all or specific containers
|
# TODO: display the available list of containers;
|
||||||
|
# allow re-build of either all or specific containers
|
||||||
|
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
@ -34,14 +35,17 @@ from stack_orchestrator.build.publish import publish_image
|
|||||||
from stack_orchestrator.build.build_util import get_containers_in_scope
|
from stack_orchestrator.build.build_util import get_containers_in_scope
|
||||||
|
|
||||||
# TODO: find a place for this
|
# TODO: find a place for this
|
||||||
# epilog="Config provided either in .env or settings.ini or env vars: CERC_REPO_BASE_DIR (defaults to ~/cerc)"
|
# epilog="Config provided either in .env or settings.ini or env vars:
|
||||||
|
# CERC_REPO_BASE_DIR (defaults to ~/cerc)"
|
||||||
|
|
||||||
|
|
||||||
def make_container_build_env(dev_root_path: str,
|
def make_container_build_env(
|
||||||
container_build_dir: str,
|
dev_root_path: str,
|
||||||
debug: bool,
|
container_build_dir: str,
|
||||||
force_rebuild: bool,
|
debug: bool,
|
||||||
extra_build_args: str):
|
force_rebuild: bool,
|
||||||
|
extra_build_args: str,
|
||||||
|
):
|
||||||
container_build_env = {
|
container_build_env = {
|
||||||
"CERC_NPM_REGISTRY_URL": get_npm_registry_url(),
|
"CERC_NPM_REGISTRY_URL": get_npm_registry_url(),
|
||||||
"CERC_GO_AUTH_TOKEN": config("CERC_GO_AUTH_TOKEN", default=""),
|
"CERC_GO_AUTH_TOKEN": config("CERC_GO_AUTH_TOKEN", default=""),
|
||||||
@ -50,11 +54,15 @@ def make_container_build_env(dev_root_path: str,
|
|||||||
"CERC_CONTAINER_BASE_DIR": container_build_dir,
|
"CERC_CONTAINER_BASE_DIR": container_build_dir,
|
||||||
"CERC_HOST_UID": f"{os.getuid()}",
|
"CERC_HOST_UID": f"{os.getuid()}",
|
||||||
"CERC_HOST_GID": f"{os.getgid()}",
|
"CERC_HOST_GID": f"{os.getgid()}",
|
||||||
"DOCKER_BUILDKIT": config("DOCKER_BUILDKIT", default="0")
|
"DOCKER_BUILDKIT": config("DOCKER_BUILDKIT", default="0"),
|
||||||
}
|
}
|
||||||
container_build_env.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
|
container_build_env.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
|
||||||
container_build_env.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
|
container_build_env.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
|
||||||
container_build_env.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {})
|
container_build_env.update(
|
||||||
|
{"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args}
|
||||||
|
if extra_build_args
|
||||||
|
else {}
|
||||||
|
)
|
||||||
docker_host_env = os.getenv("DOCKER_HOST")
|
docker_host_env = os.getenv("DOCKER_HOST")
|
||||||
if docker_host_env:
|
if docker_host_env:
|
||||||
container_build_env.update({"DOCKER_HOST": docker_host_env})
|
container_build_env.update({"DOCKER_HOST": docker_host_env})
|
||||||
@ -67,12 +75,18 @@ def process_container(build_context: BuildContext) -> bool:
|
|||||||
print(f"Building: {build_context.container}")
|
print(f"Building: {build_context.container}")
|
||||||
|
|
||||||
default_container_tag = f"{build_context.container}:local"
|
default_container_tag = f"{build_context.container}:local"
|
||||||
build_context.container_build_env.update({"CERC_DEFAULT_CONTAINER_IMAGE_TAG": default_container_tag})
|
build_context.container_build_env.update(
|
||||||
|
{"CERC_DEFAULT_CONTAINER_IMAGE_TAG": default_container_tag}
|
||||||
|
)
|
||||||
|
|
||||||
# Check if this is in an external stack
|
# Check if this is in an external stack
|
||||||
if stack_is_external(build_context.stack):
|
if stack_is_external(build_context.stack):
|
||||||
container_parent_dir = Path(build_context.stack).parent.parent.joinpath("container-build")
|
container_parent_dir = Path(build_context.stack).parent.parent.joinpath(
|
||||||
temp_build_dir = container_parent_dir.joinpath(build_context.container.replace("/", "-"))
|
"container-build"
|
||||||
|
)
|
||||||
|
temp_build_dir = container_parent_dir.joinpath(
|
||||||
|
build_context.container.replace("/", "-")
|
||||||
|
)
|
||||||
temp_build_script_filename = temp_build_dir.joinpath("build.sh")
|
temp_build_script_filename = temp_build_dir.joinpath("build.sh")
|
||||||
# Now check if the container exists in the external stack.
|
# Now check if the container exists in the external stack.
|
||||||
if not temp_build_script_filename.exists():
|
if not temp_build_script_filename.exists():
|
||||||
@ -90,21 +104,34 @@ def process_container(build_context: BuildContext) -> bool:
|
|||||||
build_command = build_script_filename.as_posix()
|
build_command = build_script_filename.as_posix()
|
||||||
else:
|
else:
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f"No script file found: {build_script_filename}, using default build script")
|
print(
|
||||||
repo_dir = build_context.container.split('/')[1]
|
f"No script file found: {build_script_filename}, "
|
||||||
# TODO: make this less of a hack -- should be specified in some metadata somewhere
|
"using default build script"
|
||||||
# Check if we have a repo for this container. If not, set the context dir to the container-build subdir
|
)
|
||||||
|
repo_dir = build_context.container.split("/")[1]
|
||||||
|
# TODO: make this less of a hack -- should be specified in
|
||||||
|
# some metadata somewhere. Check if we have a repo for this
|
||||||
|
# container. If not, set the context dir to container-build subdir
|
||||||
repo_full_path = os.path.join(build_context.dev_root_path, repo_dir)
|
repo_full_path = os.path.join(build_context.dev_root_path, repo_dir)
|
||||||
repo_dir_or_build_dir = repo_full_path if os.path.exists(repo_full_path) else build_dir
|
repo_dir_or_build_dir = (
|
||||||
build_command = os.path.join(build_context.container_build_dir,
|
repo_full_path if os.path.exists(repo_full_path) else build_dir
|
||||||
"default-build.sh") + f" {default_container_tag} {repo_dir_or_build_dir}"
|
)
|
||||||
|
build_command = (
|
||||||
|
os.path.join(build_context.container_build_dir, "default-build.sh")
|
||||||
|
+ f" {default_container_tag} {repo_dir_or_build_dir}"
|
||||||
|
)
|
||||||
if not opts.o.dry_run:
|
if not opts.o.dry_run:
|
||||||
# No PATH at all causes failures with podman.
|
# No PATH at all causes failures with podman.
|
||||||
if "PATH" not in build_context.container_build_env:
|
if "PATH" not in build_context.container_build_env:
|
||||||
build_context.container_build_env["PATH"] = os.environ["PATH"]
|
build_context.container_build_env["PATH"] = os.environ["PATH"]
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f"Executing: {build_command} with environment: {build_context.container_build_env}")
|
print(
|
||||||
build_result = subprocess.run(build_command, shell=True, env=build_context.container_build_env)
|
f"Executing: {build_command} with environment: "
|
||||||
|
f"{build_context.container_build_env}"
|
||||||
|
)
|
||||||
|
build_result = subprocess.run(
|
||||||
|
build_command, shell=True, env=build_context.container_build_env
|
||||||
|
)
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f"Return code is: {build_result.returncode}")
|
print(f"Return code is: {build_result.returncode}")
|
||||||
if build_result.returncode != 0:
|
if build_result.returncode != 0:
|
||||||
@ -117,33 +144,61 @@ def process_container(build_context: BuildContext) -> bool:
|
|||||||
|
|
||||||
|
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.option('--include', help="only build these containers")
|
@click.option("--include", help="only build these containers")
|
||||||
@click.option('--exclude', help="don\'t build these containers")
|
@click.option("--exclude", help="don't build these containers")
|
||||||
@click.option("--force-rebuild", is_flag=True, default=False, help="Override dependency checking -- always rebuild")
|
@click.option(
|
||||||
|
"--force-rebuild",
|
||||||
|
is_flag=True,
|
||||||
|
default=False,
|
||||||
|
help="Override dependency checking -- always rebuild",
|
||||||
|
)
|
||||||
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
||||||
@click.option("--publish-images", is_flag=True, default=False, help="Publish the built images in the specified image registry")
|
@click.option(
|
||||||
@click.option("--image-registry", help="Specify the image registry for --publish-images")
|
"--publish-images",
|
||||||
|
is_flag=True,
|
||||||
|
default=False,
|
||||||
|
help="Publish the built images in the specified image registry",
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--image-registry", help="Specify the image registry for --publish-images"
|
||||||
|
)
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(ctx, include, exclude, force_rebuild, extra_build_args, publish_images, image_registry):
|
def command(
|
||||||
'''build the set of containers required for a complete stack'''
|
ctx,
|
||||||
|
include,
|
||||||
|
exclude,
|
||||||
|
force_rebuild,
|
||||||
|
extra_build_args,
|
||||||
|
publish_images,
|
||||||
|
image_registry,
|
||||||
|
):
|
||||||
|
"""build the set of containers required for a complete stack"""
|
||||||
|
|
||||||
local_stack = ctx.obj.local_stack
|
local_stack = ctx.obj.local_stack
|
||||||
stack = ctx.obj.stack
|
stack = ctx.obj.stack
|
||||||
|
|
||||||
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
|
# See: https://stackoverflow.com/questions/25389095/
|
||||||
container_build_dir = Path(__file__).absolute().parent.parent.joinpath("data", "container-build")
|
# python-get-path-of-root-project-structure
|
||||||
|
container_build_dir = (
|
||||||
|
Path(__file__).absolute().parent.parent.joinpath("data", "container-build")
|
||||||
|
)
|
||||||
|
|
||||||
if local_stack:
|
if local_stack:
|
||||||
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
|
dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")]
|
||||||
print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
|
print(
|
||||||
|
f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
|
||||||
|
f"{dev_root_path}"
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
|
dev_root_path = os.path.expanduser(
|
||||||
|
config("CERC_REPO_BASE_DIR", default="~/cerc")
|
||||||
|
)
|
||||||
|
|
||||||
if not opts.o.quiet:
|
if not opts.o.quiet:
|
||||||
print(f'Dev Root is: {dev_root_path}')
|
print(f"Dev Root is: {dev_root_path}")
|
||||||
|
|
||||||
if not os.path.isdir(dev_root_path):
|
if not os.path.isdir(dev_root_path):
|
||||||
print('Dev root directory doesn\'t exist, creating')
|
print("Dev root directory doesn't exist, creating")
|
||||||
|
|
||||||
if publish_images:
|
if publish_images:
|
||||||
if not image_registry:
|
if not image_registry:
|
||||||
@ -151,21 +206,22 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args, publish_imag
|
|||||||
|
|
||||||
containers_in_scope = get_containers_in_scope(stack)
|
containers_in_scope = get_containers_in_scope(stack)
|
||||||
|
|
||||||
container_build_env = make_container_build_env(dev_root_path,
|
container_build_env = make_container_build_env(
|
||||||
container_build_dir,
|
dev_root_path,
|
||||||
opts.o.debug,
|
container_build_dir,
|
||||||
force_rebuild,
|
opts.o.debug,
|
||||||
extra_build_args)
|
force_rebuild,
|
||||||
|
extra_build_args,
|
||||||
|
)
|
||||||
|
|
||||||
for container in containers_in_scope:
|
for container in containers_in_scope:
|
||||||
if include_exclude_check(container, include, exclude):
|
if include_exclude_check(container, include, exclude):
|
||||||
|
|
||||||
build_context = BuildContext(
|
build_context = BuildContext(
|
||||||
stack,
|
stack,
|
||||||
container,
|
container,
|
||||||
container_build_dir,
|
container_build_dir,
|
||||||
container_build_env,
|
container_build_env,
|
||||||
dev_root_path
|
dev_root_path,
|
||||||
)
|
)
|
||||||
result = process_container(build_context)
|
result = process_container(build_context)
|
||||||
if result:
|
if result:
|
||||||
@ -174,10 +230,16 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args, publish_imag
|
|||||||
else:
|
else:
|
||||||
print(f"Error running build for {build_context.container}")
|
print(f"Error running build for {build_context.container}")
|
||||||
if not opts.o.continue_on_error:
|
if not opts.o.continue_on_error:
|
||||||
error_exit("container build failed and --continue-on-error not set, exiting")
|
error_exit(
|
||||||
|
"container build failed and --continue-on-error "
|
||||||
|
"not set, exiting"
|
||||||
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
else:
|
else:
|
||||||
print("****** Container Build Error, continuing because --continue-on-error is set")
|
print(
|
||||||
|
"****** Container Build Error, continuing because "
|
||||||
|
"--continue-on-error is set"
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f"Excluding: {container}")
|
print(f"Excluding: {container}")
|
||||||
|
|||||||
@ -32,14 +32,18 @@ builder_js_image_name = "cerc/builder-js:local"
|
|||||||
|
|
||||||
|
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.option('--include', help="only build these packages")
|
@click.option("--include", help="only build these packages")
|
||||||
@click.option('--exclude', help="don\'t build these packages")
|
@click.option("--exclude", help="don't build these packages")
|
||||||
@click.option("--force-rebuild", is_flag=True, default=False,
|
@click.option(
|
||||||
help="Override existing target package version check -- force rebuild")
|
"--force-rebuild",
|
||||||
|
is_flag=True,
|
||||||
|
default=False,
|
||||||
|
help="Override existing target package version check -- force rebuild",
|
||||||
|
)
|
||||||
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(ctx, include, exclude, force_rebuild, extra_build_args):
|
def command(ctx, include, exclude, force_rebuild, extra_build_args):
|
||||||
'''build the set of npm packages required for a complete stack'''
|
"""build the set of npm packages required for a complete stack"""
|
||||||
|
|
||||||
quiet = ctx.obj.quiet
|
quiet = ctx.obj.quiet
|
||||||
verbose = ctx.obj.verbose
|
verbose = ctx.obj.verbose
|
||||||
@ -65,45 +69,54 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
|
|||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
if local_stack:
|
if local_stack:
|
||||||
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
|
dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")]
|
||||||
print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
|
print(
|
||||||
|
f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
|
||||||
|
f"{dev_root_path}"
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
|
dev_root_path = os.path.expanduser(
|
||||||
|
config("CERC_REPO_BASE_DIR", default="~/cerc")
|
||||||
|
)
|
||||||
|
|
||||||
build_root_path = os.path.join(dev_root_path, "build-trees")
|
build_root_path = os.path.join(dev_root_path, "build-trees")
|
||||||
|
|
||||||
if verbose:
|
if verbose:
|
||||||
print(f'Dev Root is: {dev_root_path}')
|
print(f"Dev Root is: {dev_root_path}")
|
||||||
|
|
||||||
if not os.path.isdir(dev_root_path):
|
if not os.path.isdir(dev_root_path):
|
||||||
print('Dev root directory doesn\'t exist, creating')
|
print("Dev root directory doesn't exist, creating")
|
||||||
os.makedirs(dev_root_path)
|
os.makedirs(dev_root_path)
|
||||||
if not os.path.isdir(dev_root_path):
|
if not os.path.isdir(dev_root_path):
|
||||||
print('Build root directory doesn\'t exist, creating')
|
print("Build root directory doesn't exist, creating")
|
||||||
os.makedirs(build_root_path)
|
os.makedirs(build_root_path)
|
||||||
|
|
||||||
# See: https://stackoverflow.com/a/20885799/1701505
|
# See: https://stackoverflow.com/a/20885799/1701505
|
||||||
from stack_orchestrator import data
|
from stack_orchestrator import data
|
||||||
with importlib.resources.open_text(data, "npm-package-list.txt") as package_list_file:
|
|
||||||
|
with importlib.resources.open_text(
|
||||||
|
data, "npm-package-list.txt"
|
||||||
|
) as package_list_file:
|
||||||
all_packages = package_list_file.read().splitlines()
|
all_packages = package_list_file.read().splitlines()
|
||||||
|
|
||||||
packages_in_scope = []
|
packages_in_scope = []
|
||||||
if stack:
|
if stack:
|
||||||
stack_config = get_parsed_stack_config(stack)
|
stack_config = get_parsed_stack_config(stack)
|
||||||
# TODO: syntax check the input here
|
# TODO: syntax check the input here
|
||||||
packages_in_scope = stack_config['npms']
|
packages_in_scope = stack_config["npms"]
|
||||||
else:
|
else:
|
||||||
packages_in_scope = all_packages
|
packages_in_scope = all_packages
|
||||||
|
|
||||||
if verbose:
|
if verbose:
|
||||||
print(f'Packages: {packages_in_scope}')
|
print(f"Packages: {packages_in_scope}")
|
||||||
|
|
||||||
def build_package(package):
|
def build_package(package):
|
||||||
if not quiet:
|
if not quiet:
|
||||||
print(f"Building npm package: {package}")
|
print(f"Building npm package: {package}")
|
||||||
repo_dir = package
|
repo_dir = package
|
||||||
repo_full_path = os.path.join(dev_root_path, repo_dir)
|
repo_full_path = os.path.join(dev_root_path, repo_dir)
|
||||||
# Copy the repo and build that to avoid propagating JS tooling file changes back into the cloned repo
|
# Copy the repo and build that to avoid propagating
|
||||||
|
# JS tooling file changes back into the cloned repo
|
||||||
repo_copy_path = os.path.join(build_root_path, repo_dir)
|
repo_copy_path = os.path.join(build_root_path, repo_dir)
|
||||||
# First delete any old build tree
|
# First delete any old build tree
|
||||||
if os.path.isdir(repo_copy_path):
|
if os.path.isdir(repo_copy_path):
|
||||||
@ -116,41 +129,63 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
|
|||||||
print(f"Copying build tree from: {repo_full_path} to: {repo_copy_path}")
|
print(f"Copying build tree from: {repo_full_path} to: {repo_copy_path}")
|
||||||
if not dry_run:
|
if not dry_run:
|
||||||
copytree(repo_full_path, repo_copy_path)
|
copytree(repo_full_path, repo_copy_path)
|
||||||
build_command = ["sh", "-c", f"cd /workspace && build-npm-package-local-dependencies.sh {npm_registry_url}"]
|
build_command = [
|
||||||
|
"sh",
|
||||||
|
"-c",
|
||||||
|
"cd /workspace && "
|
||||||
|
f"build-npm-package-local-dependencies.sh {npm_registry_url}",
|
||||||
|
]
|
||||||
if not dry_run:
|
if not dry_run:
|
||||||
if verbose:
|
if verbose:
|
||||||
print(f"Executing: {build_command}")
|
print(f"Executing: {build_command}")
|
||||||
# Originally we used the PEP 584 merge operator:
|
# Originally we used the PEP 584 merge operator:
|
||||||
# envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token} | ({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
|
# envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token} |
|
||||||
# but that isn't available in Python 3.8 (default in Ubuntu 20) so for now we use dict.update:
|
# ({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
|
||||||
envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token,
|
# but that isn't available in Python 3.8 (default in Ubuntu 20)
|
||||||
"LACONIC_HOSTED_CONFIG_FILE": "config-hosted.yml" # Convention used by our web app packages
|
# so for now we use dict.update:
|
||||||
}
|
envs = {
|
||||||
|
"CERC_NPM_AUTH_TOKEN": npm_registry_url_token,
|
||||||
|
# Convention used by our web app packages
|
||||||
|
"LACONIC_HOSTED_CONFIG_FILE": "config-hosted.yml",
|
||||||
|
}
|
||||||
envs.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
|
envs.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
|
||||||
envs.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
|
envs.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
|
||||||
envs.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {})
|
envs.update(
|
||||||
|
{"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args}
|
||||||
|
if extra_build_args
|
||||||
|
else {}
|
||||||
|
)
|
||||||
try:
|
try:
|
||||||
docker.run(builder_js_image_name,
|
docker.run(
|
||||||
remove=True,
|
builder_js_image_name,
|
||||||
interactive=True,
|
remove=True,
|
||||||
tty=True,
|
interactive=True,
|
||||||
user=f"{os.getuid()}:{os.getgid()}",
|
tty=True,
|
||||||
envs=envs,
|
user=f"{os.getuid()}:{os.getgid()}",
|
||||||
# TODO: detect this host name in npm_registry_url rather than hard-wiring it
|
envs=envs,
|
||||||
add_hosts=[("gitea.local", "host-gateway")],
|
# TODO: detect this host name in npm_registry_url
|
||||||
volumes=[(repo_copy_path, "/workspace")],
|
# rather than hard-wiring it
|
||||||
command=build_command
|
add_hosts=[("gitea.local", "host-gateway")],
|
||||||
)
|
volumes=[(repo_copy_path, "/workspace")],
|
||||||
# Note that although the docs say that build_result should contain
|
command=build_command,
|
||||||
# the command output as a string, in reality it is always the empty string.
|
)
|
||||||
# Since we detect errors via catching exceptions below, we can safely ignore it here.
|
# Note that although the docs say that build_result should
|
||||||
|
# contain the command output as a string, in reality it is
|
||||||
|
# always the empty string. Since we detect errors via catching
|
||||||
|
# exceptions below, we can safely ignore it here.
|
||||||
except DockerException as e:
|
except DockerException as e:
|
||||||
print(f"Error executing build for {package} in container:\n {e}")
|
print(f"Error executing build for {package} in container:\n {e}")
|
||||||
if not continue_on_error:
|
if not continue_on_error:
|
||||||
print("FATAL Error: build failed and --continue-on-error not set, exiting")
|
print(
|
||||||
|
"FATAL Error: build failed and --continue-on-error "
|
||||||
|
"not set, exiting"
|
||||||
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
else:
|
else:
|
||||||
print("****** Build Error, continuing because --continue-on-error is set")
|
print(
|
||||||
|
"****** Build Error, continuing because "
|
||||||
|
"--continue-on-error is set"
|
||||||
|
)
|
||||||
|
|
||||||
else:
|
else:
|
||||||
print("Skipped")
|
print("Skipped")
|
||||||
@ -168,6 +203,12 @@ def _ensure_prerequisites():
|
|||||||
# Tell the user how to build it if not
|
# Tell the user how to build it if not
|
||||||
images = docker.image.list(builder_js_image_name)
|
images = docker.image.list(builder_js_image_name)
|
||||||
if len(images) == 0:
|
if len(images) == 0:
|
||||||
print(f"FATAL: builder image: {builder_js_image_name} is required but was not found")
|
print(
|
||||||
print("Please run this command to create it: laconic-so --stack build-support build-containers")
|
f"FATAL: builder image: {builder_js_image_name} is required "
|
||||||
|
"but was not found"
|
||||||
|
)
|
||||||
|
print(
|
||||||
|
"Please run this command to create it: "
|
||||||
|
"laconic-so --stack build-support build-containers"
|
||||||
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|||||||
@ -24,6 +24,5 @@ class BuildContext:
    stack: str
    container: str
    container_build_dir: Path
    container_build_env: Mapping[str,str]
    container_build_env: Mapping[str, str]
    dev_root_path: str
||||||
|
|
||||||
|
|||||||
@ -20,21 +20,23 @@ from stack_orchestrator.util import get_parsed_stack_config, warn_exit


def get_containers_in_scope(stack: str):

    containers_in_scope = []
    if stack:
        stack_config = get_parsed_stack_config(stack)
        if "containers" not in stack_config or stack_config["containers"] is None:
            warn_exit(f"stack {stack} does not define any containers")
        containers_in_scope = stack_config['containers']
        containers_in_scope = stack_config["containers"]
    else:
        # See: https://stackoverflow.com/a/20885799/1701505
        from stack_orchestrator import data
        with importlib.resources.open_text(data, "container-image-list.txt") as container_list_file:
        with importlib.resources.open_text(
            data, "container-image-list.txt"
        ) as container_list_file:
            containers_in_scope = container_list_file.read().splitlines()

    if opts.o.verbose:
        print(f'Containers: {containers_in_scope}')
        print(f"Containers: {containers_in_scope}")
        if stack:
            print(f"Stack: {stack}")
|
|
||||||
|
|||||||
@ -18,7 +18,8 @@
|
|||||||
# env vars:
|
# env vars:
|
||||||
# CERC_REPO_BASE_DIR defaults to ~/cerc
|
# CERC_REPO_BASE_DIR defaults to ~/cerc
|
||||||
|
|
||||||
# TODO: display the available list of containers; allow re-build of either all or specific containers
|
# TODO: display the available list of containers;
|
||||||
|
# allow re-build of either all or specific containers
|
||||||
|
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
@ -30,49 +31,57 @@ from stack_orchestrator.build import build_containers
|
|||||||
from stack_orchestrator.deploy.webapp.util import determine_base_container, TimedLogger
|
from stack_orchestrator.deploy.webapp.util import determine_base_container, TimedLogger
|
||||||
from stack_orchestrator.build.build_types import BuildContext
|
from stack_orchestrator.build.build_types import BuildContext
|
||||||
|
|
||||||
def create_env_file(env_vars, repo_root):
|
|
||||||
env_file_path = os.path.join(repo_root, '.env')
|
|
||||||
with open(env_file_path, 'w') as env_file:
|
|
||||||
for key, value in env_vars.items():
|
|
||||||
env_file.write(f"{key}={value}\n")
|
|
||||||
return env_file_path
|
|
||||||
|
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.option('--base-container')
|
@click.option("--base-container")
|
||||||
@click.option('--source-repo', help="directory containing the webapp to build", required=True)
|
@click.option(
|
||||||
@click.option("--force-rebuild", is_flag=True, default=False, help="Override dependency checking -- always rebuild")
|
"--source-repo", help="directory containing the webapp to build", required=True
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--force-rebuild",
|
||||||
|
is_flag=True,
|
||||||
|
default=False,
|
||||||
|
help="Override dependency checking -- always rebuild",
|
||||||
|
)
|
||||||
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
||||||
@click.option("--tag", help="Container tag (default: cerc/<app_name>:local)")
|
@click.option("--tag", help="Container tag (default: cerc/<app_name>:local)")
|
||||||
@click.option("--env", help="Environment variables for webapp (format: KEY1=VALUE1,KEY2=VALUE2)", default="")
|
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, tag, env):
|
def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, tag):
|
||||||
'''build the specified webapp container'''
|
"""build the specified webapp container"""
|
||||||
logger = TimedLogger()
|
logger = TimedLogger()
|
||||||
|
|
||||||
quiet = ctx.obj.quiet
|
|
||||||
debug = ctx.obj.debug
|
debug = ctx.obj.debug
|
||||||
verbose = ctx.obj.verbose
|
verbose = ctx.obj.verbose
|
||||||
local_stack = ctx.obj.local_stack
|
local_stack = ctx.obj.local_stack
|
||||||
stack = ctx.obj.stack
|
stack = ctx.obj.stack
|
||||||
|
|
||||||
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
|
# See: https://stackoverflow.com/questions/25389095/
|
||||||
container_build_dir = Path(__file__).absolute().parent.parent.joinpath("data", "container-build")
|
# python-get-path-of-root-project-structure
|
||||||
|
container_build_dir = (
|
||||||
|
Path(__file__).absolute().parent.parent.joinpath("data", "container-build")
|
||||||
|
)
|
||||||
|
|
||||||
if local_stack:
|
if local_stack:
|
||||||
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
|
dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")]
|
||||||
logger.log(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
|
logger.log(
|
||||||
|
f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
|
||||||
|
f"{dev_root_path}"
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
|
dev_root_path = os.path.expanduser(
|
||||||
|
config("CERC_REPO_BASE_DIR", default="~/cerc")
|
||||||
|
)
|
||||||
|
|
||||||
if verbose:
|
if verbose:
|
||||||
logger.log(f'Dev Root is: {dev_root_path}')
|
logger.log(f"Dev Root is: {dev_root_path}")
|
||||||
|
|
||||||
if not base_container:
|
if not base_container:
|
||||||
base_container = determine_base_container(source_repo)
|
base_container = determine_base_container(source_repo)
|
||||||
|
|
||||||
# First build the base container.
|
# First build the base container.
|
||||||
container_build_env = build_containers.make_container_build_env(dev_root_path, container_build_dir, debug,
|
container_build_env = build_containers.make_container_build_env(
|
||||||
force_rebuild, extra_build_args)
|
dev_root_path, container_build_dir, debug, force_rebuild, extra_build_args
|
||||||
|
)
|
||||||
|
|
||||||
if verbose:
|
if verbose:
|
||||||
logger.log(f"Building base container: {base_container}")
|
logger.log(f"Building base container: {base_container}")
|
||||||
@ -92,31 +101,13 @@ def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, t
|
|||||||
if verbose:
|
if verbose:
|
||||||
logger.log(f"Base container {base_container} build finished.")
|
logger.log(f"Base container {base_container} build finished.")
|
||||||
|
|
||||||
# Now build the target webapp. We use the same build script, but with a different Dockerfile and work dir.
|
# Now build the target webapp. We use the same build script,
|
||||||
|
# but with a different Dockerfile and work dir.
|
||||||
container_build_env["CERC_WEBAPP_BUILD_RUNNING"] = "true"
|
container_build_env["CERC_WEBAPP_BUILD_RUNNING"] = "true"
|
||||||
container_build_env["CERC_CONTAINER_BUILD_WORK_DIR"] = os.path.abspath(source_repo)
|
container_build_env["CERC_CONTAINER_BUILD_WORK_DIR"] = os.path.abspath(source_repo)
|
||||||
|
container_build_env["CERC_CONTAINER_BUILD_DOCKERFILE"] = os.path.join(
|
||||||
# Check if Dockerfile exists in the repository
|
container_build_dir, base_container.replace("/", "-"), "Dockerfile.webapp"
|
||||||
repo_dockerfile = os.path.join(container_build_env["CERC_CONTAINER_BUILD_WORK_DIR"], "Dockerfile")
|
)
|
||||||
default_dockerfile = os.path.join(container_build_dir,
|
|
||||||
base_container.replace("/", "-"),
|
|
||||||
"Dockerfile.webapp")
|
|
||||||
|
|
||||||
if os.path.isfile(repo_dockerfile):
|
|
||||||
env_vars = {}
|
|
||||||
if env:
|
|
||||||
for pair in env.split(','):
|
|
||||||
key, value = pair.split('=')
|
|
||||||
env_vars[key.strip()] = value.strip()
|
|
||||||
|
|
||||||
container_build_env["CERC_CONTAINER_BUILD_DOCKERFILE"] = repo_dockerfile
|
|
||||||
|
|
||||||
# Create .env file with environment variables
|
|
||||||
env_file_path = create_env_file(env_vars, container_build_env["CERC_CONTAINER_BUILD_WORK_DIR"])
|
|
||||||
container_build_env["CERC_CONTAINER_BUILD_ENV_FILE"] = env_file_path
|
|
||||||
else:
|
|
||||||
container_build_env["CERC_CONTAINER_BUILD_DOCKERFILE"] = default_dockerfile
|
|
||||||
|
|
||||||
if not tag:
|
if not tag:
|
||||||
webapp_name = os.path.abspath(source_repo).split(os.path.sep)[-1]
|
webapp_name = os.path.abspath(source_repo).split(os.path.sep)[-1]
|
||||||
tag = f"cerc/{webapp_name}:local"
|
tag = f"cerc/{webapp_name}:local"
|
||||||
|
|||||||
@ -52,7 +52,8 @@ def _local_tag_for(container: str):
|
|||||||
|
|
||||||
# See: https://docker-docs.uclv.cu/registry/spec/api/
|
# See: https://docker-docs.uclv.cu/registry/spec/api/
|
||||||
# Emulate this:
|
# Emulate this:
|
||||||
# $ curl -u "my-username:my-token" -X GET "https://<container-registry-hostname>/v2/cerc-io/cerc/test-container/tags/list"
|
# $ curl -u "my-username:my-token" -X GET \
|
||||||
|
# "https://<container-registry-hostname>/v2/cerc-io/cerc/test-container/tags/list"
|
||||||
# {"name":"cerc-io/cerc/test-container","tags":["202402232130","202402232208"]}
|
# {"name":"cerc-io/cerc/test-container","tags":["202402232130","202402232208"]}
|
||||||
def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List[str]:
|
def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List[str]:
|
||||||
# registry looks like: git.vdb.to/cerc-io
|
# registry looks like: git.vdb.to/cerc-io
|
||||||
@ -60,7 +61,9 @@ def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List
|
|||||||
url = f"https://{registry_parts[0]}/v2/{registry_parts[1]}/{container}/tags/list"
|
url = f"https://{registry_parts[0]}/v2/{registry_parts[1]}/{container}/tags/list"
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"Fetching tags from: {url}")
|
print(f"Fetching tags from: {url}")
|
||||||
response = requests.get(url, auth=(registry_info.registry_username, registry_info.registry_token))
|
response = requests.get(
|
||||||
|
url, auth=(registry_info.registry_username, registry_info.registry_token)
|
||||||
|
)
|
||||||
if response.status_code == 200:
|
if response.status_code == 200:
|
||||||
tag_info = response.json()
|
tag_info = response.json()
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
@ -68,7 +71,10 @@ def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List
|
|||||||
tags_array = tag_info["tags"]
|
tags_array = tag_info["tags"]
|
||||||
return tags_array
|
return tags_array
|
||||||
else:
|
else:
|
||||||
error_exit(f"failed to fetch tags from image registry, status code: {response.status_code}")
|
error_exit(
|
||||||
|
f"failed to fetch tags from image registry, "
|
||||||
|
f"status code: {response.status_code}"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def _find_latest(candidate_tags: List[str]):
|
def _find_latest(candidate_tags: List[str]):
|
||||||
@ -79,9 +85,9 @@ def _find_latest(candidate_tags: List[str]):
|
|||||||
return sorted_candidates[-1]
|
return sorted_candidates[-1]
|
||||||
|
|
||||||
|
|
||||||
def _filter_for_platform(container: str,
|
def _filter_for_platform(
|
||||||
registry_info: RegistryInfo,
|
container: str, registry_info: RegistryInfo, tag_list: List[str]
|
||||||
tag_list: List[str]) -> List[str] :
|
) -> List[str]:
|
||||||
filtered_tags = []
|
filtered_tags = []
|
||||||
this_machine = platform.machine()
|
this_machine = platform.machine()
|
||||||
# Translate between Python and docker platform names
|
# Translate between Python and docker platform names
|
||||||
@ -98,7 +104,7 @@ def _filter_for_platform(container: str,
|
|||||||
manifest = manifest_cmd.inspect_verbose(remote_tag)
|
manifest = manifest_cmd.inspect_verbose(remote_tag)
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"manifest: {manifest}")
|
print(f"manifest: {manifest}")
|
||||||
image_architecture = manifest["Descriptor"]["platform"]["architecture"]
|
image_architecture = manifest["Descriptor"]["platform"]["architecture"]
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"image_architecture: {image_architecture}")
|
print(f"image_architecture: {image_architecture}")
|
||||||
if this_machine == image_architecture:
|
if this_machine == image_architecture:
|
||||||
@@ -137,21 +143,44 @@ def _add_local_tag(remote_tag: str, registry: str, local_tag: str):
 
 
 @click.command()
-@click.option('--include', help="only fetch these containers")
-@click.option('--exclude', help="don\'t fetch these containers")
-@click.option("--force-local-overwrite", is_flag=True, default=False, help="Overwrite a locally built image, if present")
-@click.option("--image-registry", required=True, help="Specify the image registry to fetch from")
-@click.option("--registry-username", required=True, help="Specify the image registry username")
-@click.option("--registry-token", required=True, help="Specify the image registry access token")
+@click.option("--include", help="only fetch these containers")
+@click.option("--exclude", help="don't fetch these containers")
+@click.option(
+    "--force-local-overwrite",
+    is_flag=True,
+    default=False,
+    help="Overwrite a locally built image, if present",
+)
+@click.option(
+    "--image-registry", required=True, help="Specify the image registry to fetch from"
+)
+@click.option(
+    "--registry-username", required=True, help="Specify the image registry username"
+)
+@click.option(
+    "--registry-token", required=True, help="Specify the image registry access token"
+)
 @click.pass_context
-def command(ctx, include, exclude, force_local_overwrite, image_registry, registry_username, registry_token):
-    '''EXPERIMENTAL: fetch the images for a stack from remote registry'''
+def command(
+    ctx,
+    include,
+    exclude,
+    force_local_overwrite,
+    image_registry,
+    registry_username,
+    registry_token,
+):
+    """EXPERIMENTAL: fetch the images for a stack from remote registry"""
 
     registry_info = RegistryInfo(image_registry, registry_username, registry_token)
     docker = DockerClient()
     if not opts.o.quiet:
         print("Logging into container registry:")
-    docker.login(registry_info.registry, registry_info.registry_username, registry_info.registry_token)
+    docker.login(
+        registry_info.registry,
+        registry_info.registry_username,
+        registry_info.registry_token,
+    )
     # Generate list of target containers
     stack = ctx.obj.stack
     containers_in_scope = get_containers_in_scope(stack)
@@ -172,19 +201,24 @@ def command(ctx, include, exclude, force_local_overwrite, image_registry, regist
                 print(f"Fetching: {image_to_fetch}")
             _fetch_image(image_to_fetch, registry_info)
             # Now check if the target container already exists exists locally already
-            if (_exists_locally(container)):
+            if _exists_locally(container):
                 if not opts.o.quiet:
                     print(f"Container image {container} already exists locally")
                 # if so, fail unless the user specified force-local-overwrite
-                if (force_local_overwrite):
+                if force_local_overwrite:
                     # In that case remove the existing :local tag
                     if not opts.o.quiet:
-                        print(f"Warning: overwriting local tag from this image: {container} because "
-                              "--force-local-overwrite was specified")
+                        print(
+                            f"Warning: overwriting local tag from this image: "
+                            f"{container} because --force-local-overwrite was specified"
+                        )
                 else:
                     if not opts.o.quiet:
-                        print(f"Skipping local tagging for this image: {container} because that would "
-                              "overwrite an existing :local tagged image, use --force-local-overwrite to do so.")
+                        print(
+                            f"Skipping local tagging for this image: {container} "
+                            "because that would overwrite an existing :local tagged "
+                            "image, use --force-local-overwrite to do so."
+                        )
                     continue
             # Tag the fetched image with the :local tag
             _add_local_tag(image_to_fetch, image_registry, local_tag)

@@ -192,4 +226,7 @@ def command(ctx, include, exclude, force_local_overwrite, image_registry, regist
         if opts.o.verbose:
             print(f"Excluding: {container}")
     if not all_containers_found:
-        print("Warning: couldn't find usable images for one or more containers, this stack will not deploy")
+        print(
+            "Warning: couldn't find usable images for one or more containers, "
+            "this stack will not deploy"
+        )
@@ -39,3 +39,8 @@ node_affinities_key = "node-affinities"
 node_tolerations_key = "node-tolerations"
 kind_config_filename = "kind-config.yml"
 kube_config_filename = "kubeconfig.yml"
+cri_base_filename = "cri-base.json"
+unlimited_memlock_key = "unlimited-memlock"
+runtime_class_key = "runtime-class"
+high_memlock_runtime = "high-memlock"
+high_memlock_spec_filename = "high-memlock-spec.json"
@@ -14,4 +14,3 @@ services:
       - "9090"
       - "9091"
      - "1317"
-

@@ -17,4 +17,3 @@ services:
       - URL_NEUTRON_TEST_REST=https://rest-palvus.pion-1.ntrn.tech
       - URL_NEUTRON_TEST_RPC=https://rpc-palvus.pion-1.ntrn.tech
       - WALLET_CONNECT_ID=0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x
-

@@ -29,4 +29,3 @@
   "l1_system_config_address": "0x5531dcff39ec1ec727c4c5d2fc49835368f805a9",
   "protocol_versions_address": "0x0000000000000000000000000000000000000000"
 }
-
@@ -12,7 +12,10 @@ from fabric import Connection
 
 
 def dump_src_db_to_file(db_host, db_port, db_user, db_password, db_name, file_name):
-    command = f"pg_dump -h {db_host} -p {db_port} -U {db_user} -d {db_name} -c --inserts -f {file_name}"
+    command = (
+        f"pg_dump -h {db_host} -p {db_port} -U {db_user} "
+        f"-d {db_name} -c --inserts -f {file_name}"
+    )
     my_env = os.environ.copy()
     my_env["PGPASSWORD"] = db_password
     print(f"Exporting from {db_host}:{db_port}/{db_name} to {file_name}... ", end="")
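The hunk above only builds the `pg_dump` command string and prepares a copy of the environment with `PGPASSWORD` set; the actual execution is outside the hunk. A self-contained sketch of how such a command can be run (the connection details below are placeholders, not values from the stack):

```python
import os
import subprocess

# Placeholder connection details for illustration only.
command = (
    "pg_dump -h localhost -p 5432 -U postgres "
    "-d example_db -c --inserts -f /tmp/example_db.sql"
)
my_env = os.environ.copy()
# Passing the password via the environment avoids putting it on the command line.
my_env["PGPASSWORD"] = "example-password"

result = subprocess.run(command, shell=True, env=my_env, capture_output=True, text=True)
if result.returncode != 0:
    raise RuntimeError(f"pg_dump failed: {result.stderr}")
print("done")
```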
@@ -940,4 +940,3 @@ ALTER TABLE ONLY public.state
 --
 -- PostgreSQL database dump complete
 --
-

@@ -18,4 +18,3 @@ root@7c4124bb09e3:/src#
 ```
 
 Now gerbil commands can be run.
-
@@ -11,6 +11,8 @@ if len(sys.argv) > 1:
 with open(testnet_config_path) as stream:
     data = yaml.safe_load(stream)
 
-for key, value in data['el_premine'].items():
-    acct = w3.eth.account.from_mnemonic(data['mnemonic'], account_path=key, passphrase='')
+for key, value in data["el_premine"].items():
+    acct = w3.eth.account.from_mnemonic(
+        data["mnemonic"], account_path=key, passphrase=""
+    )
     print("%s,%s,%s" % (key, acct.address, acct.key.hex()))
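One detail worth noting about the `from_mnemonic` call reformatted above: eth-account only allows mnemonic-based derivation after HD wallet features have been explicitly enabled (presumably done earlier in this script, outside the hunk). A standalone sketch using a well-known public test mnemonic:

```python
from web3 import Web3

w3 = Web3()
# Required before from_mnemonic() can be used; eth-account raises otherwise.
w3.eth.account.enable_unaudited_hdwallet_features()
acct = w3.eth.account.from_mnemonic(
    "test test test test test test test test test test test junk",
    account_path="m/44'/60'/0'/0/0",
    passphrase="",
)
print(acct.address)
```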
@@ -26,8 +26,14 @@ fi
 SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
 WORK_DIR="${1:-/app}"
 
+if [ -f "${WORK_DIR}/build-webapp.sh" ]; then
+  echo "Building webapp with ${WORK_DIR}/build-webapp.sh ..."
   cd "${WORK_DIR}" || exit 1
 
+  ./build-webapp.sh || exit 1
+  exit 0
+fi
+
 if [ -f "next.config.mjs" ]; then
   NEXT_CONFIG_JS="next.config.mjs"
   IMPORT_OR_REQUIRE="import"
@@ -30,36 +30,44 @@ fi
 CERC_WEBAPP_FILES_DIR="${CERC_WEBAPP_FILES_DIR:-/app}"
 cd "$CERC_WEBAPP_FILES_DIR"
 
-"$SCRIPT_DIR/apply-runtime-env.sh" "`pwd`" .next .next-r
-mv .next .next.old
-mv .next-r/.next .
+if [ -f "./run-webapp.sh" ]; then
+  echo "Running webapp with run-webapp.sh ..."
+  cd "${WORK_DIR}" || exit 1
+  ./run-webapp.sh &
+  tpid=$!
+  wait $tpid
+else
+  "$SCRIPT_DIR/apply-runtime-env.sh" "`pwd`" .next .next-r
+  mv .next .next.old
+  mv .next-r/.next .
 
 if [ "$CERC_NEXTJS_SKIP_GENERATE" != "true" ]; then
   jq -e '.scripts.cerc_generate' package.json >/dev/null
   if [ $? -eq 0 ]; then
     npm run cerc_generate > gen.out 2>&1 &
     tail -f gen.out &
     tpid=$!
 
     count=0
     generate_done="false"
     while [ $count -lt $CERC_MAX_GENERATE_TIME ] && [ "$generate_done" == "false" ]; do
       sleep 1
       count=$((count + 1))
       grep 'rendered as static' gen.out > /dev/null
       if [ $? -eq 0 ]; then
         generate_done="true"
       fi
     done
 
     if [ $generate_done != "true" ]; then
       echo "ERROR: 'npm run cerc_generate' not successful within CERC_MAX_GENERATE_TIME" 1>&2
       exit 1
     fi
 
     kill $tpid $(ps -ef | grep node | grep next | grep generate | awk '{print $2}') 2>/dev/null
     tpid=""
   fi
 fi
 
 $CERC_BUILD_TOOL start . -- -p ${CERC_LISTEN_PORT:-80}
+fi
@@ -5,4 +5,3 @@ WORKDIR /app
 COPY . .
 
 RUN yarn
-
@@ -0,0 +1,260 @@
+# Caddy Ingress Controller for kind
+# Based on: https://github.com/caddyserver/ingress
+# Provides automatic HTTPS with Let's Encrypt
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: caddy-system
+  labels:
+    app.kubernetes.io/name: caddy-ingress-controller
+    app.kubernetes.io/instance: caddy-ingress
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: caddy-ingress-controller
+  namespace: caddy-system
+  labels:
+    app.kubernetes.io/name: caddy-ingress-controller
+    app.kubernetes.io/instance: caddy-ingress
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: caddy-ingress-controller
+  labels:
+    app.kubernetes.io/name: caddy-ingress-controller
+    app.kubernetes.io/instance: caddy-ingress
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - configmaps
+      - endpoints
+      - nodes
+      - pods
+      - namespaces
+      - services
+    verbs:
+      - list
+      - watch
+      - get
+  - apiGroups:
+      - ""
+    resources:
+      - secrets
+    verbs:
+      - list
+      - watch
+      - get
+      - create
+      - update
+      - delete
+  - apiGroups:
+      - ""
+    resources:
+      - nodes
+    verbs:
+      - get
+  - apiGroups:
+      - ""
+    resources:
+      - events
+    verbs:
+      - create
+      - patch
+  - apiGroups:
+      - networking.k8s.io
+    resources:
+      - ingresses
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - networking.k8s.io
+    resources:
+      - ingresses/status
+    verbs:
+      - update
+  - apiGroups:
+      - networking.k8s.io
+    resources:
+      - ingressclasses
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - coordination.k8s.io
+    resources:
+      - leases
+    verbs:
+      - get
+      - create
+      - update
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: caddy-ingress-controller
+  labels:
+    app.kubernetes.io/name: caddy-ingress-controller
+    app.kubernetes.io/instance: caddy-ingress
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: caddy-ingress-controller
+subjects:
+  - kind: ServiceAccount
+    name: caddy-ingress-controller
+    namespace: caddy-system
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: caddy-ingress-controller-configmap
+  namespace: caddy-system
+  labels:
+    app.kubernetes.io/name: caddy-ingress-controller
+    app.kubernetes.io/instance: caddy-ingress
+data:
+  # Caddy global options
+  acmeCA: "https://acme-v02.api.letsencrypt.org/directory"
+  email: ""
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: caddy-ingress-controller
+  namespace: caddy-system
+  labels:
+    app.kubernetes.io/name: caddy-ingress-controller
+    app.kubernetes.io/instance: caddy-ingress
+    app.kubernetes.io/component: controller
+spec:
+  type: NodePort
+  ports:
+    - name: http
+      port: 80
+      targetPort: http
+      protocol: TCP
+    - name: https
+      port: 443
+      targetPort: https
+      protocol: TCP
+  selector:
+    app.kubernetes.io/name: caddy-ingress-controller
+    app.kubernetes.io/instance: caddy-ingress
+    app.kubernetes.io/component: controller
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: caddy-ingress-controller
+  namespace: caddy-system
+  labels:
+    app.kubernetes.io/name: caddy-ingress-controller
+    app.kubernetes.io/instance: caddy-ingress
+    app.kubernetes.io/component: controller
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: caddy-ingress-controller
+      app.kubernetes.io/instance: caddy-ingress
+      app.kubernetes.io/component: controller
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/name: caddy-ingress-controller
+        app.kubernetes.io/instance: caddy-ingress
+        app.kubernetes.io/component: controller
+    spec:
+      serviceAccountName: caddy-ingress-controller
+      terminationGracePeriodSeconds: 60
+      nodeSelector:
+        ingress-ready: "true"
+        kubernetes.io/os: linux
+      tolerations:
+        - effect: NoSchedule
+          key: node-role.kubernetes.io/master
+          operator: Equal
+        - effect: NoSchedule
+          key: node-role.kubernetes.io/control-plane
+          operator: Equal
+      containers:
+        - name: caddy-ingress-controller
+          image: caddy/ingress:latest
+          imagePullPolicy: IfNotPresent
+          ports:
+            - name: http
+              containerPort: 80
+              hostPort: 80
+              protocol: TCP
+            - name: https
+              containerPort: 443
+              hostPort: 443
+              protocol: TCP
+          env:
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            - name: POD_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+          args:
+            - -config-map=caddy-system/caddy-ingress-controller-configmap
+            - -class-name=caddy
+          resources:
+            requests:
+              cpu: 100m
+              memory: 128Mi
+            limits:
+              cpu: 1000m
+              memory: 512Mi
+          readinessProbe:
+            httpGet:
+              path: /healthz
+              port: 9765
+            initialDelaySeconds: 3
+            periodSeconds: 10
+          livenessProbe:
+            httpGet:
+              path: /healthz
+              port: 9765
+            initialDelaySeconds: 3
+            periodSeconds: 10
+          securityContext:
+            allowPrivilegeEscalation: true
+            capabilities:
+              add:
+                - NET_BIND_SERVICE
+              drop:
+                - ALL
+            runAsUser: 0
+            runAsGroup: 0
+          volumeMounts:
+            - name: caddy-data
+              mountPath: /data
+            - name: caddy-config
+              mountPath: /config
+      volumes:
+        - name: caddy-data
+          emptyDir: {}
+        - name: caddy-config
+          emptyDir: {}
+---
+apiVersion: networking.k8s.io/v1
+kind: IngressClass
+metadata:
+  name: caddy
+  labels:
+    app.kubernetes.io/name: caddy-ingress-controller
+    app.kubernetes.io/instance: caddy-ingress
+  annotations:
+    ingressclass.kubernetes.io/is-default-class: "true"
+spec:
+  controller: caddy.io/ingress-controller
@@ -14,4 +14,3 @@ containers:
 pods:
   - fixturenet-blast
   - foundry
-

@@ -3,4 +3,3 @@
 A "loaded" version of fixturenet-eth, with all the bells and whistles enabled.
 
 TODO: write me
-
@@ -14,26 +14,25 @@
 # along with this program. If not, see <http:#www.gnu.org/licenses/>.
 
 from stack_orchestrator.deploy.deployment_context import DeploymentContext
-from ruamel.yaml import YAML
 
 
 def create(context: DeploymentContext, extra_args):
-    # Slightly modify the base fixturenet-eth compose file to replace the startup script for fixturenet-eth-geth-1
-    # We need to start geth with the flag to allow non eip-155 compliant transactions in order to publish the
-    # deterministic-deployment-proxy contract, which itself is a prereq for Optimism contract deployment
-    fixturenet_eth_compose_file = context.deployment_dir.joinpath('compose', 'docker-compose-fixturenet-eth.yml')
-
-    with open(fixturenet_eth_compose_file, 'r') as yaml_file:
-        yaml = YAML()
-        yaml_data = yaml.load(yaml_file)
-
-    new_script = '../config/fixturenet-optimism/run-geth.sh:/opt/testnet/run.sh'
-
-    if new_script not in yaml_data['services']['fixturenet-eth-geth-1']['volumes']:
-        yaml_data['services']['fixturenet-eth-geth-1']['volumes'].append(new_script)
-
-    with open(fixturenet_eth_compose_file, 'w') as yaml_file:
-        yaml = YAML()
-        yaml.dump(yaml_data, yaml_file)
+    # Slightly modify the base fixturenet-eth compose file to replace the
+    # startup script for fixturenet-eth-geth-1
+    # We need to start geth with the flag to allow non eip-155 compliant
+    # transactions in order to publish the
+    # deterministic-deployment-proxy contract, which itself is a prereq for
+    # Optimism contract deployment
+    fixturenet_eth_compose_file = context.deployment_dir.joinpath(
+        "compose", "docker-compose-fixturenet-eth.yml"
+    )
+
+    new_script = "../config/fixturenet-optimism/run-geth.sh:/opt/testnet/run.sh"
+
+    def add_geth_volume(yaml_data):
+        if new_script not in yaml_data["services"]["fixturenet-eth-geth-1"]["volumes"]:
+            yaml_data["services"]["fixturenet-eth-geth-1"]["volumes"].append(new_script)
+
+    context.modify_yaml(fixturenet_eth_compose_file, add_geth_volume)
 
     return None
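The refactor above replaces the explicit ruamel.yaml load/modify/dump cycle with a `context.modify_yaml(...)` call plus a callback. The helper itself is not shown in this diff; a minimal sketch of what such a method could look like (an assumption based on the code it replaces, which used ruamel.yaml) is:

```python
from pathlib import Path
from typing import Callable

from ruamel.yaml import YAML


def modify_yaml(file_path: Path, modifier: Callable[[dict], None]) -> None:
    # Load the YAML document, let the caller mutate it in place, then write it back.
    yaml = YAML()
    with open(file_path, "r") as yaml_file:
        yaml_data = yaml.load(yaml_file)
    modifier(yaml_data)
    with open(file_path, "w") as yaml_file:
        yaml.dump(yaml_data, yaml_file)
```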
@@ -22,18 +22,24 @@ import yaml
 def create(context, extra_args):
     # Our goal here is just to copy the json files for blast
     yml_path = context.deployment_dir.joinpath("spec.yml")
-    with open(yml_path, 'r') as file:
+    with open(yml_path, "r") as file:
         data = yaml.safe_load(file)
 
-    mount_point = data['volumes']['blast-data']
+    mount_point = data["volumes"]["blast-data"]
     if mount_point[0] == "/":
         deploy_dir = Path(mount_point)
     else:
         deploy_dir = context.deployment_dir.joinpath(mount_point)
 
     command_context = extra_args[2]
-    compose_file = [f for f in command_context.cluster_context.compose_files if "mainnet-blast" in f][0]
-    source_config_file = Path(compose_file).parent.parent.joinpath("config", "mainnet-blast", "genesis.json")
+    compose_file = [
+        f for f in command_context.cluster_context.compose_files if "mainnet-blast" in f
+    ][0]
+    source_config_file = Path(compose_file).parent.parent.joinpath(
+        "config", "mainnet-blast", "genesis.json"
+    )
     copy(source_config_file, deploy_dir)
-    source_config_file = Path(compose_file).parent.parent.joinpath("config", "mainnet-blast", "rollup.json")
+    source_config_file = Path(compose_file).parent.parent.joinpath(
+        "config", "mainnet-blast", "rollup.json"
+    )
     copy(source_config_file, deploy_dir)
@@ -27,6 +27,8 @@ def setup(ctx):
 def create(ctx, extra_args):
     # Generate the JWT secret and save to its config file
     secret = token_hex(32)
-    jwt_file_path = ctx.deployment_dir.joinpath("data", "mainnet_eth_plugeth_config_data", "jwtsecret")
-    with open(jwt_file_path, 'w+') as jwt_file:
+    jwt_file_path = ctx.deployment_dir.joinpath(
+        "data", "mainnet_eth_plugeth_config_data", "jwtsecret"
+    )
+    with open(jwt_file_path, "w+") as jwt_file:
         jwt_file.write(secret)
@@ -27,6 +27,8 @@ def setup(ctx):
 def create(ctx, extra_args):
     # Generate the JWT secret and save to its config file
     secret = token_hex(32)
-    jwt_file_path = ctx.deployment_dir.joinpath("data", "mainnet_eth_config_data", "jwtsecret")
-    with open(jwt_file_path, 'w+') as jwt_file:
+    jwt_file_path = ctx.deployment_dir.joinpath(
+        "data", "mainnet_eth_config_data", "jwtsecret"
+    )
+    with open(jwt_file_path, "w+") as jwt_file:
         jwt_file.write(secret)
@@ -1,2 +1 @@
 # Laconic Mainnet Deployment (experimental)
-
@@ -14,7 +14,10 @@
 # along with this program. If not, see <http:#www.gnu.org/licenses/>.
 
 from stack_orchestrator.util import get_yaml
-from stack_orchestrator.deploy.deploy_types import DeployCommandContext, LaconicStackSetupCommand
+from stack_orchestrator.deploy.deploy_types import (
+    DeployCommandContext,
+    LaconicStackSetupCommand,
+)
 from stack_orchestrator.deploy.deployment_context import DeploymentContext
 from stack_orchestrator.deploy.stack_state import State
 from stack_orchestrator.deploy.deploy_util import VolumeMapping, run_container_command

@@ -75,7 +78,12 @@ def _copy_gentx_files(network_dir: Path, gentx_file_list: str):
     gentx_files = _comma_delimited_to_list(gentx_file_list)
     for gentx_file in gentx_files:
         gentx_file_path = Path(gentx_file)
-        copyfile(gentx_file_path, os.path.join(network_dir, "config", "gentx", os.path.basename(gentx_file_path)))
+        copyfile(
+            gentx_file_path,
+            os.path.join(
+                network_dir, "config", "gentx", os.path.basename(gentx_file_path)
+            ),
+        )
 
 
 def _remove_persistent_peers(network_dir: Path):

@@ -86,8 +94,13 @@ def _remove_persistent_peers(network_dir: Path):
     with open(config_file_path, "r") as input_file:
         config_file_content = input_file.read()
     persistent_peers_pattern = '^persistent_peers = "(.+?)"'
-    replace_with = "persistent_peers = \"\""
-    config_file_content = re.sub(persistent_peers_pattern, replace_with, config_file_content, flags=re.MULTILINE)
+    replace_with = 'persistent_peers = ""'
+    config_file_content = re.sub(
+        persistent_peers_pattern,
+        replace_with,
+        config_file_content,
+        flags=re.MULTILINE,
+    )
     with open(config_file_path, "w") as output_file:
         output_file.write(config_file_content)
 

@@ -100,8 +113,13 @@ def _insert_persistent_peers(config_dir: Path, new_persistent_peers: str):
     with open(config_file_path, "r") as input_file:
         config_file_content = input_file.read()
     persistent_peers_pattern = r'^persistent_peers = ""'
-    replace_with = f"persistent_peers = \"{new_persistent_peers}\""
-    config_file_content = re.sub(persistent_peers_pattern, replace_with, config_file_content, flags=re.MULTILINE)
+    replace_with = f'persistent_peers = "{new_persistent_peers}"'
+    config_file_content = re.sub(
+        persistent_peers_pattern,
+        replace_with,
+        config_file_content,
+        flags=re.MULTILINE,
+    )
     with open(config_file_path, "w") as output_file:
         output_file.write(config_file_content)
 
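The two helpers above rely on `re.MULTILINE` so that the `^`-anchored patterns match at the start of each line of the TOML file rather than only at the start of the whole string. A small self-contained demonstration (the sample content is made up for illustration):

```python
import re

# Toy config.toml-style content; only the persistent_peers line should change.
config_file_content = 'log_level = "info"\npersistent_peers = ""\n'
new_persistent_peers = "abc123@10.0.0.1:26656"

persistent_peers_pattern = r'^persistent_peers = ""'
replace_with = f'persistent_peers = "{new_persistent_peers}"'
config_file_content = re.sub(
    persistent_peers_pattern, replace_with, config_file_content, flags=re.MULTILINE
)
print(config_file_content)
# log_level = "info"
# persistent_peers = "abc123@10.0.0.1:26656"
```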
@@ -113,9 +131,11 @@ def _enable_cors(config_dir: Path):
         sys.exit(1)
     with open(config_file_path, "r") as input_file:
         config_file_content = input_file.read()
-    cors_pattern = r'^cors_allowed_origins = \[]'
+    cors_pattern = r"^cors_allowed_origins = \[]"
     replace_with = 'cors_allowed_origins = ["*"]'
-    config_file_content = re.sub(cors_pattern, replace_with, config_file_content, flags=re.MULTILINE)
+    config_file_content = re.sub(
+        cors_pattern, replace_with, config_file_content, flags=re.MULTILINE
+    )
     with open(config_file_path, "w") as output_file:
         output_file.write(config_file_content)
     app_file_path = config_dir.joinpath("app.toml")

@@ -124,9 +144,11 @@ def _enable_cors(config_dir: Path):
         sys.exit(1)
     with open(app_file_path, "r") as input_file:
         app_file_content = input_file.read()
-    cors_pattern = r'^enabled-unsafe-cors = false'
+    cors_pattern = r"^enabled-unsafe-cors = false"
     replace_with = "enabled-unsafe-cors = true"
-    app_file_content = re.sub(cors_pattern, replace_with, app_file_content, flags=re.MULTILINE)
+    app_file_content = re.sub(
+        cors_pattern, replace_with, app_file_content, flags=re.MULTILINE
+    )
     with open(app_file_path, "w") as output_file:
         output_file.write(app_file_content)
 

@@ -141,7 +163,9 @@ def _set_listen_address(config_dir: Path):
     existing_pattern = r'^laddr = "tcp://127.0.0.1:26657"'
     replace_with = 'laddr = "tcp://0.0.0.0:26657"'
     print(f"Replacing in: {config_file_path}")
-    config_file_content = re.sub(existing_pattern, replace_with, config_file_content, flags=re.MULTILINE)
+    config_file_content = re.sub(
+        existing_pattern, replace_with, config_file_content, flags=re.MULTILINE
+    )
     with open(config_file_path, "w") as output_file:
         output_file.write(config_file_content)
     app_file_path = config_dir.joinpath("app.toml")

@@ -152,10 +176,14 @@ def _set_listen_address(config_dir: Path):
         app_file_content = input_file.read()
     existing_pattern1 = r'^address = "tcp://localhost:1317"'
     replace_with1 = 'address = "tcp://0.0.0.0:1317"'
-    app_file_content = re.sub(existing_pattern1, replace_with1, app_file_content, flags=re.MULTILINE)
+    app_file_content = re.sub(
+        existing_pattern1, replace_with1, app_file_content, flags=re.MULTILINE
+    )
     existing_pattern2 = r'^address = "localhost:9090"'
     replace_with2 = 'address = "0.0.0.0:9090"'
-    app_file_content = re.sub(existing_pattern2, replace_with2, app_file_content, flags=re.MULTILINE)
+    app_file_content = re.sub(
+        existing_pattern2, replace_with2, app_file_content, flags=re.MULTILINE
+    )
     with open(app_file_path, "w") as output_file:
         output_file.write(app_file_content)
 

@@ -164,7 +192,10 @@ def _phase_from_params(parameters):
     phase = SetupPhase.ILLEGAL
     if parameters.initialize_network:
         if parameters.join_network or parameters.create_network:
-            print("Can't supply --join-network or --create-network with --initialize-network")
+            print(
+                "Can't supply --join-network or --create-network "
+                "with --initialize-network"
+            )
             sys.exit(1)
         if not parameters.chain_id:
             print("--chain-id is required")

@@ -176,24 +207,36 @@ def _phase_from_params(parameters):
         phase = SetupPhase.INITIALIZE
     elif parameters.join_network:
         if parameters.initialize_network or parameters.create_network:
-            print("Can't supply --initialize-network or --create-network with --join-network")
+            print(
+                "Can't supply --initialize-network or --create-network "
+                "with --join-network"
+            )
             sys.exit(1)
         phase = SetupPhase.JOIN
     elif parameters.create_network:
         if parameters.initialize_network or parameters.join_network:
-            print("Can't supply --initialize-network or --join-network with --create-network")
+            print(
+                "Can't supply --initialize-network or --join-network "
+                "with --create-network"
+            )
             sys.exit(1)
         phase = SetupPhase.CREATE
     elif parameters.connect_network:
         if parameters.initialize_network or parameters.join_network:
-            print("Can't supply --initialize-network or --join-network with --connect-network")
+            print(
+                "Can't supply --initialize-network or --join-network "
+                "with --connect-network"
+            )
             sys.exit(1)
         phase = SetupPhase.CONNECT
     return phase
 
 
-def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCommand, extra_args):
+def setup(
+    command_context: DeployCommandContext,
+    parameters: LaconicStackSetupCommand,
+    extra_args,
+):
     options = opts.o
 
     currency = "alnt"  # Does this need to be a parameter?
@@ -205,12 +248,9 @@ def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCo
 
     network_dir = Path(parameters.network_dir).absolute()
     laconicd_home_path_in_container = "/laconicd-home"
-    mounts = [
-        VolumeMapping(network_dir, laconicd_home_path_in_container)
-    ]
+    mounts = [VolumeMapping(str(network_dir), laconicd_home_path_in_container)]
 
     if phase == SetupPhase.INITIALIZE:
 
         # We want to create the directory so if it exists that's an error
         if os.path.exists(network_dir):
             print(f"Error: network directory {network_dir} already exists")

@@ -220,13 +260,18 @@ def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCo
 
         output, status = run_container_command(
             command_context,
-            "laconicd", f"laconicd init {parameters.node_moniker} --home {laconicd_home_path_in_container}\
-            --chain-id {parameters.chain_id} --default-denom {currency}", mounts)
+            "laconicd",
+            f"laconicd init {parameters.node_moniker} "
+            f"--home {laconicd_home_path_in_container} "
+            f"--chain-id {parameters.chain_id} --default-denom {currency}",
+            mounts,
+        )
         if options.debug:
             print(f"Command output: {output}")
 
     elif phase == SetupPhase.JOIN:
-        # In the join phase (alternative to connect) we are participating in a genesis ceremony for the chain
+        # In the join phase (alternative to connect) we are participating in a
+        # genesis ceremony for the chain
         if not os.path.exists(network_dir):
             print(f"Error: network directory {network_dir} doesn't exist")
             sys.exit(1)

@@ -234,52 +279,72 @@ def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCo
         chain_id = _get_chain_id_from_config(network_dir)
 
         output1, status1 = run_container_command(
-            command_context, "laconicd", f"laconicd keys add {parameters.key_name} --home {laconicd_home_path_in_container}\
-            --keyring-backend test", mounts)
+            command_context,
+            "laconicd",
+            f"laconicd keys add {parameters.key_name} "
+            f"--home {laconicd_home_path_in_container} --keyring-backend test",
+            mounts,
+        )
         if options.debug:
             print(f"Command output: {output1}")
         output2, status2 = run_container_command(
             command_context,
             "laconicd",
-            f"laconicd genesis add-genesis-account {parameters.key_name} 12900000000000000000000{currency}\
-            --home {laconicd_home_path_in_container} --keyring-backend test",
-            mounts)
+            f"laconicd genesis add-genesis-account {parameters.key_name} "
+            f"12900000000000000000000{currency} "
+            f"--home {laconicd_home_path_in_container} --keyring-backend test",
+            mounts,
+        )
         if options.debug:
             print(f"Command output: {output2}")
         output3, status3 = run_container_command(
             command_context,
             "laconicd",
-            f"laconicd genesis gentx {parameters.key_name} 90000000000{currency} --home {laconicd_home_path_in_container}\
-            --chain-id {chain_id} --keyring-backend test",
-            mounts)
+            f"laconicd genesis gentx {parameters.key_name} "
+            f"90000000000{currency} --home {laconicd_home_path_in_container} "
+            f"--chain-id {chain_id} --keyring-backend test",
+            mounts,
+        )
         if options.debug:
             print(f"Command output: {output3}")
         output4, status4 = run_container_command(
             command_context,
             "laconicd",
-            f"laconicd keys show {parameters.key_name} -a --home {laconicd_home_path_in_container} --keyring-backend test",
-            mounts)
+            f"laconicd keys show {parameters.key_name} -a "
+            f"--home {laconicd_home_path_in_container} --keyring-backend test",
+            mounts,
+        )
         print(f"Node account address: {output4}")
 
     elif phase == SetupPhase.CONNECT:
-        # In the connect phase (named to not conflict with join) we are making a node that syncs a chain with existing genesis.json
-        # but not with validator role. We need this kind of node in order to bootstrap it into a validator after it syncs
+        # In the connect phase (named to not conflict with join) we are
+        # making a node that syncs a chain with existing genesis.json
+        # but not with validator role. We need this kind of node in order to
+        # bootstrap it into a validator after it syncs
         output1, status1 = run_container_command(
-            command_context, "laconicd", f"laconicd keys add {parameters.key_name} --home {laconicd_home_path_in_container}\
-            --keyring-backend test", mounts)
+            command_context,
+            "laconicd",
+            f"laconicd keys add {parameters.key_name} "
+            f"--home {laconicd_home_path_in_container} --keyring-backend test",
+            mounts,
+        )
         if options.debug:
             print(f"Command output: {output1}")
         output2, status2 = run_container_command(
             command_context,
             "laconicd",
-            f"laconicd keys show {parameters.key_name} -a --home {laconicd_home_path_in_container} --keyring-backend test",
-            mounts)
+            f"laconicd keys show {parameters.key_name} -a "
+            f"--home {laconicd_home_path_in_container} --keyring-backend test",
+            mounts,
+        )
         print(f"Node account address: {output2}")
         output3, status3 = run_container_command(
             command_context,
             "laconicd",
-            f"laconicd cometbft show-validator --home {laconicd_home_path_in_container}",
-            mounts)
+            f"laconicd cometbft show-validator "
+            f"--home {laconicd_home_path_in_container}",
+            mounts,
+        )
         print(f"Node validator address: {output3}")
 
     elif phase == SetupPhase.CREATE:
@@ -287,42 +352,74 @@ def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCo
             print(f"Error: network directory {network_dir} doesn't exist")
             sys.exit(1)
 
-        # In the CREATE phase, we are either a "coordinator" node, generating the genesis.json file ourselves
-        # OR we are a "not-coordinator" node, consuming a genesis file we got from the coordinator node.
+        # In the CREATE phase, we are either a "coordinator" node,
+        # generating the genesis.json file ourselves
+        # OR we are a "not-coordinator" node, consuming a genesis file from
+        # the coordinator node.
         if parameters.genesis_file:
             # We got the genesis file from elsewhere
             # Copy it into our network dir
             genesis_file_path = Path(parameters.genesis_file)
             if not os.path.exists(genesis_file_path):
-                print(f"Error: supplied genesis file: {parameters.genesis_file} does not exist.")
+                print(
+                    f"Error: supplied genesis file: {parameters.genesis_file} "
+                    "does not exist."
+                )
                 sys.exit(1)
-            copyfile(genesis_file_path, os.path.join(network_dir, "config", os.path.basename(genesis_file_path)))
+            copyfile(
+                genesis_file_path,
+                os.path.join(
+                    network_dir, "config", os.path.basename(genesis_file_path)
+                ),
+            )
         else:
             # We're generating the genesis file
             # First look in the supplied gentx files for the other nodes' keys
-            other_node_keys = _get_node_keys_from_gentx_files(parameters.gentx_address_list)
+            other_node_keys = _get_node_keys_from_gentx_files(
+                parameters.gentx_address_list
+            )
             # Add those keys to our genesis, with balances we determine here (why?)
+            outputk = None
             for other_node_key in other_node_keys:
                 outputk, statusk = run_container_command(
-                    command_context, "laconicd", f"laconicd genesis add-genesis-account {other_node_key} \
-                    12900000000000000000000{currency}\
-                    --home {laconicd_home_path_in_container} --keyring-backend test", mounts)
-                if options.debug:
+                    command_context,
+                    "laconicd",
+                    f"laconicd genesis add-genesis-account {other_node_key} "
+                    f"12900000000000000000000{currency} "
+                    f"--home {laconicd_home_path_in_container} "
+                    "--keyring-backend test",
+                    mounts,
+                )
+                if options.debug and outputk is not None:
                     print(f"Command output: {outputk}")
             # Copy the gentx json files into our network dir
             _copy_gentx_files(network_dir, parameters.gentx_file_list)
             # Now we can run collect-gentxs
             output1, status1 = run_container_command(
-                command_context, "laconicd", f"laconicd genesis collect-gentxs --home {laconicd_home_path_in_container}", mounts)
+                command_context,
+                "laconicd",
+                f"laconicd genesis collect-gentxs "
+                f"--home {laconicd_home_path_in_container}",
+                mounts,
+            )
             if options.debug:
                 print(f"Command output: {output1}")
-            print(f"Generated genesis file, please copy to other nodes as required: \
-                {os.path.join(network_dir, 'config', 'genesis.json')}")
-            # Last thing, collect-gentxs puts a likely bogus set of persistent_peers in config.toml so we remove that now
+            genesis_path = os.path.join(network_dir, "config", "genesis.json")
+            print(
+                f"Generated genesis file, please copy to other nodes "
+                f"as required: {genesis_path}"
+            )
+            # Last thing, collect-gentxs puts a likely bogus set of persistent_peers
+            # in config.toml so we remove that now
             _remove_persistent_peers(network_dir)
             # In both cases we validate the genesis file now
             output2, status1 = run_container_command(
-                command_context, "laconicd", f"laconicd genesis validate-genesis --home {laconicd_home_path_in_container}", mounts)
+                command_context,
+                "laconicd",
+                f"laconicd genesis validate-genesis "
+                f"--home {laconicd_home_path_in_container}",
+                mounts,
+            )
             print(f"validate-genesis result: {output2}")
 
     else:

@@ -341,15 +438,23 @@ def create(deployment_context: DeploymentContext, extra_args):
         sys.exit(1)
     config_dir_path = network_dir_path.joinpath("config")
     if not (config_dir_path.exists() and config_dir_path.is_dir()):
-        print(f"Error: supplied network directory does not contain a config directory: {config_dir_path}")
+        print(
+            f"Error: supplied network directory does not contain "
+            f"a config directory: {config_dir_path}"
+        )
         sys.exit(1)
     data_dir_path = network_dir_path.joinpath("data")
     if not (data_dir_path.exists() and data_dir_path.is_dir()):
-        print(f"Error: supplied network directory does not contain a data directory: {data_dir_path}")
+        print(
+            f"Error: supplied network directory does not contain "
+            f"a data directory: {data_dir_path}"
+        )
         sys.exit(1)
     # Copy the network directory contents into our deployment
     # TODO: change this to work with non local paths
-    deployment_config_dir = deployment_context.deployment_dir.joinpath("data", "laconicd-config")
+    deployment_config_dir = deployment_context.deployment_dir.joinpath(
+        "data", "laconicd-config"
+    )
     copytree(config_dir_path, deployment_config_dir, dirs_exist_ok=True)
     # If supplied, add the initial persistent peers to the config file
     if extra_args[1]:

@@ -360,7 +465,9 @@ def create(deployment_context: DeploymentContext, extra_args):
     _set_listen_address(deployment_config_dir)
     # Copy the data directory contents into our deployment
     # TODO: change this to work with non local paths
-    deployment_data_dir = deployment_context.deployment_dir.joinpath("data", "laconicd-data")
+    deployment_data_dir = deployment_context.deployment_dir.joinpath(
+        "data", "laconicd-data"
+    )
     copytree(data_dir_path, deployment_data_dir, dirs_exist_ok=True)
 
 
@@ -27,3 +27,25 @@ The Package Registry Stack supports a build environment that requires a package
 ```
 
 * The local gitea registry can now be accessed at <http://localhost:3000> (the username and password can be taken from the deployment logs)
+
+* Configure the hostname `gitea.local`:
+
+  Update `/etc/hosts`:
+
+  ```bash
+  sudo nano /etc/hosts
+
+  # Add the following line
+  127.0.0.1 gitea.local
+  ```
+
+  Check resolution:
+
+  ```bash
+  ping gitea.local
+
+  PING gitea.local (127.0.0.1) 56(84) bytes of data.
+  64 bytes from localhost (127.0.0.1): icmp_seq=1 ttl=64 time=0.147 ms
+  64 bytes from localhost (127.0.0.1): icmp_seq=2 ttl=64 time=0.033 ms
+  ...
+  ```
@@ -15,6 +15,7 @@
 
 from stack_orchestrator.util import get_yaml
 from stack_orchestrator.deploy.deploy_types import DeployCommandContext
+from stack_orchestrator.deploy.deployment_context import DeploymentContext
 from stack_orchestrator.deploy.stack_state import State
 from stack_orchestrator.deploy.deploy_util import VolumeMapping, run_container_command
 from pathlib import Path

@@ -24,16 +25,20 @@ default_spec_file_content = """config:
 """
 
 
-# Output a known string to a know file in the bind mounted directory ./container-output-dir
+# Output a known string to a know file in the bind mounted directory
+# ./container-output-dir
 # for test purposes -- test checks that the file was written.
 def setup(command_context: DeployCommandContext, parameters, extra_args):
     host_directory = "./container-output-dir"
     host_directory_absolute = Path(extra_args[0]).absolute().joinpath(host_directory)
     host_directory_absolute.mkdir(parents=True, exist_ok=True)
-    mounts = [
-        VolumeMapping(host_directory_absolute, "/data")
-    ]
-    output, status = run_container_command(command_context, "test", "echo output-data > /data/output-file && echo success", mounts)
+    mounts = [VolumeMapping(str(host_directory_absolute), "/data")]
+    output, status = run_container_command(
+        command_context,
+        "test",
+        "echo output-data > /data/output-file && echo success",
+        mounts,
+    )
 
 
 def init(command_context: DeployCommandContext):

@@ -41,10 +46,10 @@ def init(command_context: DeployCommandContext):
     return yaml.load(default_spec_file_content)
 
 
-def create(command_context: DeployCommandContext, extra_args):
+def create(deployment_context: DeploymentContext, extra_args):
     data = "create-command-output-data"
-    output_file_path = command_context.deployment_dir.joinpath("create-file")
-    with open(output_file_path, 'w+') as output_file:
+    output_file_path = deployment_context.deployment_dir.joinpath("create-file")
+    with open(output_file_path, "w+") as output_file:
         output_file.write(data)
 
 
@@ -2,7 +2,6 @@ version: "1.0"
 name: test
 description: "A test stack"
 repos:
-  - git.vdb.to/cerc-io/laconicd
   - git.vdb.to/cerc-io/test-project@test-branch
 containers:
   - cerc/test-container
@@ -14,8 +14,13 @@
 # along with this program. If not, see <http:#www.gnu.org/licenses/>.
 
 from pathlib import Path
+from typing import Optional
 from python_on_whales import DockerClient, DockerException
-from stack_orchestrator.deploy.deployer import Deployer, DeployerException, DeployerConfigGenerator
+from stack_orchestrator.deploy.deployer import (
+    Deployer,
+    DeployerException,
+    DeployerConfigGenerator,
+)
 from stack_orchestrator.deploy.deployment_context import DeploymentContext
 from stack_orchestrator.opts import opts
 

@@ -24,10 +29,24 @@ class DockerDeployer(Deployer):
     name: str = "compose"
     type: str
 
-    def __init__(self, type, deployment_context: DeploymentContext, compose_files, compose_project_name, compose_env_file) -> None:
-        self.docker = DockerClient(compose_files=compose_files, compose_project_name=compose_project_name,
-                                   compose_env_file=compose_env_file)
+    def __init__(
+        self,
+        type: str,
+        deployment_context: Optional[DeploymentContext],
+        compose_files: list,
+        compose_project_name: Optional[str],
+        compose_env_file: Optional[str],
+    ) -> None:
+        self.docker = DockerClient(
+            compose_files=compose_files,
+            compose_project_name=compose_project_name,
+            compose_env_file=compose_env_file,
+        )
         self.type = type
+        # Store these for later use in run_job
+        self.compose_files = compose_files
+        self.compose_project_name = compose_project_name
+        self.compose_env_file = compose_env_file
 
     def up(self, detach, skip_cluster_management, services):
         if not opts.o.dry_run:

@@ -68,35 +87,98 @@ class DockerDeployer(Deployer):
     def port(self, service, private_port):
         if not opts.o.dry_run:
             try:
-                return self.docker.compose.port(service=service, private_port=private_port)
+                return self.docker.compose.port(
+                    service=service, private_port=private_port
+                )
             except DockerException as e:
                 raise DeployerException(e)
 
     def execute(self, service, command, tty, envs):
         if not opts.o.dry_run:
             try:
-                return self.docker.compose.execute(service=service, command=command, tty=tty, envs=envs)
+                return self.docker.compose.execute(
+                    service=service, command=command, tty=tty, envs=envs
+                )
             except DockerException as e:
                 raise DeployerException(e)
 
     def logs(self, services, tail, follow, stream):
         if not opts.o.dry_run:
             try:
-                return self.docker.compose.logs(services=services, tail=tail, follow=follow, stream=stream)
+                return self.docker.compose.logs(
+                    services=services, tail=tail, follow=follow, stream=stream
+                )
             except DockerException as e:
                 raise DeployerException(e)
 
-    def run(self, image: str, command=None, user=None, volumes=None, entrypoint=None, env={}, ports=[], detach=False):
+    def run(
+        self,
+        image: str,
+        command=None,
+        user=None,
+        volumes=None,
+        entrypoint=None,
+        env={},
+        ports=[],
+        detach=False,
+    ):
         if not opts.o.dry_run:
             try:
-                return self.docker.run(image=image, command=command, user=user, volumes=volumes,
-                                       entrypoint=entrypoint, envs=env, detach=detach, publish=ports, publish_all=len(ports) == 0)
+                return self.docker.run(
+                    image=image,
+                    command=command if command else [],
+                    user=user,
+                    volumes=volumes,
+                    entrypoint=entrypoint,
+                    envs=env,
+                    detach=detach,
+                    publish=ports,
+                    publish_all=len(ports) == 0,
+                )
+            except DockerException as e:
+                raise DeployerException(e)
+
+    def run_job(self, job_name: str, release_name: Optional[str] = None):
+        # release_name is ignored for Docker deployments (only used for K8s/Helm)
|
||||||
|
if not opts.o.dry_run:
|
||||||
|
try:
|
||||||
|
# Find job compose file in compose-jobs directory
|
||||||
|
# The deployment should have compose-jobs/docker-compose-<job_name>.yml
|
||||||
|
if not self.compose_files:
|
||||||
|
raise DeployerException("No compose files configured")
|
||||||
|
|
||||||
|
# Deployment directory is parent of compose directory
|
||||||
|
compose_dir = Path(self.compose_files[0]).parent
|
||||||
|
deployment_dir = compose_dir.parent
|
||||||
|
job_compose_file = (
|
||||||
|
deployment_dir / "compose-jobs" / f"docker-compose-{job_name}.yml"
|
||||||
|
)
|
||||||
|
|
||||||
|
if not job_compose_file.exists():
|
||||||
|
raise DeployerException(
|
||||||
|
f"Job compose file not found: {job_compose_file}"
|
||||||
|
)
|
||||||
|
|
||||||
|
if opts.o.verbose:
|
||||||
|
print(f"Running job from: {job_compose_file}")
|
||||||
|
|
||||||
|
# Create a DockerClient for the job compose file with same
|
||||||
|
# project name and env file
|
||||||
|
# This allows the job to access volumes from the main deployment
|
||||||
|
job_docker = DockerClient(
|
||||||
|
compose_files=[job_compose_file],
|
||||||
|
compose_project_name=self.compose_project_name,
|
||||||
|
compose_env_file=self.compose_env_file,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Run the job with --rm flag to remove container after completion
|
||||||
|
return job_docker.compose.run(service=job_name, remove=True, tty=True)
|
||||||
|
|
||||||
except DockerException as e:
|
except DockerException as e:
|
||||||
raise DeployerException(e)
|
raise DeployerException(e)
|
||||||
|
|
||||||
|
|
||||||
class DockerDeployerConfigGenerator(DeployerConfigGenerator):
|
class DockerDeployerConfigGenerator(DeployerConfigGenerator):
|
||||||
|
|
||||||
def __init__(self, type: str) -> None:
|
def __init__(self, type: str) -> None:
|
||||||
super().__init__()
|
super().__init__()
|
||||||
|
|
||||||
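For reference, run_job resolves the job compose file purely from the deployment layout; a minimal sketch of that resolution, with hypothetical paths and job name:

from pathlib import Path

compose_files = ["/srv/my-deployment/compose/docker-compose-test.yml"]  # hypothetical
job_name = "db-migrate"  # hypothetical
deployment_dir = Path(compose_files[0]).parent.parent
job_compose_file = deployment_dir / "compose-jobs" / f"docker-compose-{job_name}.yml"
# -> /srv/my-deployment/compose-jobs/docker-compose-db-migrate.yml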
@@ -21,6 +21,7 @@ import os
 import sys
 from dataclasses import dataclass
 from importlib import resources
+from typing import Optional
 import subprocess
 import click
 from pathlib import Path
@@ -35,27 +36,36 @@ from stack_orchestrator.util import (
     stack_is_in_deployment,
     resolve_compose_file,
 )
-from stack_orchestrator.deploy.deployer import Deployer, DeployerException
+from stack_orchestrator.deploy.deployer import DeployerException
 from stack_orchestrator.deploy.deployer_factory import getDeployer
+from stack_orchestrator.deploy.compose.deploy_docker import DockerDeployer
 from stack_orchestrator.deploy.deploy_types import ClusterContext, DeployCommandContext
 from stack_orchestrator.deploy.deployment_context import DeploymentContext
 from stack_orchestrator.deploy.deployment_create import create as deployment_create
 from stack_orchestrator.deploy.deployment_create import init as deployment_init
 from stack_orchestrator.deploy.deployment_create import setup as deployment_setup
+from stack_orchestrator.deploy.k8s import k8s_command


 @click.group()
 @click.option("--include", help="only start these components")
-@click.option("--exclude", help="don\'t start these components")
+@click.option("--exclude", help="don't start these components")
 @click.option("--env-file", help="env file to be used")
 @click.option("--cluster", help="specify a non-default cluster name")
-@click.option("--deploy-to", help="cluster system to deploy to (compose or k8s or k8s-kind)")
+@click.option(
+    "--deploy-to", help="cluster system to deploy to (compose or k8s or k8s-kind)"
+)
 @click.pass_context
 def command(ctx, include, exclude, env_file, cluster, deploy_to):
-    '''deploy a stack'''
+    """deploy a stack"""

-    # Although in theory for some subcommands (e.g. deploy create) the stack can be inferred,
-    # Click doesn't allow us to know that here, so we make providing the stack mandatory
+    # k8s subcommand doesn't require a stack
+    if ctx.invoked_subcommand == "k8s":
+        return
+
+    # Although in theory for some subcommands (e.g. deploy create) the stack
+    # can be inferred, Click doesn't allow us to know that here, so we make
+    # providing the stack mandatory
     stack = global_options2(ctx).stack
     if not stack:
         print("Error: --stack option is required")
@@ -68,30 +78,65 @@ def command(ctx, include, exclude, env_file, cluster, deploy_to):
         deploy_to = "compose"

     stack = get_stack_path(stack)
-    ctx.obj = create_deploy_context(global_options2(ctx), None, stack, include, exclude, cluster, env_file, deploy_to)
+    ctx.obj = create_deploy_context(
+        global_options2(ctx),
+        None,
+        stack,
+        include,
+        exclude,
+        cluster,
+        env_file,
+        deploy_to,
+    )
     # Subcommand is executed now, by the magic of click


-def create_deploy_context(
-        global_context,
-        deployment_context: DeploymentContext,
-        stack,
-        include,
-        exclude,
-        cluster,
-        env_file,
-        deploy_to) -> DeployCommandContext:
+def create_deploy_context(
+    global_context,
+    deployment_context: Optional[DeploymentContext],
+    stack,
+    include,
+    exclude,
+    cluster,
+    env_file,
+    deploy_to,
+) -> DeployCommandContext:
     # Extract the cluster name from the deployment, if we have one
     if deployment_context and cluster is None:
         cluster = deployment_context.get_cluster_id()
-    cluster_context = _make_cluster_context(global_context, stack, include, exclude, cluster, env_file)
-    deployer = getDeployer(deploy_to, deployment_context, compose_files=cluster_context.compose_files,
-                           compose_project_name=cluster_context.cluster,
-                           compose_env_file=cluster_context.env_file)
+
+    # Check if this is a helm chart deployment (has chart/ but no compose/)
+    # TODO: Add a new deployment type for helm chart deployments
+    # To avoid relying on chart existence in such cases
+    is_helm_chart_deployment = False
+    if deployment_context:
+        chart_dir = deployment_context.deployment_dir / "chart"
+        compose_dir = deployment_context.deployment_dir / "compose"
+        is_helm_chart_deployment = chart_dir.exists() and not compose_dir.exists()
+
+    # For helm chart deployments, skip compose file loading
+    if is_helm_chart_deployment:
+        cluster_context = ClusterContext(
+            global_context, cluster, [], [], [], None, env_file
+        )
+    else:
+        cluster_context = _make_cluster_context(
+            global_context, stack, include, exclude, cluster, env_file
+        )
+
+    deployer = getDeployer(
+        deploy_to,
+        deployment_context,
+        compose_files=cluster_context.compose_files,
+        compose_project_name=cluster_context.cluster,
+        compose_env_file=cluster_context.env_file,
+    )
     return DeployCommandContext(stack, cluster_context, deployer)


-def up_operation(ctx, services_list, stay_attached=False, skip_cluster_management=False):
+def up_operation(
+    ctx, services_list, stay_attached=False, skip_cluster_management=False
+):
     global_context = ctx.parent.parent.obj
     deploy_context = ctx.obj
     cluster_context = deploy_context.cluster_context
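The helm-chart case above is detected purely from the deployment directory layout; a minimal sketch of the same check against a hypothetical deployment directory:

from pathlib import Path

deployment_dir = Path("/srv/my-deployment")  # hypothetical
is_helm_chart_deployment = (
    (deployment_dir / "chart").exists() and not (deployment_dir / "compose").exists()
)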
@@ -99,21 +144,38 @@ def up_operation(ctx, services_list, stay_attached=False, skip_cluster_management=False):
     for attr, value in container_exec_env.items():
         os.environ[attr] = value
     if global_context.verbose:
-        print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {services_list}")
+        print(
+            f"Running compose up with container_exec_env: {container_exec_env}, "
+            f"extra_args: {services_list}"
+        )
     for pre_start_command in cluster_context.pre_start_commands:
         _run_command(global_context, cluster_context.cluster, pre_start_command)
-    deploy_context.deployer.up(detach=not stay_attached, skip_cluster_management=skip_cluster_management, services=services_list)
+    deploy_context.deployer.up(
+        detach=not stay_attached,
+        skip_cluster_management=skip_cluster_management,
+        services=services_list,
+    )
     for post_start_command in cluster_context.post_start_commands:
         _run_command(global_context, cluster_context.cluster, post_start_command)
-    _orchestrate_cluster_config(global_context, cluster_context.config, deploy_context.deployer, container_exec_env)
+    _orchestrate_cluster_config(
+        global_context,
+        cluster_context.config,
+        deploy_context.deployer,
+        container_exec_env,
+    )


 def down_operation(ctx, delete_volumes, extra_args_list, skip_cluster_management=False):
     timeout_arg = None
     if extra_args_list:
         timeout_arg = extra_args_list[0]
-    # Specify shutdown timeout (default 10s) to give services enough time to shutdown gracefully
-    ctx.obj.deployer.down(timeout=timeout_arg, volumes=delete_volumes, skip_cluster_management=skip_cluster_management)
+    # Specify shutdown timeout (default 10s) to give services enough time to
+    # shutdown gracefully
+    ctx.obj.deployer.down(
+        timeout=timeout_arg,
+        volumes=delete_volumes,
+        skip_cluster_management=skip_cluster_management,
+    )


 def status_operation(ctx):
@@ -140,7 +202,11 @@ def ps_operation(ctx):
                     if mapping is None:
                         print(f"{port_mapping}", end="")
                     else:
-                        print(f"{mapping[0]['HostIp']}:{mapping[0]['HostPort']}->{port_mapping}", end="")
+                        print(
+                            f"{mapping[0]['HostIp']}:{mapping[0]['HostPort']}"
+                            f"->{port_mapping}",
+                            end="",
+                        )
                     comma = ", "
                 print()
         else:
@@ -175,7 +241,9 @@ def exec_operation(ctx, extra_args):
     if global_context.verbose:
         print(f"Running compose exec {service_name} {command_to_exec}")
     try:
-        ctx.obj.deployer.execute(service_name, command_to_exec, envs=container_exec_env, tty=True)
+        ctx.obj.deployer.execute(
+            service_name, command_to_exec, envs=container_exec_env, tty=True
+        )
     except DeployerException:
         print("container command returned error exit status")

@@ -183,13 +251,26 @@ def exec_operation(ctx, extra_args):
 def logs_operation(ctx, tail: int, follow: bool, extra_args: str):
     extra_args_list = list(extra_args) or None
     services_list = extra_args_list if extra_args_list is not None else []
-    logs_stream = ctx.obj.deployer.logs(services=services_list, tail=tail, follow=follow, stream=True)
+    logs_stream = ctx.obj.deployer.logs(
+        services=services_list, tail=tail, follow=follow, stream=True
+    )
     for stream_type, stream_content in logs_stream:
         print(stream_content.decode("utf-8"), end="")


+def run_job_operation(ctx, job_name: str, helm_release: Optional[str] = None):
+    global_context = ctx.parent.parent.obj
+    if not global_context.dry_run:
+        print(f"Running job: {job_name}")
+        try:
+            ctx.obj.deployer.run_job(job_name, helm_release)
+        except Exception as e:
+            print(f"Error running job {job_name}: {e}")
+            sys.exit(1)
+
+
 @command.command()
-@click.argument('extra_args', nargs=-1)  # help: command: up <service1> <service2>
+@click.argument("extra_args", nargs=-1)  # help: command: up <service1> <service2>
 @click.pass_context
 def up(ctx, extra_args):
     extra_args_list = list(extra_args) or None
@@ -197,8 +278,10 @@ def up(ctx, extra_args):


 @command.command()
-@click.option("--delete-volumes/--preserve-volumes", default=False, help="delete data volumes")
-@click.argument('extra_args', nargs=-1)  # help: command: down<service1> <service2>
+@click.option(
+    "--delete-volumes/--preserve-volumes", default=False, help="delete data volumes"
+)
+@click.argument("extra_args", nargs=-1)  # help: command: down<service1> <service2>
 @click.pass_context
 def down(ctx, delete_volumes, extra_args):
     extra_args_list = list(extra_args) or None
@@ -212,14 +295,14 @@ def ps(ctx):


 @command.command()
-@click.argument('extra_args', nargs=-1)  # help: command: port <service1> <service2>
+@click.argument("extra_args", nargs=-1)  # help: command: port <service1> <service2>
 @click.pass_context
 def port(ctx, extra_args):
     port_operation(ctx, extra_args)


 @command.command()
-@click.argument('extra_args', nargs=-1)  # help: command: exec <service> <command>
+@click.argument("extra_args", nargs=-1)  # help: command: exec <service> <command>
 @click.pass_context
 def exec(ctx, extra_args):
     exec_operation(ctx, extra_args)
@@ -228,44 +311,49 @@ def exec(ctx, extra_args):
 @command.command()
 @click.option("--tail", "-n", default=None, help="number of lines to display")
 @click.option("--follow", "-f", is_flag=True, default=False, help="follow log output")
-@click.argument('extra_args', nargs=-1)  # help: command: logs <service1> <service2>
+@click.argument("extra_args", nargs=-1)  # help: command: logs <service1> <service2>
 @click.pass_context
 def logs(ctx, tail, follow, extra_args):
     logs_operation(ctx, tail, follow, extra_args)


 def get_stack_status(ctx, stack):

     ctx_copy = copy.copy(ctx)
     ctx_copy.stack = stack

     cluster_context = _make_cluster_context(ctx_copy, stack, None, None, None, None)
-    deployer = Deployer(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster)
+    deployer = DockerDeployer(
+        type="compose",
+        deployment_context=None,
+        compose_files=cluster_context.compose_files,
+        compose_project_name=cluster_context.cluster,
+        compose_env_file=cluster_context.env_file,
+    )
     # TODO: refactor to avoid duplicating this code above
     if ctx.verbose:
         print("Running compose ps")
     container_list = deployer.ps()
-    if len(container_list) > 0:
-        if ctx.debug:
-            print(f"Container list from compose ps: {container_list}")
-        return True
-    else:
+    if container_list is None or len(container_list) == 0:
         if ctx.debug:
             print("No containers found from compose ps")
-        False
+        return False
+    if ctx.debug:
+        print(f"Container list from compose ps: {container_list}")
+    return True


 def _make_runtime_env(ctx):
     container_exec_env = {
         "CERC_HOST_UID": f"{os.getuid()}",
-        "CERC_HOST_GID": f"{os.getgid()}"
+        "CERC_HOST_GID": f"{os.getgid()}",
     }
     container_exec_env.update({"CERC_SCRIPT_DEBUG": "true"} if ctx.debug else {})
     return container_exec_env


 def _make_default_cluster_name(deployment, compose_dir, stack, include, exclude):
-    # Create default unique, stable cluster name from confile file path and stack name if provided
+    # Create default unique, stable cluster name from confile file path and
+    # stack name if provided
     if deployment:
         path = os.path.realpath(os.path.abspath(compose_dir))
     else:
@@ -280,7 +368,8 @@ def _make_default_cluster_name(deployment, compose_dir, stack, include, exclude):
     return cluster


-# stack has to be either PathLike pointing to a stack yml file, or a string with the name of a known stack
+# stack has to be either PathLike pointing to a stack yml file, or a
+# string with the name of a known stack
 def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
     dev_root_path = get_dev_root_path(ctx)

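The default cluster name used above is built from a stable descriptor; a sketch of the id computation, using the descriptor format shown in DeploymentContext.init later in this diff and a hypothetical path:

import hashlib

unique_cluster_descriptor = "/srv/my-deployment/compose,/srv/my-deployment/stack.yml,None,None"
cluster_id = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()[:16]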
@@ -289,28 +378,37 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
     if deployment:
         compose_dir = stack.joinpath("compose")
     else:
-        # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
-        compose_dir = Path(__file__).absolute().parent.parent.joinpath("data", "compose")
+        # See:
+        # https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
+        compose_dir = (
+            Path(__file__).absolute().parent.parent.joinpath("data", "compose")
+        )

     if cluster is None:
-        cluster = _make_default_cluster_name(deployment, compose_dir, stack, include, exclude)
+        cluster = _make_default_cluster_name(
+            deployment, compose_dir, stack, include, exclude
+        )
     else:
         _make_default_cluster_name(deployment, compose_dir, stack, include, exclude)

     # See: https://stackoverflow.com/a/20885799/1701505
     from stack_orchestrator import data

     with resources.open_text(data, "pod-list.txt") as pod_list_file:
         all_pods = pod_list_file.read().splitlines()

     pods_in_scope = []
+    cluster_config = None
     if stack:
         stack_config = get_parsed_stack_config(stack)
-        # TODO: syntax check the input here
-        pods_in_scope = stack_config['pods']
-        cluster_config = stack_config['config'] if 'config' in stack_config else None
+        if stack_config is not None:
+            # TODO: syntax check the input here
+            pods_in_scope = stack_config["pods"]
+            cluster_config = (
+                stack_config["config"] if "config" in stack_config else None
+            )
     else:
         pods_in_scope = all_pods
-        cluster_config = None

     # Convert all pod definitions to v1.1 format
     pods_in_scope = _convert_to_new_format(pods_in_scope)
@@ -330,29 +428,47 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
         if include_exclude_check(pod_name, include, exclude):
             if pod_repository is None or pod_repository == "internal":
                 if deployment:
-                    compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_path}.yml")
+                    compose_file_name = os.path.join(
+                        compose_dir, f"docker-compose-{pod_path}.yml"
+                    )
                 else:
                     compose_file_name = resolve_compose_file(stack, pod_name)
             else:
                 if deployment:
-                    compose_file_name = os.path.join(compose_dir, f"docker-compose-{pod_name}.yml")
+                    compose_file_name = os.path.join(
+                        compose_dir, f"docker-compose-{pod_name}.yml"
+                    )
                     pod_pre_start_command = pod.get("pre_start_command")
                     pod_post_start_command = pod.get("post_start_command")
-                    script_dir = compose_dir.parent.joinpath("pods", pod_name, "scripts")
+                    script_dir = compose_dir.parent.joinpath(
+                        "pods", pod_name, "scripts"
+                    )
                    if pod_pre_start_command is not None:
-                        pre_start_commands.append(os.path.join(script_dir, pod_pre_start_command))
+                        pre_start_commands.append(
+                            os.path.join(script_dir, pod_pre_start_command)
+                        )
                     if pod_post_start_command is not None:
-                        post_start_commands.append(os.path.join(script_dir, pod_post_start_command))
+                        post_start_commands.append(
+                            os.path.join(script_dir, pod_post_start_command)
+                        )
                 else:
                     # TODO: fix this code for external stack with scripts
-                    pod_root_dir = os.path.join(dev_root_path, pod_repository.split("/")[-1], pod["path"])
-                    compose_file_name = os.path.join(pod_root_dir, f"docker-compose-{pod_name}.yml")
+                    pod_root_dir = os.path.join(
+                        dev_root_path, pod_repository.split("/")[-1], pod["path"]
+                    )
+                    compose_file_name = os.path.join(
+                        pod_root_dir, f"docker-compose-{pod_name}.yml"
+                    )
                     pod_pre_start_command = pod.get("pre_start_command")
                     pod_post_start_command = pod.get("post_start_command")
                     if pod_pre_start_command is not None:
-                        pre_start_commands.append(os.path.join(pod_root_dir, pod_pre_start_command))
+                        pre_start_commands.append(
+                            os.path.join(pod_root_dir, pod_pre_start_command)
+                        )
                     if pod_post_start_command is not None:
-                        post_start_commands.append(os.path.join(pod_root_dir, pod_post_start_command))
+                        post_start_commands.append(
+                            os.path.join(pod_root_dir, pod_post_start_command)
+                        )
             compose_files.append(compose_file_name)
         else:
             if ctx.verbose:
@@ -361,7 +477,15 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
     if ctx.verbose:
         print(f"files: {compose_files}")

-    return ClusterContext(ctx, cluster, compose_files, pre_start_commands, post_start_commands, cluster_config, env_file)
+    return ClusterContext(
+        ctx,
+        cluster,
+        compose_files,
+        pre_start_commands,
+        post_start_commands,
+        cluster_config,
+        env_file,
+    )


 def _convert_to_new_format(old_pod_array):
@@ -370,11 +494,7 @@ def _convert_to_new_format(old_pod_array):
         if isinstance(old_pod, dict):
             new_pod_array.append(old_pod)
         else:
-            new_pod = {
-                "name": old_pod,
-                "repository": "internal",
-                "path": old_pod
-            }
+            new_pod = {"name": old_pod, "repository": "internal", "path": old_pod}
             new_pod_array.append(new_pod)
     return new_pod_array

@@ -388,14 +508,15 @@ def _run_command(ctx, cluster_name, command):
     command_env["CERC_SO_COMPOSE_PROJECT"] = cluster_name
     if ctx.debug:
         command_env["CERC_SCRIPT_DEBUG"] = "true"
-    command_result = subprocess.run(command_file, shell=True, env=command_env, cwd=command_dir)
+    command_result = subprocess.run(
+        command_file, shell=True, env=command_env, cwd=command_dir
+    )
     if command_result.returncode != 0:
         print(f"FATAL Error running command: {command}")
         sys.exit(1)


 def _orchestrate_cluster_config(ctx, cluster_config, deployer, container_exec_env):

     @dataclass
     class ConfigDirective:
         source_container: str
@@ -413,24 +534,32 @@ def _orchestrate_cluster_config(ctx, cluster_config, deployer, container_exec_env):
                 container_config[directive].split(".")[0],
                 container_config[directive].split(".")[1],
                 container,
-                directive
+                directive,
             )
             if ctx.verbose:
-                print(f"Setting {pd.destination_container}.{pd.destination_variable}"
-                      f" = {pd.source_container}.{pd.source_variable}")
+                print(
+                    f"Setting {pd.destination_container}.{pd.destination_variable}"
+                    f" = {pd.source_container}.{pd.source_variable}"
+                )
             # TODO: add a timeout
             waiting_for_data = True
             destination_output = "*** no output received yet ***"
             while waiting_for_data:
-                # TODO: fix the script paths so they're consistent between containers
+                # TODO: fix the script paths so they're consistent between
+                # containers
                 source_value = None
                 try:
-                    source_value = deployer.execute(pd.source_container,
-                                                    ["sh", "-c",
-                                                     "sh /docker-entrypoint-scripts.d/export-"
-                                                     f"{pd.source_variable}.sh"],
-                                                    tty=False,
-                                                    envs=container_exec_env)
+                    source_value = deployer.execute(
+                        pd.source_container,
+                        [
+                            "sh",
+                            "-c",
+                            "sh /docker-entrypoint-scripts.d/export-"
+                            f"{pd.source_variable}.sh",
+                        ],
+                        tty=False,
+                        envs=container_exec_env,
+                    )
                 except DeployerException as error:
                     if ctx.debug:
                         print(f"Docker exception reading config source: {error}")
@@ -438,20 +567,28 @@ def _orchestrate_cluster_config(ctx, cluster_config, deployer, container_exec_env):
                     # "It returned with code 1"
                     if "It returned with code 1" in str(error):
                         if ctx.verbose:
-                            print("Config export script returned an error, re-trying")
-                    # If the script failed to execute (e.g. the file is not there) then we get:
+                            print(
+                                "Config export script returned an error, re-trying"
+                            )
+                    # If the script failed to execute
+                    # (e.g. the file is not there) then we get:
                     # "It returned with code 2"
                     if "It returned with code 2" in str(error):
                         print(f"Fatal error reading config source: {error}")
                 if source_value:
                     if ctx.debug:
                         print(f"fetched source value: {source_value}")
-                    destination_output = deployer.execute(pd.destination_container,
-                                                          ["sh", "-c",
-                                                           f"sh /scripts/import-{pd.destination_variable}.sh"
-                                                           f" {source_value}"],
-                                                          tty=False,
-                                                          envs=container_exec_env)
+                    destination_output = deployer.execute(
+                        pd.destination_container,
+                        [
+                            "sh",
+                            "-c",
+                            f"sh /scripts/import-{pd.destination_variable}.sh"
+                            f" {source_value}",
+                        ],
+                        tty=False,
+                        envs=container_exec_env,
+                    )
                     waiting_for_data = False
                 if ctx.debug and not waiting_for_data:
                     print(f"destination output: {destination_output}")
@@ -460,3 +597,4 @@ def _orchestrate_cluster_config(ctx, cluster_config, deployer, container_exec_env):
 command.add_command(deployment_init)
 command.add_command(deployment_create)
 command.add_command(deployment_setup)
+command.add_command(k8s_command.command, "k8s")
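As an illustration of the v1.1 pod format conversion above, with a hypothetical pod name:

_convert_to_new_format(["fixturenet-eth"])
# -> [{"name": "fixturenet-eth", "repository": "internal", "path": "fixturenet-eth"}]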
@@ -13,7 +13,7 @@
 # You should have received a copy of the GNU Affero General Public License
 # along with this program. If not, see <http:#www.gnu.org/licenses/>.

-from typing import List, Mapping
+from typing import List, Mapping, Optional
 from dataclasses import dataclass
 from stack_orchestrator.command_types import CommandOptions
 from stack_orchestrator.deploy.deployer import Deployer
@@ -21,20 +21,21 @@ from stack_orchestrator.deploy.deployer import Deployer

 @dataclass
 class ClusterContext:
-    options: CommandOptions  # TODO: this should be in its own object not stuffed in here
-    cluster: str
+    # TODO: this should be in its own object not stuffed in here
+    options: CommandOptions
+    cluster: Optional[str]
     compose_files: List[str]
     pre_start_commands: List[str]
     post_start_commands: List[str]
-    config: str
-    env_file: str
+    config: Optional[str]
+    env_file: Optional[str]


 @dataclass
 class DeployCommandContext:
     stack: str
     cluster_context: ClusterContext
-    deployer: Deployer
+    deployer: Optional[Deployer]


 @dataclass
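With these fields now Optional, the chart-only path in create_deploy_context can build an empty compose context, as in the call shown earlier in this diff:

# No compose files, pre/post start commands, or cluster config for a
# chart-only deployment; cluster and env_file may also be None.
cluster_context = ClusterContext(global_context, cluster, [], [], [], None, env_file)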
@@ -15,7 +15,12 @@

 from typing import List, Any
 from stack_orchestrator.deploy.deploy_types import DeployCommandContext, VolumeMapping
-from stack_orchestrator.util import get_parsed_stack_config, get_yaml, get_pod_list, resolve_compose_file
+from stack_orchestrator.util import (
+    get_parsed_stack_config,
+    get_yaml,
+    get_pod_list,
+    resolve_compose_file,
+)
 from stack_orchestrator.opts import opts


@@ -38,7 +43,7 @@ def _container_image_from_service(stack: str, service: str):


 def parsed_pod_files_map_from_file_names(pod_files):
-    parsed_pod_yaml_map : Any = {}
+    parsed_pod_yaml_map: Any = {}
     for pod_file in pod_files:
         with open(pod_file, "r") as pod_file_descriptor:
             parsed_pod_file = get_yaml().load(pod_file_descriptor)
@@ -73,19 +78,28 @@ def _volumes_to_docker(mounts: List[VolumeMapping]):
     return result


-def run_container_command(ctx: DeployCommandContext, service: str, command: str, mounts: List[VolumeMapping]):
+def run_container_command(
+    ctx: DeployCommandContext, service: str, command: str, mounts: List[VolumeMapping]
+):
     deployer = ctx.deployer
+    if deployer is None:
+        raise ValueError("Deployer is not configured")
     container_image = _container_image_from_service(ctx.stack, service)
+    if container_image is None:
+        raise ValueError(f"Container image not found for service: {service}")
     docker_volumes = _volumes_to_docker(mounts)
     if ctx.cluster_context.options.debug:
         print(f"Running this command in {service} container: {command}")
     docker_output = deployer.run(
         container_image,
-        ["-c", command], entrypoint="sh",
-        # Current laconicd container has a bug where it crashes when run not as root
-        # Commented out line below is a workaround. Created files end up owned by root on the host
+        ["-c", command],
+        entrypoint="sh",
+        # Current laconicd container has a bug where it crashes when run not
+        # as root
+        # Commented out line below is a workaround. Created files end up
+        # owned by root on the host
         # user=f"{os.getuid()}:{os.getgid()}",
-        volumes=docker_volumes
+        volumes=docker_volumes,
     )
     # There doesn't seem to be a way to get an exit code from docker.run()
     return (docker_output, 0)
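A minimal usage sketch of run_container_command, mirroring the test fixture near the top of this diff; the host path is hypothetical and command_context is assumed to be an existing DeployCommandContext:

from pathlib import Path

host_dir = Path("/tmp/so-test").absolute()  # hypothetical
mounts = [VolumeMapping(str(host_dir), "/data")]
output, status = run_container_command(
    command_context, "test", "echo output-data > /data/output-file && echo success", mounts
)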
@@ -15,10 +15,10 @@

 from abc import ABC, abstractmethod
 from pathlib import Path
+from typing import Optional


 class Deployer(ABC):

     @abstractmethod
     def up(self, detach, skip_cluster_management, services):
         pass
@@ -52,7 +52,21 @@ class Deployer(ABC):
         pass

     @abstractmethod
-    def run(self, image: str, command=None, user=None, volumes=None, entrypoint=None, env={}, ports=[], detach=False):
+    def run(
+        self,
+        image: str,
+        command=None,
+        user=None,
+        volumes=None,
+        entrypoint=None,
+        env={},
+        ports=[],
+        detach=False,
+    ):
+        pass
+
+    @abstractmethod
+    def run_job(self, job_name: str, release_name: Optional[str] = None):
         pass


@@ -62,7 +76,6 @@ class DeployerException(Exception):


 class DeployerConfigGenerator(ABC):

     @abstractmethod
     def generate(self, deployment_dir: Path):
         pass
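Because run_job is now abstract, every concrete deployer has to provide it; an illustrative stub only, not a real deployer (the other abstract methods are omitted here, so this class is just a sketch):

class NoOpDeployer(Deployer):
    # Other abstract methods omitted for brevity; a real subclass must define them too.
    def run_job(self, job_name: str, release_name: Optional[str] = None):
        # Nothing to run in this illustrative stub
        return None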
@@ -14,8 +14,14 @@
 # along with this program. If not, see <http:#www.gnu.org/licenses/>.

 from stack_orchestrator import constants
-from stack_orchestrator.deploy.k8s.deploy_k8s import K8sDeployer, K8sDeployerConfigGenerator
-from stack_orchestrator.deploy.compose.deploy_docker import DockerDeployer, DockerDeployerConfigGenerator
+from stack_orchestrator.deploy.k8s.deploy_k8s import (
+    K8sDeployer,
+    K8sDeployerConfigGenerator,
+)
+from stack_orchestrator.deploy.compose.deploy_docker import (
+    DockerDeployer,
+    DockerDeployerConfigGenerator,
+)


 def getDeployerConfigGenerator(type: str, deployment_context):
@@ -27,10 +33,27 @@ def getDeployerConfigGenerator(type: str, deployment_context):
         print(f"ERROR: deploy-to {type} is not valid")


-def getDeployer(type: str, deployment_context, compose_files, compose_project_name, compose_env_file):
+def getDeployer(
+    type: str, deployment_context, compose_files, compose_project_name, compose_env_file
+):
     if type == "compose" or type is None:
-        return DockerDeployer(type, deployment_context, compose_files, compose_project_name, compose_env_file)
-    elif type == type == constants.k8s_deploy_type or type == constants.k8s_kind_deploy_type:
-        return K8sDeployer(type, deployment_context, compose_files, compose_project_name, compose_env_file)
+        return DockerDeployer(
+            type,
+            deployment_context,
+            compose_files,
+            compose_project_name,
+            compose_env_file,
+        )
+    elif (
+        type == type == constants.k8s_deploy_type
+        or type == constants.k8s_kind_deploy_type
+    ):
+        return K8sDeployer(
+            type,
+            deployment_context,
+            compose_files,
+            compose_project_name,
+            compose_env_file,
+        )
     else:
         print(f"ERROR: deploy-to {type} is not valid")
@@ -18,8 +18,19 @@ from pathlib import Path
 import sys
 from stack_orchestrator import constants
 from stack_orchestrator.deploy.images import push_images_operation
-from stack_orchestrator.deploy.deploy import up_operation, down_operation, ps_operation, port_operation, status_operation
-from stack_orchestrator.deploy.deploy import exec_operation, logs_operation, create_deploy_context, update_operation
+from stack_orchestrator.deploy.deploy import (
+    up_operation,
+    down_operation,
+    ps_operation,
+    port_operation,
+    status_operation,
+)
+from stack_orchestrator.deploy.deploy import (
+    exec_operation,
+    logs_operation,
+    create_deploy_context,
+    update_operation,
+)
 from stack_orchestrator.deploy.deploy_types import DeployCommandContext
 from stack_orchestrator.deploy.deployment_context import DeploymentContext

@@ -28,7 +39,7 @@ from stack_orchestrator.deploy.deployment_context import DeploymentContext
 @click.option("--dir", required=True, help="path to deployment directory")
 @click.pass_context
 def command(ctx, dir):
-    '''manage a deployment'''
+    """manage a deployment"""

     # Check that --stack wasn't supplied
     if ctx.parent.obj.stack:
@@ -40,7 +51,10 @@ def command(ctx, dir):
         print(f"Error: deployment directory {dir} does not exist")
         sys.exit(1)
     if not dir_path.is_dir():
-        print(f"Error: supplied deployment directory path {dir} exists but is a file not a directory")
+        print(
+            f"Error: supplied deployment directory path {dir} exists but is a "
+            "file not a directory"
+        )
         sys.exit(1)
     # Store the deployment context for subcommands
     deployment_context = DeploymentContext()
@@ -57,16 +71,31 @@ def make_deploy_context(ctx) -> DeployCommandContext:
     else:
         deployment_type = constants.compose_deploy_type
     stack = context.deployment_dir
-    return create_deploy_context(ctx.parent.parent.obj, context, stack, None, None,
-                                 cluster_name, env_file, deployment_type)
+    return create_deploy_context(
+        ctx.parent.parent.obj,
+        context,
+        stack,
+        None,
+        None,
+        cluster_name,
+        env_file,
+        deployment_type,
+    )


 # TODO: remove legacy up command since it's an alias for start
 @command.command()
-@click.option("--stay-attached/--detatch-terminal", default=False, help="detatch or not to see container stdout")
-@click.option("--skip-cluster-management/--perform-cluster-management",
-              default=False, help="Skip cluster initialization/tear-down (only for kind-k8s deployments)")
-@click.argument('extra_args', nargs=-1)  # help: command: up <service1> <service2>
+@click.option(
+    "--stay-attached/--detatch-terminal",
+    default=False,
+    help="detatch or not to see container stdout",
+)
+@click.option(
+    "--skip-cluster-management/--perform-cluster-management",
+    default=False,
+    help="Skip cluster initialization/tear-down (only for kind-k8s deployments)",
+)
+@click.argument("extra_args", nargs=-1)  # help: command: up <service1> <service2>
 @click.pass_context
 def up(ctx, stay_attached, skip_cluster_management, extra_args):
     ctx.obj = make_deploy_context(ctx)
@@ -76,10 +105,17 @@ def up(ctx, stay_attached, skip_cluster_management, extra_args):

 # start is the preferred alias for up
 @command.command()
-@click.option("--stay-attached/--detatch-terminal", default=False, help="detatch or not to see container stdout")
-@click.option("--skip-cluster-management/--perform-cluster-management",
-              default=False, help="Skip cluster initialization/tear-down (only for kind-k8s deployments)")
-@click.argument('extra_args', nargs=-1)  # help: command: up <service1> <service2>
+@click.option(
+    "--stay-attached/--detatch-terminal",
+    default=False,
+    help="detatch or not to see container stdout",
+)
+@click.option(
+    "--skip-cluster-management/--perform-cluster-management",
+    default=False,
+    help="Skip cluster initialization/tear-down (only for kind-k8s deployments)",
+)
+@click.argument("extra_args", nargs=-1)  # help: command: up <service1> <service2>
 @click.pass_context
 def start(ctx, stay_attached, skip_cluster_management, extra_args):
     ctx.obj = make_deploy_context(ctx)
@@ -89,10 +125,15 @@ def start(ctx, stay_attached, skip_cluster_management, extra_args):

 # TODO: remove legacy up command since it's an alias for stop
 @command.command()
-@click.option("--delete-volumes/--preserve-volumes", default=False, help="delete data volumes")
-@click.option("--skip-cluster-management/--perform-cluster-management",
-              default=False, help="Skip cluster initialization/tear-down (only for kind-k8s deployments)")
-@click.argument('extra_args', nargs=-1)  # help: command: down <service1> <service2>
+@click.option(
+    "--delete-volumes/--preserve-volumes", default=False, help="delete data volumes"
+)
+@click.option(
+    "--skip-cluster-management/--perform-cluster-management",
+    default=False,
+    help="Skip cluster initialization/tear-down (only for kind-k8s deployments)",
+)
+@click.argument("extra_args", nargs=-1)  # help: command: down <service1> <service2>
 @click.pass_context
 def down(ctx, delete_volumes, skip_cluster_management, extra_args):
     # Get the stack config file name
@ -103,10 +144,15 @@ def down(ctx, delete_volumes, skip_cluster_management, extra_args):
|
|||||||
|
|
||||||
# stop is the preferred alias for down
|
# stop is the preferred alias for down
|
||||||
@command.command()
|
@command.command()
|
||||||
@click.option("--delete-volumes/--preserve-volumes", default=False, help="delete data volumes")
|
@click.option(
|
||||||
@click.option("--skip-cluster-management/--perform-cluster-management",
|
"--delete-volumes/--preserve-volumes", default=False, help="delete data volumes"
|
||||||
default=False, help="Skip cluster initialization/tear-down (only for kind-k8s deployments)")
|
)
|
||||||
@click.argument('extra_args', nargs=-1) # help: command: down <service1> <service2>
|
@click.option(
|
||||||
|
"--skip-cluster-management/--perform-cluster-management",
|
||||||
|
default=False,
|
||||||
|
help="Skip cluster initialization/tear-down (only for kind-k8s deployments)",
|
||||||
|
)
|
||||||
|
@click.argument("extra_args", nargs=-1) # help: command: down <service1> <service2>
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def stop(ctx, delete_volumes, skip_cluster_management, extra_args):
|
def stop(ctx, delete_volumes, skip_cluster_management, extra_args):
|
||||||
# TODO: add cluster name and env file here
|
# TODO: add cluster name and env file here
|
||||||
@ -130,7 +176,7 @@ def push_images(ctx):
|
|||||||
|
|
||||||
|
|
||||||
@command.command()
|
@command.command()
|
||||||
@click.argument('extra_args', nargs=-1) # help: command: port <service1> <service2>
|
@click.argument("extra_args", nargs=-1) # help: command: port <service1> <service2>
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def port(ctx, extra_args):
|
def port(ctx, extra_args):
|
||||||
ctx.obj = make_deploy_context(ctx)
|
ctx.obj = make_deploy_context(ctx)
|
||||||
@ -138,7 +184,7 @@ def port(ctx, extra_args):
|
|||||||
|
|
||||||
|
|
||||||
@command.command()
|
@command.command()
|
||||||
@click.argument('extra_args', nargs=-1) # help: command: exec <service> <command>
|
@click.argument("extra_args", nargs=-1) # help: command: exec <service> <command>
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def exec(ctx, extra_args):
|
def exec(ctx, extra_args):
|
||||||
ctx.obj = make_deploy_context(ctx)
|
ctx.obj = make_deploy_context(ctx)
|
||||||
@ -148,7 +194,7 @@ def exec(ctx, extra_args):
|
|||||||
@command.command()
|
@command.command()
|
||||||
@click.option("--tail", "-n", default=None, help="number of lines to display")
|
@click.option("--tail", "-n", default=None, help="number of lines to display")
|
||||||
@click.option("--follow", "-f", is_flag=True, default=False, help="follow log output")
|
@click.option("--follow", "-f", is_flag=True, default=False, help="follow log output")
|
||||||
@click.argument('extra_args', nargs=-1) # help: command: logs <service1> <service2>
|
@click.argument("extra_args", nargs=-1) # help: command: logs <service1> <service2>
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def logs(ctx, tail, follow, extra_args):
|
def logs(ctx, tail, follow, extra_args):
|
||||||
ctx.obj = make_deploy_context(ctx)
|
ctx.obj = make_deploy_context(ctx)
|
||||||
@ -167,3 +213,18 @@ def status(ctx):
|
|||||||
def update(ctx):
|
def update(ctx):
|
||||||
ctx.obj = make_deploy_context(ctx)
|
ctx.obj = make_deploy_context(ctx)
|
||||||
update_operation(ctx)
|
update_operation(ctx)
|
||||||
|
|
||||||
|
|
||||||
|
@command.command()
|
||||||
|
@click.argument("job_name")
|
||||||
|
@click.option(
|
||||||
|
"--helm-release",
|
||||||
|
help="Helm release name (for k8s helm chart deployments, defaults to chart name)",
|
||||||
|
)
|
||||||
|
@click.pass_context
|
||||||
|
def run_job(ctx, job_name, helm_release):
|
||||||
|
"""run a one-time job from the stack"""
|
||||||
|
from stack_orchestrator.deploy.deploy import run_job_operation
|
||||||
|
|
||||||
|
ctx.obj = make_deploy_context(ctx)
|
||||||
|
run_job_operation(ctx, job_name, helm_release)
|
||||||
|
|||||||
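Editor's note: the hunk above adds a run_job command that forwards a job name and an optional --helm-release value to run_job_operation. A minimal, self-contained sketch of the same click wiring (the group and the echo body here are placeholders; the real implementation lives in stack_orchestrator.deploy.deploy):

import click

@click.group()
def command():
    """deployment commands (sketch only)"""

@command.command()
@click.argument("job_name")
@click.option("--helm-release", help="Helm release name (defaults to chart name)")
@click.pass_context
def run_job(ctx, job_name, helm_release):
    # The real command calls run_job_operation(ctx, job_name, helm_release).
    click.echo(f"would run job {job_name} (helm release: {helm_release})")

if __name__ == "__main__":
    command()

Click derives the command name from the function name with the underscore turned into a dash, so the sketch is invoked as: python sketch.py run-job backup --helm-release my-release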
@ -1,4 +1,3 @@
|
|||||||
|
|
||||||
# Copyright © 2022, 2023 Vulcanize
|
# Copyright © 2022, 2023 Vulcanize
|
||||||
|
|
||||||
# This program is free software: you can redistribute it and/or modify
|
# This program is free software: you can redistribute it and/or modify
|
||||||
@ -45,20 +44,22 @@ class DeploymentContext:
|
|||||||
def get_compose_dir(self):
|
def get_compose_dir(self):
|
||||||
return self.deployment_dir.joinpath(constants.compose_dir_name)
|
return self.deployment_dir.joinpath(constants.compose_dir_name)
|
||||||
|
|
||||||
|
def get_compose_file(self, name: str):
|
||||||
|
return self.get_compose_dir() / f"docker-compose-{name}.yml"
|
||||||
|
|
||||||
def get_cluster_id(self):
|
def get_cluster_id(self):
|
||||||
return self.id
|
return self.id
|
||||||
|
|
||||||
def init(self, dir):
|
def init(self, dir: Path):
|
||||||
self.deployment_dir = dir
|
self.deployment_dir = dir.absolute()
|
||||||
self.spec = Spec()
|
self.spec = Spec()
|
||||||
self.spec.init_from_file(self.get_spec_file())
|
self.spec.init_from_file(self.get_spec_file())
|
||||||
self.stack = Stack(self.spec.obj["stack"])
|
self.stack = Stack(self.spec.obj["stack"])
|
||||||
self.stack.init_from_file(self.get_stack_file())
|
self.stack.init_from_file(self.get_stack_file())
|
||||||
deployment_file_path = self.get_deployment_file()
|
deployment_file_path = self.get_deployment_file()
|
||||||
if deployment_file_path.exists():
|
if deployment_file_path.exists():
|
||||||
with deployment_file_path:
|
obj = get_yaml().load(open(deployment_file_path, "r"))
|
||||||
obj = get_yaml().load(open(deployment_file_path, "r"))
|
self.id = obj[constants.cluster_id_key]
|
||||||
self.id = obj[constants.cluster_id_key]
|
|
||||||
# Handle the case of a legacy deployment with no file
|
# Handle the case of a legacy deployment with no file
|
||||||
# Code below is intended to match the output from _make_default_cluster_name()
|
# Code below is intended to match the output from _make_default_cluster_name()
|
||||||
# TODO: remove when we no longer need to support legacy deployments
|
# TODO: remove when we no longer need to support legacy deployments
|
||||||
@ -67,3 +68,17 @@ class DeploymentContext:
|
|||||||
unique_cluster_descriptor = f"{path},{self.get_stack_file()},None,None"
|
unique_cluster_descriptor = f"{path},{self.get_stack_file()},None,None"
|
||||||
hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()[:16]
|
hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()[:16]
|
||||||
self.id = f"{constants.cluster_name_prefix}{hash}"
|
self.id = f"{constants.cluster_name_prefix}{hash}"
|
||||||
|
|
||||||
|
def modify_yaml(self, file_path: Path, modifier_func):
|
||||||
|
"""Load a YAML, apply a modification function, and write it back."""
|
||||||
|
if not file_path.absolute().is_relative_to(self.deployment_dir):
|
||||||
|
raise ValueError(f"File is not inside deployment directory: {file_path}")
|
||||||
|
|
||||||
|
yaml = get_yaml()
|
||||||
|
with open(file_path, "r") as f:
|
||||||
|
yaml_data = yaml.load(f)
|
||||||
|
|
||||||
|
modifier_func(yaml_data)
|
||||||
|
|
||||||
|
with open(file_path, "w") as f:
|
||||||
|
yaml.dump(yaml_data, f)
|
||||||
|
|||||||
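Editor's note: the DeploymentContext additions above introduce get_compose_file(), an absolute deployment_dir in init(), and modify_yaml(), which loads a YAML file, applies a caller-supplied modifier function, and writes the result back, refusing paths outside the deployment directory. A standalone sketch of the same load, modify, dump round trip, assuming ruamel.yaml as the backing for get_yaml(); the sample document and the modifier are made up:

from io import StringIO
from ruamel.yaml import YAML

def add_restart_policy(yaml_data):
    # a modifier_func mutates the parsed mapping in place
    for service in yaml_data.get("services", {}).values():
        service["restart"] = "always"

yaml = YAML()
doc = yaml.load(StringIO("services:\n  db:\n    image: postgres:15\n"))
add_restart_policy(doc)
out = StringIO()
yaml.dump(doc, out)
print(out.getvalue())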
@ -24,10 +24,23 @@ from secrets import token_hex
|
|||||||
import sys
|
import sys
|
||||||
from stack_orchestrator import constants
|
from stack_orchestrator import constants
|
||||||
from stack_orchestrator.opts import opts
|
from stack_orchestrator.opts import opts
|
||||||
from stack_orchestrator.util import (get_stack_path, get_parsed_deployment_spec, get_parsed_stack_config,
|
from stack_orchestrator.util import (
|
||||||
global_options, get_yaml, get_pod_list, get_pod_file_path, pod_has_scripts,
|
get_stack_path,
|
||||||
get_pod_script_paths, get_plugin_code_paths, error_exit, env_var_map_from_file,
|
get_parsed_deployment_spec,
|
||||||
resolve_config_dir)
|
get_parsed_stack_config,
|
||||||
|
global_options,
|
||||||
|
get_yaml,
|
||||||
|
get_pod_list,
|
||||||
|
get_pod_file_path,
|
||||||
|
pod_has_scripts,
|
||||||
|
get_pod_script_paths,
|
||||||
|
get_plugin_code_paths,
|
||||||
|
error_exit,
|
||||||
|
env_var_map_from_file,
|
||||||
|
resolve_config_dir,
|
||||||
|
get_job_list,
|
||||||
|
get_job_file_path,
|
||||||
|
)
|
||||||
from stack_orchestrator.deploy.spec import Spec
|
from stack_orchestrator.deploy.spec import Spec
|
||||||
from stack_orchestrator.deploy.deploy_types import LaconicStackSetupCommand
|
from stack_orchestrator.deploy.deploy_types import LaconicStackSetupCommand
|
||||||
from stack_orchestrator.deploy.deployer_factory import getDeployerConfigGenerator
|
from stack_orchestrator.deploy.deployer_factory import getDeployerConfigGenerator
|
||||||
@ -45,21 +58,21 @@ def _get_ports(stack):
|
|||||||
yaml = get_yaml()
|
yaml = get_yaml()
|
||||||
for pod in pods:
|
for pod in pods:
|
||||||
pod_file_path = get_pod_file_path(stack, parsed_stack, pod)
|
pod_file_path = get_pod_file_path(stack, parsed_stack, pod)
|
||||||
|
if pod_file_path is None:
|
||||||
|
continue
|
||||||
parsed_pod_file = yaml.load(open(pod_file_path, "r"))
|
parsed_pod_file = yaml.load(open(pod_file_path, "r"))
|
||||||
if "services" in parsed_pod_file:
|
if "services" in parsed_pod_file:
|
||||||
for svc_name, svc in parsed_pod_file["services"].items():
|
for svc_name, svc in parsed_pod_file["services"].items():
|
||||||
if "ports" in svc:
|
if "ports" in svc:
|
||||||
# Ports can appear as strings or numbers. We normalize them as strings.
|
# Ports can appear as strings or numbers. We normalize them as
|
||||||
|
# strings.
|
||||||
ports[svc_name] = [str(x) for x in svc["ports"]]
|
ports[svc_name] = [str(x) for x in svc["ports"]]
|
||||||
return ports
|
return ports
|
||||||
|
|
||||||
|
|
||||||
def _get_named_volumes(stack):
|
def _get_named_volumes(stack):
|
||||||
# Parse the compose files looking for named volumes
|
# Parse the compose files looking for named volumes
|
||||||
named_volumes = {
|
named_volumes = {"rw": [], "ro": []}
|
||||||
"rw": [],
|
|
||||||
"ro": []
|
|
||||||
}
|
|
||||||
parsed_stack = get_parsed_stack_config(stack)
|
parsed_stack = get_parsed_stack_config(stack)
|
||||||
pods = get_pod_list(parsed_stack)
|
pods = get_pod_list(parsed_stack)
|
||||||
yaml = get_yaml()
|
yaml = get_yaml()
|
||||||
@ -75,12 +88,14 @@ def _get_named_volumes(stack):
|
|||||||
ret[svc_name] = {
|
ret[svc_name] = {
|
||||||
"volume": parts[0],
|
"volume": parts[0],
|
||||||
"mount": parts[1],
|
"mount": parts[1],
|
||||||
"options": parts[2] if len(parts) == 3 else None
|
"options": parts[2] if len(parts) == 3 else None,
|
||||||
}
|
}
|
||||||
return ret
|
return ret
|
||||||
|
|
||||||
for pod in pods:
|
for pod in pods:
|
||||||
pod_file_path = get_pod_file_path(stack, parsed_stack, pod)
|
pod_file_path = get_pod_file_path(stack, parsed_stack, pod)
|
||||||
|
if pod_file_path is None:
|
||||||
|
continue
|
||||||
parsed_pod_file = yaml.load(open(pod_file_path, "r"))
|
parsed_pod_file = yaml.load(open(pod_file_path, "r"))
|
||||||
if "volumes" in parsed_pod_file:
|
if "volumes" in parsed_pod_file:
|
||||||
volumes = parsed_pod_file["volumes"]
|
volumes = parsed_pod_file["volumes"]
|
||||||
@ -88,7 +103,10 @@ def _get_named_volumes(stack):
|
|||||||
for vu in find_vol_usage(parsed_pod_file, volume).values():
|
for vu in find_vol_usage(parsed_pod_file, volume).values():
|
||||||
read_only = vu["options"] == "ro"
|
read_only = vu["options"] == "ro"
|
||||||
if read_only:
|
if read_only:
|
||||||
if vu["volume"] not in named_volumes["rw"] and vu["volume"] not in named_volumes["ro"]:
|
if (
|
||||||
|
vu["volume"] not in named_volumes["rw"]
|
||||||
|
and vu["volume"] not in named_volumes["ro"]
|
||||||
|
):
|
||||||
named_volumes["ro"].append(vu["volume"])
|
named_volumes["ro"].append(vu["volume"])
|
||||||
else:
|
else:
|
||||||
if vu["volume"] not in named_volumes["rw"]:
|
if vu["volume"] not in named_volumes["rw"]:
|
||||||
@ -108,10 +126,13 @@ def _create_bind_dir_if_relative(volume, path_string, compose_dir):
|
|||||||
absolute_path.mkdir(parents=True, exist_ok=True)
|
absolute_path.mkdir(parents=True, exist_ok=True)
|
||||||
else:
|
else:
|
||||||
if not path.exists():
|
if not path.exists():
|
||||||
print(f"WARNING: mount path for volume {volume} does not exist: {path_string}")
|
print(
|
||||||
|
f"WARNING: mount path for volume {volume} does not exist: {path_string}"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
# See: https://stackoverflow.com/questions/45699189/editing-docker-compose-yml-with-pyyaml
|
# See:
|
||||||
|
# https://stackoverflow.com/questions/45699189/editing-docker-compose-yml-with-pyyaml
|
||||||
def _fixup_pod_file(pod, spec, compose_dir):
|
def _fixup_pod_file(pod, spec, compose_dir):
|
||||||
deployment_type = spec[constants.deploy_to_key]
|
deployment_type = spec[constants.deploy_to_key]
|
||||||
# Fix up volumes
|
# Fix up volumes
|
||||||
@ -123,7 +144,11 @@ def _fixup_pod_file(pod, spec, compose_dir):
|
|||||||
if volume in spec_volumes:
|
if volume in spec_volumes:
|
||||||
volume_spec = spec_volumes[volume]
|
volume_spec = spec_volumes[volume]
|
||||||
if volume_spec:
|
if volume_spec:
|
||||||
volume_spec_fixedup = volume_spec if Path(volume_spec).is_absolute() else f".{volume_spec}"
|
volume_spec_fixedup = (
|
||||||
|
volume_spec
|
||||||
|
if Path(volume_spec).is_absolute()
|
||||||
|
else f".{volume_spec}"
|
||||||
|
)
|
||||||
_create_bind_dir_if_relative(volume, volume_spec, compose_dir)
|
_create_bind_dir_if_relative(volume, volume_spec, compose_dir)
|
||||||
# this is Docker specific
|
# this is Docker specific
|
||||||
if spec.is_docker_deployment():
|
if spec.is_docker_deployment():
|
||||||
@ -132,8 +157,8 @@ def _fixup_pod_file(pod, spec, compose_dir):
|
|||||||
"driver_opts": {
|
"driver_opts": {
|
||||||
"type": "none",
|
"type": "none",
|
||||||
"device": volume_spec_fixedup,
|
"device": volume_spec_fixedup,
|
||||||
"o": "bind"
|
"o": "bind",
|
||||||
}
|
},
|
||||||
}
|
}
|
||||||
pod["volumes"][volume] = new_volume_spec
|
pod["volumes"][volume] = new_volume_spec
|
||||||
|
|
||||||
@ -181,6 +206,8 @@ def call_stack_deploy_init(deploy_command_context):
|
|||||||
for python_file_path in python_file_paths:
|
for python_file_path in python_file_paths:
|
||||||
if python_file_path.exists():
|
if python_file_path.exists():
|
||||||
spec = util.spec_from_file_location("commands", python_file_path)
|
spec = util.spec_from_file_location("commands", python_file_path)
|
||||||
|
if spec is None or spec.loader is None:
|
||||||
|
continue
|
||||||
imported_stack = util.module_from_spec(spec)
|
imported_stack = util.module_from_spec(spec)
|
||||||
spec.loader.exec_module(imported_stack)
|
spec.loader.exec_module(imported_stack)
|
||||||
if _has_method(imported_stack, "init"):
|
if _has_method(imported_stack, "init"):
|
||||||
@ -189,12 +216,17 @@ def call_stack_deploy_init(deploy_command_context):
|
|||||||
init_done = True
|
init_done = True
|
||||||
else:
|
else:
|
||||||
# TODO: remove this restriction
|
# TODO: remove this restriction
|
||||||
print(f"Skipping init() from plugin {python_file_path}. Only one init() is allowed.")
|
print(
|
||||||
|
f"Skipping init() from plugin {python_file_path}. "
|
||||||
|
"Only one init() is allowed."
|
||||||
|
)
|
||||||
return ret
|
return ret
|
||||||
|
|
||||||
|
|
||||||
# TODO: fold this with function above
|
# TODO: fold this with function above
|
||||||
def call_stack_deploy_setup(deploy_command_context, parameters: LaconicStackSetupCommand, extra_args):
|
def call_stack_deploy_setup(
|
||||||
|
deploy_command_context, parameters: LaconicStackSetupCommand, extra_args
|
||||||
|
):
|
||||||
# Link with the python file in the stack
|
# Link with the python file in the stack
|
||||||
# Call a function in it
|
# Call a function in it
|
||||||
# If no function found, return None
|
# If no function found, return None
|
||||||
@ -202,6 +234,8 @@ def call_stack_deploy_setup(deploy_command_context, parameters: LaconicStackSetu
|
|||||||
for python_file_path in python_file_paths:
|
for python_file_path in python_file_paths:
|
||||||
if python_file_path.exists():
|
if python_file_path.exists():
|
||||||
spec = util.spec_from_file_location("commands", python_file_path)
|
spec = util.spec_from_file_location("commands", python_file_path)
|
||||||
|
if spec is None or spec.loader is None:
|
||||||
|
continue
|
||||||
imported_stack = util.module_from_spec(spec)
|
imported_stack = util.module_from_spec(spec)
|
||||||
spec.loader.exec_module(imported_stack)
|
spec.loader.exec_module(imported_stack)
|
||||||
if _has_method(imported_stack, "setup"):
|
if _has_method(imported_stack, "setup"):
|
||||||
@ -217,6 +251,8 @@ def call_stack_deploy_create(deployment_context, extra_args):
|
|||||||
for python_file_path in python_file_paths:
|
for python_file_path in python_file_paths:
|
||||||
if python_file_path.exists():
|
if python_file_path.exists():
|
||||||
spec = util.spec_from_file_location("commands", python_file_path)
|
spec = util.spec_from_file_location("commands", python_file_path)
|
||||||
|
if spec is None or spec.loader is None:
|
||||||
|
continue
|
||||||
imported_stack = util.module_from_spec(spec)
|
imported_stack = util.module_from_spec(spec)
|
||||||
spec.loader.exec_module(imported_stack)
|
spec.loader.exec_module(imported_stack)
|
||||||
if _has_method(imported_stack, "create"):
|
if _has_method(imported_stack, "create"):
|
||||||
@ -247,7 +283,13 @@ def _find_extra_config_dirs(parsed_pod_file, pod):
|
|||||||
|
|
||||||
|
|
||||||
def _get_mapped_ports(stack: str, map_recipe: str):
|
def _get_mapped_ports(stack: str, map_recipe: str):
|
||||||
port_map_recipes = ["any-variable-random", "localhost-same", "any-same", "localhost-fixed-random", "any-fixed-random"]
|
port_map_recipes = [
|
||||||
|
"any-variable-random",
|
||||||
|
"localhost-same",
|
||||||
|
"any-same",
|
||||||
|
"localhost-fixed-random",
|
||||||
|
"any-fixed-random",
|
||||||
|
]
|
||||||
ports = _get_ports(stack)
|
ports = _get_ports(stack)
|
||||||
if ports:
|
if ports:
|
||||||
# Implement any requested mapping recipe
|
# Implement any requested mapping recipe
|
||||||
@ -259,7 +301,9 @@ def _get_mapped_ports(stack: str, map_recipe: str):
|
|||||||
orig_port = ports_array[x]
|
orig_port = ports_array[x]
|
||||||
# Strip /udp suffix if present
|
# Strip /udp suffix if present
|
||||||
bare_orig_port = orig_port.replace("/udp", "")
|
bare_orig_port = orig_port.replace("/udp", "")
|
||||||
random_port = random.randint(20000, 50000) # Beware: we're relying on luck to not collide
|
random_port = random.randint(
|
||||||
|
20000, 50000
|
||||||
|
) # Beware: we're relying on luck to not collide
|
||||||
if map_recipe == "any-variable-random":
|
if map_recipe == "any-variable-random":
|
||||||
# This is the default so take no action
|
# This is the default so take no action
|
||||||
pass
|
pass
|
||||||
@ -278,7 +322,10 @@ def _get_mapped_ports(stack: str, map_recipe: str):
|
|||||||
else:
|
else:
|
||||||
print("Error: bad map_recipe")
|
print("Error: bad map_recipe")
|
||||||
else:
|
else:
|
||||||
print(f"Error: --map-ports-to-host must specify one of: {port_map_recipes}")
|
print(
|
||||||
|
f"Error: --map-ports-to-host must specify one of: "
|
||||||
|
f"{port_map_recipes}"
|
||||||
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
return ports
|
return ports
|
||||||
|
|
||||||
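Editor's note: the _get_ports/_get_mapped_ports changes above are mostly line reflow; the behaviour stays as before: compose port entries are normalized to strings, a /udp suffix is stripped before comparison, and the random recipes draw a host port from the 20000-50000 range. An illustrative sketch of that normalization; the function name and sample values are not from the source:

import random

def normalize_ports(raw_ports):
    normalized = []
    for port in [str(x) for x in raw_ports]:
        bare_port = port.replace("/udp", "")
        host_port = random.randint(20000, 50000)  # same range as the code above
        normalized.append((bare_port, host_port))
    return normalized

print(normalize_ports([8545, "30303/udp"]))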
@ -303,33 +350,54 @@ def _parse_config_variables(variable_values: str):
|
|||||||
|
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.option("--config", help="Provide config variables for the deployment")
|
@click.option("--config", help="Provide config variables for the deployment")
|
||||||
@click.option("--config-file", help="Provide config variables in a file for the deployment")
|
@click.option(
|
||||||
|
"--config-file", help="Provide config variables in a file for the deployment"
|
||||||
|
)
|
||||||
@click.option("--kube-config", help="Provide a config file for a k8s deployment")
|
@click.option("--kube-config", help="Provide a config file for a k8s deployment")
|
||||||
@click.option("--image-registry", help="Provide a container image registry url for this k8s cluster")
|
@click.option(
|
||||||
|
"--image-registry",
|
||||||
|
help="Provide a container image registry url for this k8s cluster",
|
||||||
|
)
|
||||||
@click.option("--output", required=True, help="Write yaml spec file here")
|
@click.option("--output", required=True, help="Write yaml spec file here")
|
||||||
@click.option("--map-ports-to-host", required=False,
|
@click.option(
|
||||||
help="Map ports to the host as one of: any-variable-random (default), "
|
"--map-ports-to-host",
|
||||||
"localhost-same, any-same, localhost-fixed-random, any-fixed-random")
|
required=False,
|
||||||
|
help="Map ports to the host as one of: any-variable-random (default), "
|
||||||
|
"localhost-same, any-same, localhost-fixed-random, any-fixed-random",
|
||||||
|
)
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def init(ctx, config, config_file, kube_config, image_registry, output, map_ports_to_host):
|
def init(
|
||||||
|
ctx, config, config_file, kube_config, image_registry, output, map_ports_to_host
|
||||||
|
):
|
||||||
stack = global_options(ctx).stack
|
stack = global_options(ctx).stack
|
||||||
deployer_type = ctx.obj.deployer.type
|
deployer_type = ctx.obj.deployer.type
|
||||||
deploy_command_context = ctx.obj
|
deploy_command_context = ctx.obj
|
||||||
return init_operation(
|
return init_operation(
|
||||||
deploy_command_context,
|
deploy_command_context,
|
||||||
stack, deployer_type,
|
stack,
|
||||||
config, config_file,
|
deployer_type,
|
||||||
|
config,
|
||||||
|
config_file,
|
||||||
kube_config,
|
kube_config,
|
||||||
image_registry,
|
image_registry,
|
||||||
output,
|
output,
|
||||||
map_ports_to_host)
|
map_ports_to_host,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
# The init command's implementation is in a separate function so that we can
|
# The init command's implementation is in a separate function so that we can
|
||||||
# call it from other commands, bypassing the click decoration stuff
|
# call it from other commands, bypassing the click decoration stuff
|
||||||
def init_operation(deploy_command_context, stack, deployer_type, config,
|
def init_operation(
|
||||||
config_file, kube_config, image_registry, output, map_ports_to_host):
|
deploy_command_context,
|
||||||
|
stack,
|
||||||
|
deployer_type,
|
||||||
|
config,
|
||||||
|
config_file,
|
||||||
|
kube_config,
|
||||||
|
image_registry,
|
||||||
|
output,
|
||||||
|
map_ports_to_host,
|
||||||
|
):
|
||||||
default_spec_file_content = call_stack_deploy_init(deploy_command_context)
|
default_spec_file_content = call_stack_deploy_init(deploy_command_context)
|
||||||
spec_file_content = {"stack": stack, constants.deploy_to_key: deployer_type}
|
spec_file_content = {"stack": stack, constants.deploy_to_key: deployer_type}
|
||||||
if deployer_type == "k8s":
|
if deployer_type == "k8s":
|
||||||
@ -340,13 +408,20 @@ def init_operation(deploy_command_context, stack, deployer_type, config,
|
|||||||
if image_registry:
|
if image_registry:
|
||||||
spec_file_content.update({constants.image_registry_key: image_registry})
|
spec_file_content.update({constants.image_registry_key: image_registry})
|
||||||
else:
|
else:
|
||||||
print("WARNING: --image-registry not specified, only default container registries (eg, Docker Hub) will be available")
|
print(
|
||||||
|
"WARNING: --image-registry not specified, only default container "
|
||||||
|
"registries (eg, Docker Hub) will be available"
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
# Check for --kube-config supplied for non-relevant deployer types
|
# Check for --kube-config supplied for non-relevant deployer types
|
||||||
if kube_config is not None:
|
if kube_config is not None:
|
||||||
error_exit(f"--kube-config is not allowed with a {deployer_type} deployment")
|
error_exit(
|
||||||
|
f"--kube-config is not allowed with a {deployer_type} deployment"
|
||||||
|
)
|
||||||
if image_registry is not None:
|
if image_registry is not None:
|
||||||
error_exit(f"--image-registry is not allowed with a {deployer_type} deployment")
|
error_exit(
|
||||||
|
f"--image-registry is not allowed with a {deployer_type} deployment"
|
||||||
|
)
|
||||||
if default_spec_file_content:
|
if default_spec_file_content:
|
||||||
spec_file_content.update(default_spec_file_content)
|
spec_file_content.update(default_spec_file_content)
|
||||||
config_variables = _parse_config_variables(config)
|
config_variables = _parse_config_variables(config)
|
||||||
@ -368,7 +443,9 @@ def init_operation(deploy_command_context, stack, deployer_type, config,
|
|||||||
spec_file_content.update({"config": merged_config})
|
spec_file_content.update({"config": merged_config})
|
||||||
|
|
||||||
ports = _get_mapped_ports(stack, map_ports_to_host)
|
ports = _get_mapped_ports(stack, map_ports_to_host)
|
||||||
spec_file_content.update({"network": {"ports": ports}})
|
orig_network = spec_file_content.get("network", {})
|
||||||
|
orig_network["ports"] = ports
|
||||||
|
spec_file_content["network"] = orig_network
|
||||||
|
|
||||||
named_volumes = _get_named_volumes(stack)
|
named_volumes = _get_named_volumes(stack)
|
||||||
if named_volumes:
|
if named_volumes:
|
||||||
@ -393,7 +470,9 @@ def init_operation(deploy_command_context, stack, deployer_type, config,
|
|||||||
spec_file_content["configmaps"] = configmap_descriptors
|
spec_file_content["configmaps"] = configmap_descriptors
|
||||||
|
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"Creating spec file for stack: {stack} with content: {spec_file_content}")
|
print(
|
||||||
|
f"Creating spec file for stack: {stack} with content: {spec_file_content}"
|
||||||
|
)
|
||||||
|
|
||||||
with open(output, "w") as output_file:
|
with open(output, "w") as output_file:
|
||||||
get_yaml().dump(spec_file_content, output_file)
|
get_yaml().dump(spec_file_content, output_file)
|
||||||
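Editor's note: note the change above from overwriting the spec's network section to merging into it: the default spec content contributed by a stack plugin's init() may already carry network keys, and only the ports entry should be replaced. A tiny illustration with made-up values:

spec_file_content = {"network": {"http-proxy": [{"host-name": "example.test"}]}}
ports = {"web": ["8080"]}

orig_network = spec_file_content.get("network", {})
orig_network["ports"] = ports
spec_file_content["network"] = orig_network

print(spec_file_content)
# {'network': {'http-proxy': [{'host-name': 'example.test'}], 'ports': {'web': ['8080']}}}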
@ -441,24 +520,54 @@ def _check_volume_definitions(spec):
|
|||||||
|
|
||||||
|
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.option("--spec-file", required=True, help="Spec file to use to create this deployment")
|
@click.option(
|
||||||
|
"--spec-file", required=True, help="Spec file to use to create this deployment"
|
||||||
|
)
|
||||||
@click.option("--deployment-dir", help="Create deployment files in this directory")
|
@click.option("--deployment-dir", help="Create deployment files in this directory")
|
||||||
|
@click.option(
|
||||||
|
"--helm-chart",
|
||||||
|
is_flag=True,
|
||||||
|
default=False,
|
||||||
|
help="Generate Helm chart instead of deploying (k8s only)",
|
||||||
|
)
|
||||||
# TODO: Hack
|
# TODO: Hack
|
||||||
@click.option("--network-dir", help="Network configuration supplied in this directory")
|
@click.option("--network-dir", help="Network configuration supplied in this directory")
|
||||||
@click.option("--initial-peers", help="Initial set of persistent peers")
|
@click.option("--initial-peers", help="Initial set of persistent peers")
|
||||||
|
@click.argument("extra_args", nargs=-1, type=click.UNPROCESSED)
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def create(ctx, spec_file, deployment_dir, network_dir, initial_peers):
|
def create(
|
||||||
|
ctx, spec_file, deployment_dir, helm_chart, network_dir, initial_peers, extra_args
|
||||||
|
):
|
||||||
deployment_command_context = ctx.obj
|
deployment_command_context = ctx.obj
|
||||||
return create_operation(deployment_command_context, spec_file, deployment_dir, network_dir, initial_peers)
|
return create_operation(
|
||||||
|
deployment_command_context,
|
||||||
|
spec_file,
|
||||||
|
deployment_dir,
|
||||||
|
helm_chart,
|
||||||
|
network_dir,
|
||||||
|
initial_peers,
|
||||||
|
extra_args,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
# The init command's implementation is in a separate function so that we can
|
# The init command's implementation is in a separate function so that we can
|
||||||
# call it from other commands, bypassing the click decoration stuff
|
# call it from other commands, bypassing the click decoration stuff
|
||||||
def create_operation(deployment_command_context, spec_file, deployment_dir, network_dir, initial_peers):
|
def create_operation(
|
||||||
parsed_spec = Spec(os.path.abspath(spec_file), get_parsed_deployment_spec(spec_file))
|
deployment_command_context,
|
||||||
|
spec_file,
|
||||||
|
deployment_dir,
|
||||||
|
helm_chart=False,
|
||||||
|
network_dir=None,
|
||||||
|
initial_peers=None,
|
||||||
|
extra_args=(),
|
||||||
|
):
|
||||||
|
parsed_spec = Spec(
|
||||||
|
os.path.abspath(spec_file), get_parsed_deployment_spec(spec_file)
|
||||||
|
)
|
||||||
_check_volume_definitions(parsed_spec)
|
_check_volume_definitions(parsed_spec)
|
||||||
stack_name = parsed_spec["stack"]
|
stack_name = parsed_spec["stack"]
|
||||||
deployment_type = parsed_spec[constants.deploy_to_key]
|
deployment_type = parsed_spec[constants.deploy_to_key]
|
||||||
|
|
||||||
stack_file = get_stack_path(stack_name).joinpath(constants.stack_file_name)
|
stack_file = get_stack_path(stack_name).joinpath(constants.stack_file_name)
|
||||||
parsed_stack = get_parsed_stack_config(stack_name)
|
parsed_stack = get_parsed_stack_config(stack_name)
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
@ -473,13 +582,30 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, netw
|
|||||||
# Copy spec file and the stack file into the deployment dir
|
# Copy spec file and the stack file into the deployment dir
|
||||||
copyfile(spec_file, deployment_dir_path.joinpath(constants.spec_file_name))
|
copyfile(spec_file, deployment_dir_path.joinpath(constants.spec_file_name))
|
||||||
copyfile(stack_file, deployment_dir_path.joinpath(constants.stack_file_name))
|
copyfile(stack_file, deployment_dir_path.joinpath(constants.stack_file_name))
|
||||||
|
|
||||||
|
# Create deployment.yml with cluster-id
|
||||||
_create_deployment_file(deployment_dir_path)
|
_create_deployment_file(deployment_dir_path)
|
||||||
|
|
||||||
|
# Branch to Helm chart generation flow if --helm-chart flag is set
|
||||||
|
if deployment_type == "k8s" and helm_chart:
|
||||||
|
from stack_orchestrator.deploy.k8s.helm.chart_generator import (
|
||||||
|
generate_helm_chart,
|
||||||
|
)
|
||||||
|
|
||||||
|
generate_helm_chart(stack_name, spec_file, deployment_dir_path)
|
||||||
|
return # Exit early for helm chart generation
|
||||||
|
|
||||||
|
# Existing deployment flow continues unchanged
|
||||||
# Copy any config variables from the spec file into an env file suitable for compose
|
# Copy any config variables from the spec file into an env file suitable for compose
|
||||||
_write_config_file(spec_file, deployment_dir_path.joinpath(constants.config_file_name))
|
_write_config_file(
|
||||||
|
spec_file, deployment_dir_path.joinpath(constants.config_file_name)
|
||||||
|
)
|
||||||
# Copy any k8s config file into the deployment dir
|
# Copy any k8s config file into the deployment dir
|
||||||
if deployment_type == "k8s":
|
if deployment_type == "k8s":
|
||||||
_write_kube_config_file(Path(parsed_spec[constants.kube_config_key]),
|
_write_kube_config_file(
|
||||||
deployment_dir_path.joinpath(constants.kube_config_filename))
|
Path(parsed_spec[constants.kube_config_key]),
|
||||||
|
deployment_dir_path.joinpath(constants.kube_config_filename),
|
||||||
|
)
|
||||||
# Copy the pod files into the deployment dir, fixing up content
|
# Copy the pod files into the deployment dir, fixing up content
|
||||||
pods = get_pod_list(parsed_stack)
|
pods = get_pod_list(parsed_stack)
|
||||||
destination_compose_dir = deployment_dir_path.joinpath("compose")
|
destination_compose_dir = deployment_dir_path.joinpath("compose")
|
||||||
@ -489,6 +615,8 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, netw
|
|||||||
yaml = get_yaml()
|
yaml = get_yaml()
|
||||||
for pod in pods:
|
for pod in pods:
|
||||||
pod_file_path = get_pod_file_path(stack_name, parsed_stack, pod)
|
pod_file_path = get_pod_file_path(stack_name, parsed_stack, pod)
|
||||||
|
if pod_file_path is None:
|
||||||
|
continue
|
||||||
parsed_pod_file = yaml.load(open(pod_file_path, "r"))
|
parsed_pod_file = yaml.load(open(pod_file_path, "r"))
|
||||||
extra_config_dirs = _find_extra_config_dirs(parsed_pod_file, pod)
|
extra_config_dirs = _find_extra_config_dirs(parsed_pod_file, pod)
|
||||||
destination_pod_dir = destination_pods_dir.joinpath(pod)
|
destination_pod_dir = destination_pods_dir.joinpath(pod)
|
||||||
@ -496,7 +624,9 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, netw
|
|||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"extra config dirs: {extra_config_dirs}")
|
print(f"extra config dirs: {extra_config_dirs}")
|
||||||
_fixup_pod_file(parsed_pod_file, parsed_spec, destination_compose_dir)
|
_fixup_pod_file(parsed_pod_file, parsed_spec, destination_compose_dir)
|
||||||
with open(destination_compose_dir.joinpath("docker-compose-%s.yml" % pod), "w") as output_file:
|
with open(
|
||||||
|
destination_compose_dir.joinpath("docker-compose-%s.yml" % pod), "w"
|
||||||
|
) as output_file:
|
||||||
yaml.dump(parsed_pod_file, output_file)
|
yaml.dump(parsed_pod_file, output_file)
|
||||||
# Copy the config files for the pod, if any
|
# Copy the config files for the pod, if any
|
||||||
config_dirs = {pod}
|
config_dirs = {pod}
|
||||||
@ -504,8 +634,11 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, netw
|
|||||||
for config_dir in config_dirs:
|
for config_dir in config_dirs:
|
||||||
source_config_dir = resolve_config_dir(stack_name, config_dir)
|
source_config_dir = resolve_config_dir(stack_name, config_dir)
|
||||||
if os.path.exists(source_config_dir):
|
if os.path.exists(source_config_dir):
|
||||||
destination_config_dir = deployment_dir_path.joinpath("config", config_dir)
|
destination_config_dir = deployment_dir_path.joinpath(
|
||||||
# If the same config dir appears in multiple pods, it may already have been copied
|
"config", config_dir
|
||||||
|
)
|
||||||
|
# If the same config dir appears in multiple pods, it may already have
|
||||||
|
# been copied
|
||||||
if not os.path.exists(destination_config_dir):
|
if not os.path.exists(destination_config_dir):
|
||||||
copytree(source_config_dir, destination_config_dir)
|
copytree(source_config_dir, destination_config_dir)
|
||||||
# Copy the script files for the pod, if any
|
# Copy the script files for the pod, if any
|
||||||
@ -518,8 +651,12 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, netw
|
|||||||
for configmap in parsed_spec.get_configmaps():
|
for configmap in parsed_spec.get_configmaps():
|
||||||
source_config_dir = resolve_config_dir(stack_name, configmap)
|
source_config_dir = resolve_config_dir(stack_name, configmap)
|
||||||
if os.path.exists(source_config_dir):
|
if os.path.exists(source_config_dir):
|
||||||
destination_config_dir = deployment_dir_path.joinpath("configmaps", configmap)
|
destination_config_dir = deployment_dir_path.joinpath(
|
||||||
copytree(source_config_dir, destination_config_dir, dirs_exist_ok=True)
|
"configmaps", configmap
|
||||||
|
)
|
||||||
|
copytree(
|
||||||
|
source_config_dir, destination_config_dir, dirs_exist_ok=True
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
# TODO: We should probably only do this if the volume is marked :ro.
|
# TODO: We should probably only do this if the volume is marked :ro.
|
||||||
for volume_name, volume_path in parsed_spec.get_volumes().items():
|
for volume_name, volume_path in parsed_spec.get_volumes().items():
|
||||||
@ -528,20 +665,51 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, netw
|
|||||||
if os.path.exists(source_config_dir) and os.listdir(source_config_dir):
|
if os.path.exists(source_config_dir) and os.listdir(source_config_dir):
|
||||||
destination_config_dir = deployment_dir_path.joinpath(volume_path)
|
destination_config_dir = deployment_dir_path.joinpath(volume_path)
|
||||||
# Only copy if the destination exists and _is_ empty.
|
# Only copy if the destination exists and _is_ empty.
|
||||||
if os.path.exists(destination_config_dir) and not os.listdir(destination_config_dir):
|
if os.path.exists(destination_config_dir) and not os.listdir(
|
||||||
copytree(source_config_dir, destination_config_dir, dirs_exist_ok=True)
|
destination_config_dir
|
||||||
|
):
|
||||||
|
copytree(
|
||||||
|
source_config_dir,
|
||||||
|
destination_config_dir,
|
||||||
|
dirs_exist_ok=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Copy the job files into the deployment dir (for Docker deployments)
|
||||||
|
jobs = get_job_list(parsed_stack)
|
||||||
|
if jobs and not parsed_spec.is_kubernetes_deployment():
|
||||||
|
destination_compose_jobs_dir = deployment_dir_path.joinpath("compose-jobs")
|
||||||
|
os.mkdir(destination_compose_jobs_dir)
|
||||||
|
for job in jobs:
|
||||||
|
job_file_path = get_job_file_path(stack_name, parsed_stack, job)
|
||||||
|
if job_file_path and job_file_path.exists():
|
||||||
|
parsed_job_file = yaml.load(open(job_file_path, "r"))
|
||||||
|
_fixup_pod_file(parsed_job_file, parsed_spec, destination_compose_dir)
|
||||||
|
with open(
|
||||||
|
destination_compose_jobs_dir.joinpath(
|
||||||
|
"docker-compose-%s.yml" % job
|
||||||
|
),
|
||||||
|
"w",
|
||||||
|
) as output_file:
|
||||||
|
yaml.dump(parsed_job_file, output_file)
|
||||||
|
if opts.o.debug:
|
||||||
|
print(f"Copied job compose file: {job}")
|
||||||
|
|
||||||
# Delegate to the stack's Python code
|
# Delegate to the stack's Python code
|
||||||
# The deploy create command doesn't require a --stack argument so we need to insert the
|
# The deploy create command doesn't require a --stack argument so we need
|
||||||
# stack member here.
|
# to insert the stack member here.
|
||||||
deployment_command_context.stack = stack_name
|
deployment_command_context.stack = stack_name
|
||||||
deployment_context = DeploymentContext()
|
deployment_context = DeploymentContext()
|
||||||
deployment_context.init(deployment_dir_path)
|
deployment_context.init(deployment_dir_path)
|
||||||
# Call the deployer to generate any deployer-specific files (e.g. for kind)
|
# Call the deployer to generate any deployer-specific files (e.g. for kind)
|
||||||
deployer_config_generator = getDeployerConfigGenerator(deployment_type, deployment_context)
|
deployer_config_generator = getDeployerConfigGenerator(
|
||||||
|
deployment_type, deployment_context
|
||||||
|
)
|
||||||
# TODO: make deployment_dir_path a Path above
|
# TODO: make deployment_dir_path a Path above
|
||||||
deployer_config_generator.generate(deployment_dir_path)
|
if deployer_config_generator is not None:
|
||||||
call_stack_deploy_create(deployment_context, [network_dir, initial_peers, deployment_command_context])
|
deployer_config_generator.generate(deployment_dir_path)
|
||||||
|
call_stack_deploy_create(
|
||||||
|
deployment_context, [network_dir, initial_peers, *extra_args]
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
# TODO: this code should be in the stack .py files but
|
# TODO: this code should be in the stack .py files but
|
||||||
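Editor's note: two behaviours in the hunk above are easy to miss: job compose files are written into a separate compose-jobs directory for Docker deployments, and volume seed data is copied only when the destination directory exists and is still empty, so re-running create never clobbers populated volumes. A standalone rendering of that guard; the function name and paths are illustrative:

import os
from shutil import copytree

def seed_volume(source_config_dir: str, destination_config_dir: str) -> bool:
    if os.path.exists(source_config_dir) and os.listdir(source_config_dir):
        # only seed a destination that exists and is still empty
        if os.path.exists(destination_config_dir) and not os.listdir(destination_config_dir):
            copytree(source_config_dir, destination_config_dir, dirs_exist_ok=True)
            return True
    return False

# seed_volume("stack/config/db-data", "deployment/data/db-data")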
@ -551,18 +719,50 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, netw
|
|||||||
@click.option("--node-moniker", help="Moniker for this node")
|
@click.option("--node-moniker", help="Moniker for this node")
|
||||||
@click.option("--chain-id", help="The new chain id")
|
@click.option("--chain-id", help="The new chain id")
|
||||||
@click.option("--key-name", help="Name for new node key")
|
@click.option("--key-name", help="Name for new node key")
|
||||||
@click.option("--gentx-files", help="List of comma-delimited gentx filenames from other nodes")
|
@click.option(
|
||||||
@click.option("--gentx-addresses", type=str, help="List of comma-delimited validator addresses for other nodes")
|
"--gentx-files", help="List of comma-delimited gentx filenames from other nodes"
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--gentx-addresses",
|
||||||
|
type=str,
|
||||||
|
help="List of comma-delimited validator addresses for other nodes",
|
||||||
|
)
|
||||||
@click.option("--genesis-file", help="Genesis file for the network")
|
@click.option("--genesis-file", help="Genesis file for the network")
|
||||||
@click.option("--initialize-network", is_flag=True, default=False, help="Initialize phase")
|
@click.option(
|
||||||
|
"--initialize-network", is_flag=True, default=False, help="Initialize phase"
|
||||||
|
)
|
||||||
@click.option("--join-network", is_flag=True, default=False, help="Join phase")
|
@click.option("--join-network", is_flag=True, default=False, help="Join phase")
|
||||||
@click.option("--connect-network", is_flag=True, default=False, help="Connect phase")
|
@click.option("--connect-network", is_flag=True, default=False, help="Connect phase")
|
||||||
@click.option("--create-network", is_flag=True, default=False, help="Create phase")
|
@click.option("--create-network", is_flag=True, default=False, help="Create phase")
|
||||||
@click.option("--network-dir", help="Directory for network files")
|
@click.option("--network-dir", help="Directory for network files")
|
||||||
@click.argument('extra_args', nargs=-1)
|
@click.argument("extra_args", nargs=-1)
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def setup(ctx, node_moniker, chain_id, key_name, gentx_files, gentx_addresses, genesis_file, initialize_network, join_network,
|
def setup(
|
||||||
connect_network, create_network, network_dir, extra_args):
|
ctx,
|
||||||
parmeters = LaconicStackSetupCommand(chain_id, node_moniker, key_name, initialize_network, join_network, connect_network,
|
node_moniker,
|
||||||
create_network, gentx_files, gentx_addresses, genesis_file, network_dir)
|
chain_id,
|
||||||
|
key_name,
|
||||||
|
gentx_files,
|
||||||
|
gentx_addresses,
|
||||||
|
genesis_file,
|
||||||
|
initialize_network,
|
||||||
|
join_network,
|
||||||
|
connect_network,
|
||||||
|
create_network,
|
||||||
|
network_dir,
|
||||||
|
extra_args,
|
||||||
|
):
|
||||||
|
parmeters = LaconicStackSetupCommand(
|
||||||
|
chain_id,
|
||||||
|
node_moniker,
|
||||||
|
key_name,
|
||||||
|
initialize_network,
|
||||||
|
join_network,
|
||||||
|
connect_network,
|
||||||
|
create_network,
|
||||||
|
gentx_files,
|
||||||
|
gentx_addresses,
|
||||||
|
genesis_file,
|
||||||
|
network_dir,
|
||||||
|
)
|
||||||
call_stack_deploy_setup(ctx.obj, parmeters, extra_args)
|
call_stack_deploy_setup(ctx.obj, parmeters, extra_args)
|
||||||
|
|||||||
@ -32,7 +32,9 @@ def _image_needs_pushed(image: str):
|
|||||||
def _remote_tag_for_image(image: str, remote_repo_url: str):
|
def _remote_tag_for_image(image: str, remote_repo_url: str):
|
||||||
# Turns image tags of the form: foo/bar:local into remote.repo/org/bar:deploy
|
# Turns image tags of the form: foo/bar:local into remote.repo/org/bar:deploy
|
||||||
major_parts = image.split("/", 2)
|
major_parts = image.split("/", 2)
|
||||||
image_name_with_version = major_parts[1] if 2 == len(major_parts) else major_parts[0]
|
image_name_with_version = (
|
||||||
|
major_parts[1] if 2 == len(major_parts) else major_parts[0]
|
||||||
|
)
|
||||||
(image_name, image_version) = image_name_with_version.split(":")
|
(image_name, image_version) = image_name_with_version.split(":")
|
||||||
if image_version == "local":
|
if image_version == "local":
|
||||||
return f"{remote_repo_url}/{image_name}:deploy"
|
return f"{remote_repo_url}/{image_name}:deploy"
|
||||||
@ -61,17 +63,22 @@ def add_tags_to_image(remote_repo_url: str, local_tag: str, *additional_tags):
|
|||||||
|
|
||||||
docker = DockerClient()
|
docker = DockerClient()
|
||||||
remote_tag = _remote_tag_for_image(local_tag, remote_repo_url)
|
remote_tag = _remote_tag_for_image(local_tag, remote_repo_url)
|
||||||
new_remote_tags = [_remote_tag_for_image(tag, remote_repo_url) for tag in additional_tags]
|
new_remote_tags = [
|
||||||
|
_remote_tag_for_image(tag, remote_repo_url) for tag in additional_tags
|
||||||
|
]
|
||||||
docker.buildx.imagetools.create(sources=[remote_tag], tags=new_remote_tags)
|
docker.buildx.imagetools.create(sources=[remote_tag], tags=new_remote_tags)
|
||||||
|
|
||||||
|
|
||||||
def remote_tag_for_image_unique(image: str, remote_repo_url: str, deployment_id: str):
|
def remote_tag_for_image_unique(image: str, remote_repo_url: str, deployment_id: str):
|
||||||
# Turns image tags of the form: foo/bar:local into remote.repo/org/bar:deploy
|
# Turns image tags of the form: foo/bar:local into remote.repo/org/bar:deploy
|
||||||
major_parts = image.split("/", 2)
|
major_parts = image.split("/", 2)
|
||||||
image_name_with_version = major_parts[1] if 2 == len(major_parts) else major_parts[0]
|
image_name_with_version = (
|
||||||
|
major_parts[1] if 2 == len(major_parts) else major_parts[0]
|
||||||
|
)
|
||||||
(image_name, image_version) = image_name_with_version.split(":")
|
(image_name, image_version) = image_name_with_version.split(":")
|
||||||
if image_version == "local":
|
if image_version == "local":
|
||||||
# Salt the tag with part of the deployment id to make it unique to this deployment
|
# Salt the tag with part of the deployment id to make it unique to this
|
||||||
|
# deployment
|
||||||
deployment_tag = deployment_id[-8:]
|
deployment_tag = deployment_id[-8:]
|
||||||
return f"{remote_repo_url}/{image_name}:deploy-{deployment_tag}"
|
return f"{remote_repo_url}/{image_name}:deploy-{deployment_tag}"
|
||||||
else:
|
else:
|
||||||
@ -79,7 +86,9 @@ def remote_tag_for_image_unique(image: str, remote_repo_url: str, deployment_id:
|
|||||||
|
|
||||||
|
|
||||||
# TODO: needs lots of error handling
|
# TODO: needs lots of error handling
|
||||||
def push_images_operation(command_context: DeployCommandContext, deployment_context: DeploymentContext):
|
def push_images_operation(
|
||||||
|
command_context: DeployCommandContext, deployment_context: DeploymentContext
|
||||||
|
):
|
||||||
# Get the list of images for the stack
|
# Get the list of images for the stack
|
||||||
cluster_context = command_context.cluster_context
|
cluster_context = command_context.cluster_context
|
||||||
images: Set[str] = images_for_deployment(cluster_context.compose_files)
|
images: Set[str] = images_for_deployment(cluster_context.compose_files)
|
||||||
@ -88,14 +97,18 @@ def push_images_operation(command_context: DeployCommandContext, deployment_cont
|
|||||||
docker = DockerClient()
|
docker = DockerClient()
|
||||||
for image in images:
|
for image in images:
|
||||||
if _image_needs_pushed(image):
|
if _image_needs_pushed(image):
|
||||||
remote_tag = remote_tag_for_image_unique(image, remote_repo_url, deployment_context.id)
|
remote_tag = remote_tag_for_image_unique(
|
||||||
|
image, remote_repo_url, deployment_context.id
|
||||||
|
)
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f"Tagging {image} to {remote_tag}")
|
print(f"Tagging {image} to {remote_tag}")
|
||||||
docker.image.tag(image, remote_tag)
|
docker.image.tag(image, remote_tag)
|
||||||
# Run docker push commands to upload
|
# Run docker push commands to upload
|
||||||
for image in images:
|
for image in images:
|
||||||
if _image_needs_pushed(image):
|
if _image_needs_pushed(image):
|
||||||
remote_tag = remote_tag_for_image_unique(image, remote_repo_url, deployment_context.id)
|
remote_tag = remote_tag_for_image_unique(
|
||||||
|
image, remote_repo_url, deployment_context.id
|
||||||
|
)
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f"Pushing image {remote_tag}")
|
print(f"Pushing image {remote_tag}")
|
||||||
docker.image.push(remote_tag)
|
docker.image.push(remote_tag)
|
||||||
|
|||||||
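Editor's note: the images.py changes above are reflow only; the tag rewriting itself is unchanged. A standalone rendering of remote_tag_for_image_unique as shown: foo/bar:local becomes remote-repo/bar:deploy-<last 8 characters of the deployment id>. The non-local branch is elided in the diff, so returning the image unchanged below is an assumption:

def remote_tag_for_image_unique(image: str, remote_repo_url: str, deployment_id: str) -> str:
    major_parts = image.split("/", 2)
    image_name_with_version = major_parts[1] if 2 == len(major_parts) else major_parts[0]
    image_name, image_version = image_name_with_version.split(":")
    if image_version == "local":
        deployment_tag = deployment_id[-8:]
        return f"{remote_repo_url}/{image_name}:deploy-{deployment_tag}"
    return image  # assumption: non-local tags pass through untouched

print(remote_tag_for_image_unique("cerc/test-container:local",
                                  "registry.example.com/org",
                                  "laconic-0123456789abcdef"))
# registry.example.com/org/test-container:deploy-89abcdef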
@ -17,30 +17,41 @@ import os
|
|||||||
import base64
|
import base64
|
||||||
|
|
||||||
from kubernetes import client
|
from kubernetes import client
|
||||||
from typing import Any, List, Set
|
from typing import Any, List, Optional, Set
|
||||||
|
|
||||||
from stack_orchestrator.opts import opts
|
from stack_orchestrator.opts import opts
|
||||||
from stack_orchestrator.util import env_var_map_from_file
|
from stack_orchestrator.util import env_var_map_from_file
|
||||||
from stack_orchestrator.deploy.k8s.helpers import named_volumes_from_pod_files, volume_mounts_for_service, volumes_for_pod_files
|
from stack_orchestrator.deploy.k8s.helpers import (
|
||||||
|
named_volumes_from_pod_files,
|
||||||
|
volume_mounts_for_service,
|
||||||
|
volumes_for_pod_files,
|
||||||
|
)
|
||||||
from stack_orchestrator.deploy.k8s.helpers import get_kind_pv_bind_mount_path
|
from stack_orchestrator.deploy.k8s.helpers import get_kind_pv_bind_mount_path
|
||||||
from stack_orchestrator.deploy.k8s.helpers import envs_from_environment_variables_map, envs_from_compose_file, merge_envs
|
from stack_orchestrator.deploy.k8s.helpers import (
|
||||||
from stack_orchestrator.deploy.deploy_util import parsed_pod_files_map_from_file_names, images_for_deployment
|
envs_from_environment_variables_map,
|
||||||
|
envs_from_compose_file,
|
||||||
|
merge_envs,
|
||||||
|
)
|
||||||
|
from stack_orchestrator.deploy.deploy_util import (
|
||||||
|
parsed_pod_files_map_from_file_names,
|
||||||
|
images_for_deployment,
|
||||||
|
)
|
||||||
from stack_orchestrator.deploy.deploy_types import DeployEnvVars
|
from stack_orchestrator.deploy.deploy_types import DeployEnvVars
|
||||||
from stack_orchestrator.deploy.spec import Spec, Resources, ResourceLimits
|
from stack_orchestrator.deploy.spec import Spec, Resources, ResourceLimits
|
||||||
from stack_orchestrator.deploy.images import remote_tag_for_image_unique
|
from stack_orchestrator.deploy.images import remote_tag_for_image_unique
|
||||||
|
|
||||||
DEFAULT_VOLUME_RESOURCES = Resources({
|
DEFAULT_VOLUME_RESOURCES = Resources({"reservations": {"storage": "2Gi"}})
|
||||||
"reservations": {"storage": "2Gi"}
|
|
||||||
})
|
|
||||||
|
|
||||||
DEFAULT_CONTAINER_RESOURCES = Resources({
|
DEFAULT_CONTAINER_RESOURCES = Resources(
|
||||||
"reservations": {"cpus": "0.1", "memory": "200M"},
|
{
|
||||||
"limits": {"cpus": "1.0", "memory": "2000M"},
|
"reservations": {"cpus": "1.0", "memory": "2000M"},
|
||||||
})
|
"limits": {"cpus": "4.0", "memory": "8000M"},
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def to_k8s_resource_requirements(resources: Resources) -> client.V1ResourceRequirements:
|
def to_k8s_resource_requirements(resources: Resources) -> client.V1ResourceRequirements:
|
||||||
def to_dict(limits: ResourceLimits):
|
def to_dict(limits: Optional[ResourceLimits]):
|
||||||
if not limits:
|
if not limits:
|
||||||
return None
|
return None
|
||||||
|
|
||||||
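Editor's note: DEFAULT_CONTAINER_RESOURCES above changes from a 0.1 CPU / 200M reservation with a 1.0 CPU / 2000M limit to a 1.0 CPU / 2000M reservation with a 4.0 CPU / 8000M limit. Expressed directly as the kubernetes client object that to_k8s_resource_requirements builds, assuming the usual cpu and memory key names on the k8s side:

from kubernetes import client

requirements = client.V1ResourceRequirements(
    requests={"cpu": "1.0", "memory": "2000M"},
    limits={"cpu": "4.0", "memory": "8000M"},
)
print(requirements)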
@ -54,8 +65,7 @@ def to_k8s_resource_requirements(resources: Resources) -> client.V1ResourceRequi
|
|||||||
return ret
|
return ret
|
||||||
|
|
||||||
return client.V1ResourceRequirements(
|
return client.V1ResourceRequirements(
|
||||||
requests=to_dict(resources.reservations),
|
requests=to_dict(resources.reservations), limits=to_dict(resources.limits)
|
||||||
limits=to_dict(resources.limits)
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@ -73,10 +83,14 @@ class ClusterInfo:
|
|||||||
self.parsed_pod_yaml_map = parsed_pod_files_map_from_file_names(pod_files)
|
self.parsed_pod_yaml_map = parsed_pod_files_map_from_file_names(pod_files)
|
||||||
# Find the set of images in the pods
|
# Find the set of images in the pods
|
||||||
self.image_set = images_for_deployment(pod_files)
|
self.image_set = images_for_deployment(pod_files)
|
||||||
self.environment_variables = DeployEnvVars(env_var_map_from_file(compose_env_file))
|
# Filter out None values from env file
|
||||||
|
env_vars = {
|
||||||
|
k: v for k, v in env_var_map_from_file(compose_env_file).items() if v
|
||||||
|
}
|
||||||
|
self.environment_variables = DeployEnvVars(env_vars)
|
||||||
self.app_name = deployment_name
|
self.app_name = deployment_name
|
||||||
self.spec = spec
|
self.spec = spec
|
||||||
if (opts.o.debug):
|
if opts.o.debug:
|
||||||
print(f"Env vars: {self.environment_variables.map}")
|
print(f"Env vars: {self.environment_variables.map}")
|
||||||
|
|
||||||
def get_nodeports(self):
|
def get_nodeports(self):
|
||||||
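Editor's note: the constructor change above filters empty values out of the env file map before wrapping it in DeployEnvVars. A one-line illustration; the sample map is made up, and treating unset entries as empty strings or None is an assumption about env_var_map_from_file:

env_var_map = {"CERC_DB_URL": "postgres://db:5432/app", "CERC_OPTIONAL_FLAG": "", "CERC_UNSET": None}
env_vars = {k: v for k, v in env_var_map.items() if v}
print(env_vars)  # {'CERC_DB_URL': 'postgres://db:5432/app'}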
@ -90,31 +104,48 @@ class ClusterInfo:
|
|||||||
for raw_port in [str(p) for p in service_info["ports"]]:
|
for raw_port in [str(p) for p in service_info["ports"]]:
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"service port: {raw_port}")
|
print(f"service port: {raw_port}")
|
||||||
if ":" in raw_port:
|
# Parse protocol suffix (e.g., "8001/udp" -> port=8001,
|
||||||
parts = raw_port.split(":")
|
# protocol=UDP)
|
||||||
|
protocol = "TCP"
|
||||||
|
port_str = raw_port
|
||||||
|
if "/" in raw_port:
|
||||||
|
port_str, proto = raw_port.rsplit("/", 1)
|
||||||
|
protocol = proto.upper()
|
||||||
|
if ":" in port_str:
|
||||||
|
parts = port_str.split(":")
|
||||||
if len(parts) != 2:
|
if len(parts) != 2:
|
||||||
raise Exception(f"Invalid port definition: {raw_port}")
|
raise Exception(f"Invalid port definition: {raw_port}")
|
||||||
node_port = int(parts[0])
|
node_port = int(parts[0])
|
||||||
pod_port = int(parts[1])
|
pod_port = int(parts[1])
|
||||||
else:
|
else:
|
||||||
node_port = None
|
node_port = None
|
||||||
pod_port = int(raw_port)
|
pod_port = int(port_str)
|
||||||
service = client.V1Service(
|
service = client.V1Service(
|
||||||
metadata=client.V1ObjectMeta(name=f"{self.app_name}-nodeport-{pod_port}"),
|
metadata=client.V1ObjectMeta(
|
||||||
|
name=(
|
||||||
|
f"{self.app_name}-nodeport-"
|
||||||
|
f"{pod_port}-{protocol.lower()}"
|
||||||
|
)
|
||||||
|
),
|
||||||
spec=client.V1ServiceSpec(
|
spec=client.V1ServiceSpec(
|
||||||
type="NodePort",
|
type="NodePort",
|
||||||
ports=[client.V1ServicePort(
|
ports=[
|
||||||
port=pod_port,
|
client.V1ServicePort(
|
||||||
target_port=pod_port,
|
port=pod_port,
|
||||||
node_port=node_port
|
target_port=pod_port,
|
||||||
)],
|
node_port=node_port,
|
||||||
selector={"app": self.app_name}
|
protocol=protocol,
|
||||||
)
|
)
|
||||||
|
],
|
||||||
|
selector={"app": self.app_name},
|
||||||
|
),
|
||||||
)
|
)
|
||||||
nodeports.append(service)
|
nodeports.append(service)
|
||||||
return nodeports
|
return nodeports
|
||||||
|
|
||||||
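Editor's note: get_nodeports() above now understands an optional protocol suffix on a port entry and carries the protocol into the generated service name and V1ServicePort. A standalone sketch of just the parsing step; the function name and samples are illustrative:

def parse_nodeport(raw_port: str):
    protocol = "TCP"
    port_str = str(raw_port)
    if "/" in port_str:
        port_str, proto = port_str.rsplit("/", 1)
        protocol = proto.upper()
    if ":" in port_str:
        parts = port_str.split(":")
        if len(parts) != 2:
            raise Exception(f"Invalid port definition: {raw_port}")
        node_port, pod_port = int(parts[0]), int(parts[1])
    else:
        node_port, pod_port = None, int(port_str)
    return node_port, pod_port, protocol

print(parse_nodeport("8001/udp"))    # (None, 8001, 'UDP')
print(parse_nodeport("30000:3000"))  # (30000, 3000, 'TCP')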
def get_ingress(self, use_tls=False, certificate=None, cluster_issuer="letsencrypt-prod"):
|
def get_ingress(
|
||||||
|
self, use_tls=False, certificate=None, cluster_issuer="letsencrypt-prod"
|
||||||
|
):
|
||||||
# No ingress for a deployment that has no http-proxy defined, for now
|
# No ingress for a deployment that has no http-proxy defined, for now
|
||||||
http_proxy_info_list = self.spec.get_http_proxy()
|
http_proxy_info_list = self.spec.get_http_proxy()
|
||||||
ingress = None
|
ingress = None
|
||||||
@ -126,10 +157,20 @@ class ClusterInfo:
|
|||||||
# TODO: good enough parsing for webapp deployment for now
|
# TODO: good enough parsing for webapp deployment for now
|
||||||
host_name = http_proxy_info["host-name"]
|
host_name = http_proxy_info["host-name"]
|
||||||
rules = []
|
rules = []
|
||||||
tls = [client.V1IngressTLS(
|
tls = (
|
||||||
hosts=certificate["spec"]["dnsNames"] if certificate else [host_name],
|
[
|
||||||
secret_name=certificate["spec"]["secretName"] if certificate else f"{self.app_name}-tls"
|
client.V1IngressTLS(
|
||||||
)] if use_tls else None
|
hosts=certificate["spec"]["dnsNames"]
|
||||||
|
if certificate
|
||||||
|
else [host_name],
|
||||||
|
secret_name=certificate["spec"]["secretName"]
|
||||||
|
if certificate
|
||||||
|
else f"{self.app_name}-tls",
|
||||||
|
)
|
||||||
|
]
|
||||||
|
if use_tls
|
||||||
|
else None
|
||||||
|
)
|
||||||
paths = []
|
paths = []
|
||||||
for route in http_proxy_info["routes"]:
|
for route in http_proxy_info["routes"]:
|
||||||
path = route["path"]
|
path = route["path"]
|
||||||
@ -138,65 +179,71 @@ class ClusterInfo:
|
|||||||
print(f"proxy config: {path} -> {proxy_to}")
|
print(f"proxy config: {path} -> {proxy_to}")
|
||||||
# proxy_to has the form <service>:<port>
|
# proxy_to has the form <service>:<port>
|
||||||
proxy_to_port = int(proxy_to.split(":")[1])
|
proxy_to_port = int(proxy_to.split(":")[1])
|
||||||
paths.append(client.V1HTTPIngressPath(
|
paths.append(
|
||||||
path_type="Prefix",
|
client.V1HTTPIngressPath(
|
||||||
path=path,
|
path_type="Prefix",
|
||||||
backend=client.V1IngressBackend(
|
path=path,
|
||||||
service=client.V1IngressServiceBackend(
|
backend=client.V1IngressBackend(
|
||||||
# TODO: this looks wrong
|
service=client.V1IngressServiceBackend(
|
||||||
name=f"{self.app_name}-service",
|
# TODO: this looks wrong
|
||||||
# TODO: pull port number from the service
|
name=f"{self.app_name}-service",
|
||||||
port=client.V1ServiceBackendPort(number=proxy_to_port)
|
# TODO: pull port number from the service
|
||||||
)
|
port=client.V1ServiceBackendPort(number=proxy_to_port),
|
||||||
|
)
|
||||||
|
),
|
||||||
)
|
)
|
||||||
))
|
|
||||||
rules.append(client.V1IngressRule(
|
|
||||||
host=host_name,
|
|
||||||
http=client.V1HTTPIngressRuleValue(
|
|
||||||
paths=paths
|
|
||||||
)
|
)
|
||||||
))
|
rules.append(
|
||||||
spec = client.V1IngressSpec(
|
client.V1IngressRule(
|
||||||
tls=tls,
|
host=host_name, http=client.V1HTTPIngressRuleValue(paths=paths)
|
||||||
rules=rules
|
)
|
||||||
)
|
)
|
||||||
|
spec = client.V1IngressSpec(tls=tls, rules=rules)
|
||||||
|
|
||||||
ingress_annotations = {
|
ingress_annotations = {
|
||||||
"kubernetes.io/ingress.class": "nginx",
|
"kubernetes.io/ingress.class": "caddy",
|
||||||
}
|
}
|
||||||
if not certificate:
|
if not certificate:
|
||||||
ingress_annotations["cert-manager.io/cluster-issuer"] = cluster_issuer
|
ingress_annotations["cert-manager.io/cluster-issuer"] = cluster_issuer
|
||||||
|
|
||||||
ingress = client.V1Ingress(
|
ingress = client.V1Ingress(
|
||||||
metadata=client.V1ObjectMeta(
|
metadata=client.V1ObjectMeta(
|
||||||
name=f"{self.app_name}-ingress",
|
name=f"{self.app_name}-ingress", annotations=ingress_annotations
|
||||||
annotations=ingress_annotations
|
|
||||||
),
|
),
|
||||||
spec=spec
|
spec=spec,
|
||||||
)
|
)
|
||||||
return ingress
|
return ingress
|
||||||
|
|
||||||
# TODO: support multiple services
|
# TODO: support multiple services
|
||||||
def get_service(self):
|
def get_service(self):
|
||||||
for pod_name in self.parsed_pod_yaml_map:
|
# Collect all ports from http-proxy routes
|
||||||
pod = self.parsed_pod_yaml_map[pod_name]
|
ports_set = set()
|
||||||
services = pod["services"]
|
http_proxy_list = self.spec.get_http_proxy()
|
||||||
for service_name in services:
|
if http_proxy_list:
|
||||||
service_info = services[service_name]
|
for http_proxy in http_proxy_list:
|
||||||
if "ports" in service_info:
|
for route in http_proxy.get("routes", []):
|
||||||
port = int(service_info["ports"][0])
|
proxy_to = route.get("proxy-to", "")
|
||||||
if opts.o.debug:
|
if ":" in proxy_to:
|
||||||
print(f"service port: {port}")
|
port = int(proxy_to.split(":")[1])
|
||||||
|
ports_set.add(port)
|
||||||
|
if opts.o.debug:
|
||||||
|
print(f"http-proxy route port: {port}")
|
||||||
|
|
||||||
|
if not ports_set:
|
||||||
|
return None
|
||||||
|
|
||||||
|
service_ports = [
|
||||||
|
client.V1ServicePort(port=p, target_port=p, name=f"port-{p}")
|
||||||
|
for p in sorted(ports_set)
|
||||||
|
]
|
||||||
|
|
||||||
service = client.V1Service(
|
service = client.V1Service(
|
||||||
metadata=client.V1ObjectMeta(name=f"{self.app_name}-service"),
|
metadata=client.V1ObjectMeta(name=f"{self.app_name}-service"),
|
||||||
spec=client.V1ServiceSpec(
|
spec=client.V1ServiceSpec(
|
||||||
type="ClusterIP",
|
type="ClusterIP",
|
||||||
ports=[client.V1ServicePort(
|
ports=service_ports,
|
||||||
port=port,
|
selector={"app": self.app_name},
|
||||||
target_port=port
|
),
|
||||||
)],
|
|
||||||
selector={"app": self.app_name}
|
|
||||||
)
|
|
||||||
)
|
)
|
||||||
return service
|
return service
|
||||||
|
|
||||||
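A minimal sketch of the route-port collection added to `get_service()` above, using a hypothetical http-proxy spec; the host name, paths and `proxy-to` targets are assumptions for illustration, not taken from a real deployment.

```python
# Illustration only: mirrors the port collection in get_service().
http_proxy_list = [
    {
        "host-name": "app.example.com",
        "routes": [
            {"path": "/", "proxy-to": "webapp:8080"},
            {"path": "/api", "proxy-to": "api:9000"},
        ],
    }
]

ports_set = set()
for http_proxy in http_proxy_list:
    for route in http_proxy.get("routes", []):
        proxy_to = route.get("proxy-to", "")
        if ":" in proxy_to:
            ports_set.add(int(proxy_to.split(":")[1]))

print(sorted(ports_set))  # [8080, 9000] -> one ClusterIP ServicePort per distinct port
```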
@ -219,7 +266,7 @@ class ClusterInfo:
|
|||||||
|
|
||||||
labels = {
|
labels = {
|
||||||
"app": self.app_name,
|
"app": self.app_name,
|
||||||
"volume-label": f"{self.app_name}-{volume_name}"
|
"volume-label": f"{self.app_name}-{volume_name}",
|
||||||
}
|
}
|
||||||
if volume_path:
|
if volume_path:
|
||||||
storage_class_name = "manual"
|
storage_class_name = "manual"
|
||||||
@ -233,11 +280,13 @@ class ClusterInfo:
|
|||||||
access_modes=["ReadWriteOnce"],
|
access_modes=["ReadWriteOnce"],
|
||||||
storage_class_name=storage_class_name,
|
storage_class_name=storage_class_name,
|
||||||
resources=to_k8s_resource_requirements(resources),
|
resources=to_k8s_resource_requirements(resources),
|
||||||
volume_name=k8s_volume_name
|
volume_name=k8s_volume_name,
|
||||||
)
|
)
|
||||||
pvc = client.V1PersistentVolumeClaim(
|
pvc = client.V1PersistentVolumeClaim(
|
||||||
metadata=client.V1ObjectMeta(name=f"{self.app_name}-{volume_name}", labels=labels),
|
metadata=client.V1ObjectMeta(
|
||||||
spec=spec
|
name=f"{self.app_name}-{volume_name}", labels=labels
|
||||||
|
),
|
||||||
|
spec=spec,
|
||||||
)
|
)
|
||||||
result.append(pvc)
|
result.append(pvc)
|
||||||
return result
|
return result
|
||||||
@ -252,21 +301,28 @@ class ClusterInfo:
|
|||||||
print(f"{cfg_map_name} not in pod files")
|
print(f"{cfg_map_name} not in pod files")
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if not cfg_map_path.startswith("/"):
|
if not cfg_map_path.startswith("/") and self.spec.file_path is not None:
|
||||||
cfg_map_path = os.path.join(os.path.dirname(self.spec.file_path), cfg_map_path)
|
cfg_map_path = os.path.join(
|
||||||
|
os.path.dirname(str(self.spec.file_path)), cfg_map_path
|
||||||
|
)
|
||||||
|
|
||||||
# Read in all the files at a single-level of the directory. This mimics the behavior
|
# Read in all the files at a single-level of the directory.
|
||||||
# of `kubectl create configmap foo --from-file=/path/to/dir`
|
# This mimics the behavior of
|
||||||
|
# `kubectl create configmap foo --from-file=/path/to/dir`
|
||||||
data = {}
|
data = {}
|
||||||
for f in os.listdir(cfg_map_path):
|
for f in os.listdir(cfg_map_path):
|
||||||
full_path = os.path.join(cfg_map_path, f)
|
full_path = os.path.join(cfg_map_path, f)
|
||||||
if os.path.isfile(full_path):
|
if os.path.isfile(full_path):
|
||||||
data[f] = base64.b64encode(open(full_path, 'rb').read()).decode('ASCII')
|
data[f] = base64.b64encode(open(full_path, "rb").read()).decode(
|
||||||
|
"ASCII"
|
||||||
|
)
|
||||||
|
|
||||||
spec = client.V1ConfigMap(
|
spec = client.V1ConfigMap(
|
||||||
metadata=client.V1ObjectMeta(name=f"{self.app_name}-{cfg_map_name}",
|
metadata=client.V1ObjectMeta(
|
||||||
labels={"configmap-label": cfg_map_name}),
|
name=f"{self.app_name}-{cfg_map_name}",
|
||||||
binary_data=data
|
labels={"configmap-label": cfg_map_name},
|
||||||
|
),
|
||||||
|
binary_data=data,
|
||||||
)
|
)
|
||||||
result.append(spec)
|
result.append(spec)
|
||||||
return result
|
return result
|
||||||
@ -280,10 +336,14 @@ class ClusterInfo:
|
|||||||
resources = DEFAULT_VOLUME_RESOURCES
|
resources = DEFAULT_VOLUME_RESOURCES
|
||||||
for volume_name, volume_path in spec_volumes.items():
|
for volume_name, volume_path in spec_volumes.items():
|
||||||
# We only need to create a volume if it is fully qualified HostPath.
|
# We only need to create a volume if it is fully qualified HostPath.
|
||||||
# Otherwise, we create the PVC and expect the node to allocate the volume for us.
|
# Otherwise, we create the PVC and expect the node to allocate the volume
|
||||||
|
# for us.
|
||||||
if not volume_path:
|
if not volume_path:
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"{volume_name} does not require an explicit PersistentVolume, since it is not a bind-mount.")
|
print(
|
||||||
|
f"{volume_name} does not require an explicit "
|
||||||
|
"PersistentVolume, since it is not a bind-mount."
|
||||||
|
)
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if volume_name not in named_volumes:
|
if volume_name not in named_volumes:
|
||||||
@ -292,30 +352,38 @@ class ClusterInfo:
|
|||||||
continue
|
continue
|
||||||
|
|
||||||
if not os.path.isabs(volume_path):
|
if not os.path.isabs(volume_path):
|
||||||
print(f"WARNING: {volume_name}:{volume_path} is not absolute, cannot bind volume.")
|
print(
|
||||||
|
f"WARNING: {volume_name}:{volume_path} is not absolute, "
|
||||||
|
"cannot bind volume."
|
||||||
|
)
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if self.spec.is_kind_deployment():
|
if self.spec.is_kind_deployment():
|
||||||
host_path = client.V1HostPathVolumeSource(path=get_kind_pv_bind_mount_path(volume_name))
|
host_path = client.V1HostPathVolumeSource(
|
||||||
|
path=get_kind_pv_bind_mount_path(volume_name)
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
host_path = client.V1HostPathVolumeSource(path=volume_path)
|
host_path = client.V1HostPathVolumeSource(path=volume_path)
|
||||||
spec = client.V1PersistentVolumeSpec(
|
spec = client.V1PersistentVolumeSpec(
|
||||||
storage_class_name="manual",
|
storage_class_name="manual",
|
||||||
access_modes=["ReadWriteOnce"],
|
access_modes=["ReadWriteOnce"],
|
||||||
capacity=to_k8s_resource_requirements(resources).requests,
|
capacity=to_k8s_resource_requirements(resources).requests,
|
||||||
host_path=host_path
|
host_path=host_path,
|
||||||
)
|
)
|
||||||
pv = client.V1PersistentVolume(
|
pv = client.V1PersistentVolume(
|
||||||
metadata=client.V1ObjectMeta(name=f"{self.app_name}-{volume_name}",
|
metadata=client.V1ObjectMeta(
|
||||||
labels={"volume-label": f"{self.app_name}-{volume_name}"}),
|
name=f"{self.app_name}-{volume_name}",
|
||||||
|
labels={"volume-label": f"{self.app_name}-{volume_name}"},
|
||||||
|
),
|
||||||
spec=spec,
|
spec=spec,
|
||||||
)
|
)
|
||||||
result.append(pv)
|
result.append(pv)
|
||||||
return result
|
return result
|
||||||
|
|
||||||
# TODO: put things like image pull policy into an object-scope struct
|
# TODO: put things like image pull policy into an object-scope struct
|
||||||
def get_deployment(self, image_pull_policy: str = None):
|
def get_deployment(self, image_pull_policy: Optional[str] = None):
|
||||||
containers = []
|
containers = []
|
||||||
|
services = {}
|
||||||
resources = self.spec.get_container_resources()
|
resources = self.spec.get_container_resources()
|
||||||
if not resources:
|
if not resources:
|
||||||
resources = DEFAULT_CONTAINER_RESOURCES
|
resources = DEFAULT_CONTAINER_RESOURCES
|
||||||
@ -326,42 +394,88 @@ class ClusterInfo:
|
|||||||
container_name = service_name
|
container_name = service_name
|
||||||
service_info = services[service_name]
|
service_info = services[service_name]
|
||||||
image = service_info["image"]
|
image = service_info["image"]
|
||||||
|
container_ports = []
|
||||||
if "ports" in service_info:
|
if "ports" in service_info:
|
||||||
port = int(service_info["ports"][0])
|
for raw_port in [str(p) for p in service_info["ports"]]:
|
||||||
|
# Parse protocol suffix (e.g., "8001/udp" -> port=8001,
|
||||||
|
# protocol=UDP)
|
||||||
|
protocol = "TCP"
|
||||||
|
port_str = raw_port
|
||||||
|
if "/" in raw_port:
|
||||||
|
port_str, proto = raw_port.rsplit("/", 1)
|
||||||
|
protocol = proto.upper()
|
||||||
|
# Handle host:container port mapping - use container port
|
||||||
|
if ":" in port_str:
|
||||||
|
port_str = port_str.split(":")[-1]
|
||||||
|
port = int(port_str)
|
||||||
|
container_ports.append(
|
||||||
|
client.V1ContainerPort(
|
||||||
|
container_port=port, protocol=protocol
|
||||||
|
)
|
||||||
|
)
|
||||||
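The protocol and host:container handling introduced above can be exercised standalone; a small sketch with assumed compose `ports` entries (the values are made up for illustration):

```python
# Illustration only: same parsing as the loop above, with hypothetical inputs.
for raw_port in ["8001/udp", "8080:80", "3000"]:
    protocol = "TCP"
    port_str = raw_port
    if "/" in raw_port:
        port_str, proto = raw_port.rsplit("/", 1)
        protocol = proto.upper()
    if ":" in port_str:
        # host:container mapping: keep the container port
        port_str = port_str.split(":")[-1]
    print(int(port_str), protocol)
# 8001 UDP
# 80 TCP
# 3000 TCP
```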
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"image: {image}")
|
print(f"image: {image}")
|
||||||
print(f"service port: {port}")
|
print(f"service ports: {container_ports}")
|
||||||
merged_envs = merge_envs(
|
merged_envs = (
|
||||||
envs_from_compose_file(
|
merge_envs(
|
||||||
service_info["environment"]), self.environment_variables.map
|
envs_from_compose_file(
|
||||||
) if "environment" in service_info else self.environment_variables.map
|
service_info["environment"], self.environment_variables.map
|
||||||
|
),
|
||||||
|
self.environment_variables.map,
|
||||||
|
)
|
||||||
|
if "environment" in service_info
|
||||||
|
else self.environment_variables.map
|
||||||
|
)
|
||||||
envs = envs_from_environment_variables_map(merged_envs)
|
envs = envs_from_environment_variables_map(merged_envs)
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"Merged envs: {envs}")
|
print(f"Merged envs: {envs}")
|
||||||
# Re-write the image tag for remote deployment
|
# Re-write the image tag for remote deployment
|
||||||
# Note self.app_name has the same value as deployment_id
|
# Note self.app_name has the same value as deployment_id
|
||||||
image_to_use = remote_tag_for_image_unique(
|
image_to_use = (
|
||||||
image,
|
remote_tag_for_image_unique(
|
||||||
self.spec.get_image_registry(),
|
image, self.spec.get_image_registry(), self.app_name
|
||||||
self.app_name) if self.spec.get_image_registry() is not None else image
|
)
|
||||||
volume_mounts = volume_mounts_for_service(self.parsed_pod_yaml_map, service_name)
|
if self.spec.get_image_registry() is not None
|
||||||
|
else image
|
||||||
|
)
|
||||||
|
volume_mounts = volume_mounts_for_service(
|
||||||
|
self.parsed_pod_yaml_map, service_name
|
||||||
|
)
|
||||||
|
# Handle command/entrypoint from compose file
|
||||||
|
# In docker-compose: entrypoint -> k8s command, command -> k8s args
|
||||||
|
container_command = None
|
||||||
|
container_args = None
|
||||||
|
if "entrypoint" in service_info:
|
||||||
|
entrypoint = service_info["entrypoint"]
|
||||||
|
container_command = (
|
||||||
|
entrypoint if isinstance(entrypoint, list) else [entrypoint]
|
||||||
|
)
|
||||||
|
if "command" in service_info:
|
||||||
|
cmd = service_info["command"]
|
||||||
|
container_args = cmd if isinstance(cmd, list) else cmd.split()
|
||||||
container = client.V1Container(
|
container = client.V1Container(
|
||||||
name=container_name,
|
name=container_name,
|
||||||
image=image_to_use,
|
image=image_to_use,
|
||||||
image_pull_policy=image_pull_policy,
|
image_pull_policy=image_pull_policy,
|
||||||
|
command=container_command,
|
||||||
|
args=container_args,
|
||||||
env=envs,
|
env=envs,
|
||||||
ports=[client.V1ContainerPort(container_port=port)],
|
ports=container_ports if container_ports else None,
|
||||||
volume_mounts=volume_mounts,
|
volume_mounts=volume_mounts,
|
||||||
security_context=client.V1SecurityContext(
|
security_context=client.V1SecurityContext(
|
||||||
privileged=self.spec.get_privileged(),
|
privileged=self.spec.get_privileged(),
|
||||||
capabilities=client.V1Capabilities(
|
capabilities=client.V1Capabilities(
|
||||||
add=self.spec.get_capabilities()
|
add=self.spec.get_capabilities()
|
||||||
) if self.spec.get_capabilities() else None
|
)
|
||||||
|
if self.spec.get_capabilities()
|
||||||
|
else None,
|
||||||
),
|
),
|
||||||
resources=to_k8s_resource_requirements(resources),
|
resources=to_k8s_resource_requirements(resources),
|
||||||
)
|
)
|
||||||
containers.append(container)
|
containers.append(container)
|
||||||
volumes = volumes_for_pod_files(self.parsed_pod_yaml_map, self.spec, self.app_name)
|
volumes = volumes_for_pod_files(
|
||||||
|
self.parsed_pod_yaml_map, self.spec, self.app_name
|
||||||
|
)
|
||||||
image_pull_secrets = [client.V1LocalObjectReference(name="laconic-registry")]
|
image_pull_secrets = [client.V1LocalObjectReference(name="laconic-registry")]
|
||||||
|
|
||||||
annotations = None
|
annotations = None
|
||||||
@ -384,55 +498,55 @@ class ClusterInfo:
|
|||||||
affinities = []
|
affinities = []
|
||||||
for rule in self.spec.get_node_affinities():
|
for rule in self.spec.get_node_affinities():
|
||||||
# TODO add some input validation here
|
# TODO add some input validation here
|
||||||
label_name = rule['label']
|
label_name = rule["label"]
|
||||||
label_value = rule['value']
|
label_value = rule["value"]
|
||||||
affinities.append(client.V1NodeSelectorTerm(
|
affinities.append(
|
||||||
match_expressions=[client.V1NodeSelectorRequirement(
|
client.V1NodeSelectorTerm(
|
||||||
key=label_name,
|
match_expressions=[
|
||||||
operator="In",
|
client.V1NodeSelectorRequirement(
|
||||||
values=[label_value]
|
key=label_name, operator="In", values=[label_value]
|
||||||
)]
|
)
|
||||||
)
|
]
|
||||||
)
|
)
|
||||||
|
)
|
||||||
affinity = client.V1Affinity(
|
affinity = client.V1Affinity(
|
||||||
node_affinity=client.V1NodeAffinity(
|
node_affinity=client.V1NodeAffinity(
|
||||||
required_during_scheduling_ignored_during_execution=client.V1NodeSelector(
|
required_during_scheduling_ignored_during_execution=(
|
||||||
node_selector_terms=affinities
|
client.V1NodeSelector(node_selector_terms=affinities)
|
||||||
))
|
)
|
||||||
)
|
)
|
||||||
|
)
|
||||||
|
|
||||||
if self.spec.get_node_tolerations():
|
if self.spec.get_node_tolerations():
|
||||||
tolerations = []
|
tolerations = []
|
||||||
for toleration in self.spec.get_node_tolerations():
|
for toleration in self.spec.get_node_tolerations():
|
||||||
# TODO add some input validation here
|
# TODO add some input validation here
|
||||||
toleration_key = toleration['key']
|
toleration_key = toleration["key"]
|
||||||
toleration_value = toleration['value']
|
toleration_value = toleration["value"]
|
||||||
tolerations.append(client.V1Toleration(
|
tolerations.append(
|
||||||
effect="NoSchedule",
|
client.V1Toleration(
|
||||||
key=toleration_key,
|
effect="NoSchedule",
|
||||||
operator="Equal",
|
key=toleration_key,
|
||||||
value=toleration_value
|
operator="Equal",
|
||||||
))
|
value=toleration_value,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
template = client.V1PodTemplateSpec(
|
template = client.V1PodTemplateSpec(
|
||||||
metadata=client.V1ObjectMeta(
|
metadata=client.V1ObjectMeta(annotations=annotations, labels=labels),
|
||||||
annotations=annotations,
|
|
||||||
labels=labels
|
|
||||||
),
|
|
||||||
spec=client.V1PodSpec(
|
spec=client.V1PodSpec(
|
||||||
containers=containers,
|
containers=containers,
|
||||||
image_pull_secrets=image_pull_secrets,
|
image_pull_secrets=image_pull_secrets,
|
||||||
volumes=volumes,
|
volumes=volumes,
|
||||||
affinity=affinity,
|
affinity=affinity,
|
||||||
tolerations=tolerations
|
tolerations=tolerations,
|
||||||
),
|
runtime_class_name=self.spec.get_runtime_class(),
|
||||||
|
),
|
||||||
)
|
)
|
||||||
spec = client.V1DeploymentSpec(
|
spec = client.V1DeploymentSpec(
|
||||||
replicas=self.spec.get_replicas(),
|
replicas=self.spec.get_replicas(),
|
||||||
template=template, selector={
|
template=template,
|
||||||
"matchLabels":
|
selector={"matchLabels": {"app": self.app_name}},
|
||||||
{"app": self.app_name}
|
|
||||||
}
|
|
||||||
)
|
)
|
||||||
|
|
||||||
deployment = client.V1Deployment(
|
deployment = client.V1Deployment(
|
||||||
|
|||||||
@ -16,14 +16,29 @@ from datetime import datetime, timezone
|
|||||||
|
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from kubernetes import client, config
|
from kubernetes import client, config
|
||||||
from typing import List
|
from kubernetes.client.exceptions import ApiException
|
||||||
|
from typing import Any, Dict, List, Optional, cast
|
||||||
|
|
||||||
from stack_orchestrator import constants
|
from stack_orchestrator import constants
|
||||||
from stack_orchestrator.deploy.deployer import Deployer, DeployerConfigGenerator
|
from stack_orchestrator.deploy.deployer import Deployer, DeployerConfigGenerator
|
||||||
from stack_orchestrator.deploy.k8s.helpers import create_cluster, destroy_cluster, load_images_into_kind
|
from stack_orchestrator.deploy.k8s.helpers import (
|
||||||
from stack_orchestrator.deploy.k8s.helpers import install_ingress_for_kind, wait_for_ingress_in_kind
|
create_cluster,
|
||||||
from stack_orchestrator.deploy.k8s.helpers import pods_in_deployment, containers_in_pod, log_stream_from_string
|
destroy_cluster,
|
||||||
from stack_orchestrator.deploy.k8s.helpers import generate_kind_config
|
load_images_into_kind,
|
||||||
|
)
|
||||||
|
from stack_orchestrator.deploy.k8s.helpers import (
|
||||||
|
install_ingress_for_kind,
|
||||||
|
wait_for_ingress_in_kind,
|
||||||
|
)
|
||||||
|
from stack_orchestrator.deploy.k8s.helpers import (
|
||||||
|
pods_in_deployment,
|
||||||
|
containers_in_pod,
|
||||||
|
log_stream_from_string,
|
||||||
|
)
|
||||||
|
from stack_orchestrator.deploy.k8s.helpers import (
|
||||||
|
generate_kind_config,
|
||||||
|
generate_high_memlock_spec_json,
|
||||||
|
)
|
||||||
from stack_orchestrator.deploy.k8s.cluster_info import ClusterInfo
|
from stack_orchestrator.deploy.k8s.cluster_info import ClusterInfo
|
||||||
from stack_orchestrator.opts import opts
|
from stack_orchestrator.opts import opts
|
||||||
from stack_orchestrator.deploy.deployment_context import DeploymentContext
|
from stack_orchestrator.deploy.deployment_context import DeploymentContext
|
||||||
@ -36,7 +51,7 @@ class AttrDict(dict):
|
|||||||
self.__dict__ = self
|
self.__dict__ = self
|
||||||
|
|
||||||
|
|
||||||
def _check_delete_exception(e: client.exceptions.ApiException):
|
def _check_delete_exception(e: ApiException) -> None:
|
||||||
if e.status == 404:
|
if e.status == 404:
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print("Failed to delete object, continuing")
|
print("Failed to delete object, continuing")
|
||||||
@ -44,6 +59,36 @@ def _check_delete_exception(e: client.exceptions.ApiException):
|
|||||||
error_exit(f"k8s api error: {e}")
|
error_exit(f"k8s api error: {e}")
|
||||||
|
|
||||||
|
|
||||||
|
def _create_runtime_class(name: str, handler: str):
|
||||||
|
"""Create a RuntimeClass resource for custom containerd runtime handlers.
|
||||||
|
|
||||||
|
RuntimeClass allows pods to specify which runtime handler to use, enabling
|
||||||
|
different pods to have different rlimit profiles (e.g., high-memlock).
|
||||||
|
|
||||||
|
Args:
|
||||||
|
name: The name of the RuntimeClass resource
|
||||||
|
handler: The containerd runtime handler name
|
||||||
|
(must match containerdConfigPatches)
|
||||||
|
"""
|
||||||
|
api = client.NodeV1Api()
|
||||||
|
runtime_class = client.V1RuntimeClass(
|
||||||
|
api_version="node.k8s.io/v1",
|
||||||
|
kind="RuntimeClass",
|
||||||
|
metadata=client.V1ObjectMeta(name=name),
|
||||||
|
handler=handler,
|
||||||
|
)
|
||||||
|
try:
|
||||||
|
api.create_runtime_class(runtime_class)
|
||||||
|
if opts.o.debug:
|
||||||
|
print(f"Created RuntimeClass: {name}")
|
||||||
|
except ApiException as e:
|
||||||
|
if e.status == 409: # Already exists
|
||||||
|
if opts.o.debug:
|
||||||
|
print(f"RuntimeClass {name} already exists")
|
||||||
|
else:
|
||||||
|
raise
|
||||||
|
|
||||||
|
|
||||||
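For context, a pod opts into the RuntimeClass created by `_create_runtime_class` by naming it in its pod spec, which is what the deployment code does via `runtime_class_name=self.spec.get_runtime_class()`. A minimal sketch, assuming "high-memlock" stands in for the value of `constants.high_memlock_runtime`:

```python
# Illustration only: the class name below is an assumption.
from kubernetes import client

pod_spec = client.V1PodSpec(
    containers=[client.V1Container(name="app", image="example:latest")],
    # Must match the RuntimeClass metadata.name created above
    runtime_class_name="high-memlock",
)
```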
class K8sDeployer(Deployer):
|
class K8sDeployer(Deployer):
|
||||||
name: str = "k8s"
|
name: str = "k8s"
|
||||||
type: str
|
type: str
|
||||||
@ -57,18 +102,31 @@ class K8sDeployer(Deployer):
|
|||||||
deployment_dir: Path
|
deployment_dir: Path
|
||||||
deployment_context: DeploymentContext
|
deployment_context: DeploymentContext
|
||||||
|
|
||||||
def __init__(self, type, deployment_context: DeploymentContext, compose_files, compose_project_name, compose_env_file) -> None:
|
def __init__(
|
||||||
|
self,
|
||||||
|
type,
|
||||||
|
deployment_context: DeploymentContext,
|
||||||
|
compose_files,
|
||||||
|
compose_project_name,
|
||||||
|
compose_env_file,
|
||||||
|
) -> None:
|
||||||
self.type = type
|
self.type = type
|
||||||
self.skip_cluster_management = False
|
self.skip_cluster_management = False
|
||||||
# TODO: workaround pending refactoring above to cope with being created with a null deployment_context
|
# TODO: workaround pending refactoring above to cope with being
|
||||||
|
# created with a null deployment_context
|
||||||
if deployment_context is None:
|
if deployment_context is None:
|
||||||
return
|
return
|
||||||
self.deployment_dir = deployment_context.deployment_dir
|
self.deployment_dir = deployment_context.deployment_dir
|
||||||
self.deployment_context = deployment_context
|
self.deployment_context = deployment_context
|
||||||
self.kind_cluster_name = compose_project_name
|
self.kind_cluster_name = compose_project_name
|
||||||
self.cluster_info = ClusterInfo()
|
self.cluster_info = ClusterInfo()
|
||||||
self.cluster_info.int(compose_files, compose_env_file, compose_project_name, deployment_context.spec)
|
self.cluster_info.int(
|
||||||
if (opts.o.debug):
|
compose_files,
|
||||||
|
compose_env_file,
|
||||||
|
compose_project_name,
|
||||||
|
deployment_context.spec,
|
||||||
|
)
|
||||||
|
if opts.o.debug:
|
||||||
print(f"Deployment dir: {deployment_context.deployment_dir}")
|
print(f"Deployment dir: {deployment_context.deployment_dir}")
|
||||||
print(f"Compose files: {compose_files}")
|
print(f"Compose files: {compose_files}")
|
||||||
print(f"Project name: {compose_project_name}")
|
print(f"Project name: {compose_project_name}")
|
||||||
@ -80,7 +138,11 @@ class K8sDeployer(Deployer):
|
|||||||
config.load_kube_config(context=f"kind-{self.kind_cluster_name}")
|
config.load_kube_config(context=f"kind-{self.kind_cluster_name}")
|
||||||
else:
|
else:
|
||||||
# Get the config file and pass to load_kube_config()
|
# Get the config file and pass to load_kube_config()
|
||||||
config.load_kube_config(config_file=self.deployment_dir.joinpath(constants.kube_config_filename).as_posix())
|
config.load_kube_config(
|
||||||
|
config_file=self.deployment_dir.joinpath(
|
||||||
|
constants.kube_config_filename
|
||||||
|
).as_posix()
|
||||||
|
)
|
||||||
self.core_api = client.CoreV1Api()
|
self.core_api = client.CoreV1Api()
|
||||||
self.networking_api = client.NetworkingV1Api()
|
self.networking_api = client.NetworkingV1Api()
|
||||||
self.apps_api = client.AppsV1Api()
|
self.apps_api = client.AppsV1Api()
|
||||||
@ -94,7 +156,9 @@ class K8sDeployer(Deployer):
|
|||||||
print(f"Sending this pv: {pv}")
|
print(f"Sending this pv: {pv}")
|
||||||
if not opts.o.dry_run:
|
if not opts.o.dry_run:
|
||||||
try:
|
try:
|
||||||
pv_resp = self.core_api.read_persistent_volume(name=pv.metadata.name)
|
pv_resp = self.core_api.read_persistent_volume(
|
||||||
|
name=pv.metadata.name
|
||||||
|
)
|
||||||
if pv_resp:
|
if pv_resp:
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print("PVs already present:")
|
print("PVs already present:")
|
||||||
@ -117,7 +181,8 @@ class K8sDeployer(Deployer):
|
|||||||
if not opts.o.dry_run:
|
if not opts.o.dry_run:
|
||||||
try:
|
try:
|
||||||
pvc_resp = self.core_api.read_namespaced_persistent_volume_claim(
|
pvc_resp = self.core_api.read_namespaced_persistent_volume_claim(
|
||||||
name=pvc.metadata.name, namespace=self.k8s_namespace)
|
name=pvc.metadata.name, namespace=self.k8s_namespace
|
||||||
|
)
|
||||||
if pvc_resp:
|
if pvc_resp:
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print("PVCs already present:")
|
print("PVCs already present:")
|
||||||
@ -126,7 +191,9 @@ class K8sDeployer(Deployer):
|
|||||||
except: # noqa: E722
|
except: # noqa: E722
|
||||||
pass
|
pass
|
||||||
|
|
||||||
pvc_resp = self.core_api.create_namespaced_persistent_volume_claim(body=pvc, namespace=self.k8s_namespace)
|
pvc_resp = self.core_api.create_namespaced_persistent_volume_claim(
|
||||||
|
body=pvc, namespace=self.k8s_namespace
|
||||||
|
)
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print("PVCs created:")
|
print("PVCs created:")
|
||||||
print(f"{pvc_resp}")
|
print(f"{pvc_resp}")
|
||||||
@ -138,8 +205,7 @@ class K8sDeployer(Deployer):
|
|||||||
print(f"Sending this ConfigMap: {cfg_map}")
|
print(f"Sending this ConfigMap: {cfg_map}")
|
||||||
if not opts.o.dry_run:
|
if not opts.o.dry_run:
|
||||||
cfg_rsp = self.core_api.create_namespaced_config_map(
|
cfg_rsp = self.core_api.create_namespaced_config_map(
|
||||||
body=cfg_map,
|
body=cfg_map, namespace=self.k8s_namespace
|
||||||
namespace=self.k8s_namespace
|
|
||||||
)
|
)
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print("ConfigMap created:")
|
print("ConfigMap created:")
|
||||||
@ -147,26 +213,37 @@ class K8sDeployer(Deployer):
|
|||||||
|
|
||||||
def _create_deployment(self):
|
def _create_deployment(self):
|
||||||
# Process compose files into a Deployment
|
# Process compose files into a Deployment
|
||||||
deployment = self.cluster_info.get_deployment(image_pull_policy=None if self.is_kind() else "Always")
|
deployment = self.cluster_info.get_deployment(
|
||||||
|
image_pull_policy=None if self.is_kind() else "Always"
|
||||||
|
)
|
||||||
# Create the k8s objects
|
# Create the k8s objects
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"Sending this deployment: {deployment}")
|
print(f"Sending this deployment: {deployment}")
|
||||||
if not opts.o.dry_run:
|
if not opts.o.dry_run:
|
||||||
deployment_resp = self.apps_api.create_namespaced_deployment(
|
deployment_resp = cast(
|
||||||
body=deployment, namespace=self.k8s_namespace
|
client.V1Deployment,
|
||||||
|
self.apps_api.create_namespaced_deployment(
|
||||||
|
body=deployment, namespace=self.k8s_namespace
|
||||||
|
),
|
||||||
)
|
)
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print("Deployment created:")
|
print("Deployment created:")
|
||||||
print(f"{deployment_resp.metadata.namespace} {deployment_resp.metadata.name} \
|
meta = deployment_resp.metadata
|
||||||
{deployment_resp.metadata.generation} {deployment_resp.spec.template.spec.containers[0].image}")
|
spec = deployment_resp.spec
|
||||||
|
if meta and spec and spec.template.spec:
|
||||||
|
ns = meta.namespace
|
||||||
|
name = meta.name
|
||||||
|
gen = meta.generation
|
||||||
|
containers = spec.template.spec.containers
|
||||||
|
img = containers[0].image if containers else None
|
||||||
|
print(f"{ns} {name} {gen} {img}")
|
||||||
|
|
||||||
service: client.V1Service = self.cluster_info.get_service()
|
service = self.cluster_info.get_service()
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"Sending this service: {service}")
|
print(f"Sending this service: {service}")
|
||||||
if not opts.o.dry_run:
|
if service and not opts.o.dry_run:
|
||||||
service_resp = self.core_api.create_namespaced_service(
|
service_resp = self.core_api.create_namespaced_service(
|
||||||
namespace=self.k8s_namespace,
|
namespace=self.k8s_namespace, body=service
|
||||||
body=service
|
|
||||||
)
|
)
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print("Service created:")
|
print("Service created:")
|
||||||
@ -177,7 +254,7 @@ class K8sDeployer(Deployer):
|
|||||||
group="cert-manager.io",
|
group="cert-manager.io",
|
||||||
version="v1",
|
version="v1",
|
||||||
namespace=self.k8s_namespace,
|
namespace=self.k8s_namespace,
|
||||||
plural="certificates"
|
plural="certificates",
|
||||||
)
|
)
|
||||||
|
|
||||||
host_parts = host_name.split(".", 1)
|
host_parts = host_name.split(".", 1)
|
||||||
@ -202,7 +279,9 @@ class K8sDeployer(Deployer):
|
|||||||
if before < now < after:
|
if before < now < after:
|
||||||
# Check the status is Ready
|
# Check the status is Ready
|
||||||
for condition in status.get("conditions", []):
|
for condition in status.get("conditions", []):
|
||||||
if "True" == condition.get("status") and "Ready" == condition.get("type"):
|
if "True" == condition.get(
|
||||||
|
"status"
|
||||||
|
) and "Ready" == condition.get("type"):
|
||||||
return cert
|
return cert
|
||||||
return None
|
return None
|
||||||
|
|
||||||
@ -211,15 +290,27 @@ class K8sDeployer(Deployer):
|
|||||||
if not opts.o.dry_run:
|
if not opts.o.dry_run:
|
||||||
if self.is_kind() and not self.skip_cluster_management:
|
if self.is_kind() and not self.skip_cluster_management:
|
||||||
# Create the kind cluster
|
# Create the kind cluster
|
||||||
create_cluster(self.kind_cluster_name, self.deployment_dir.joinpath(constants.kind_config_filename))
|
create_cluster(
|
||||||
|
self.kind_cluster_name,
|
||||||
|
str(self.deployment_dir.joinpath(constants.kind_config_filename)),
|
||||||
|
)
|
||||||
# Ensure the referenced containers are copied into kind
|
# Ensure the referenced containers are copied into kind
|
||||||
load_images_into_kind(self.kind_cluster_name, self.cluster_info.image_set)
|
load_images_into_kind(
|
||||||
|
self.kind_cluster_name, self.cluster_info.image_set
|
||||||
|
)
|
||||||
self.connect_api()
|
self.connect_api()
|
||||||
if self.is_kind() and not self.skip_cluster_management:
|
if self.is_kind() and not self.skip_cluster_management:
|
||||||
# Now configure an ingress controller (not installed by default in kind)
|
# Configure ingress controller (not installed by default in kind)
|
||||||
install_ingress_for_kind()
|
install_ingress_for_kind()
|
||||||
# Wait for ingress to start (deployment provisioning will fail unless this is done)
|
# Wait for ingress to start
|
||||||
|
# (deployment provisioning will fail unless this is done)
|
||||||
wait_for_ingress_in_kind()
|
wait_for_ingress_in_kind()
|
||||||
|
# Create RuntimeClass if unlimited_memlock is enabled
|
||||||
|
if self.cluster_info.spec.get_unlimited_memlock():
|
||||||
|
_create_runtime_class(
|
||||||
|
constants.high_memlock_runtime,
|
||||||
|
constants.high_memlock_runtime,
|
||||||
|
)
|
||||||
|
|
||||||
else:
|
else:
|
||||||
print("Dry run mode enabled, skipping k8s API connect")
|
print("Dry run mode enabled, skipping k8s API connect")
|
||||||
@ -228,21 +319,26 @@ class K8sDeployer(Deployer):
|
|||||||
self._create_deployment()
|
self._create_deployment()
|
||||||
|
|
||||||
http_proxy_info = self.cluster_info.spec.get_http_proxy()
|
http_proxy_info = self.cluster_info.spec.get_http_proxy()
|
||||||
# Note: at present we don't support tls for kind (and enabling tls causes errors)
|
# Note: we don't support tls for kind (enabling tls causes errors)
|
||||||
use_tls = http_proxy_info and not self.is_kind()
|
use_tls = http_proxy_info and not self.is_kind()
|
||||||
certificate = self._find_certificate_for_host_name(http_proxy_info[0]["host-name"]) if use_tls else None
|
certificate = (
|
||||||
|
self._find_certificate_for_host_name(http_proxy_info[0]["host-name"])
|
||||||
|
if use_tls
|
||||||
|
else None
|
||||||
|
)
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
if certificate:
|
if certificate:
|
||||||
print(f"Using existing certificate: {certificate}")
|
print(f"Using existing certificate: {certificate}")
|
||||||
|
|
||||||
ingress: client.V1Ingress = self.cluster_info.get_ingress(use_tls=use_tls, certificate=certificate)
|
ingress = self.cluster_info.get_ingress(
|
||||||
|
use_tls=use_tls, certificate=certificate
|
||||||
|
)
|
||||||
if ingress:
|
if ingress:
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"Sending this ingress: {ingress}")
|
print(f"Sending this ingress: {ingress}")
|
||||||
if not opts.o.dry_run:
|
if not opts.o.dry_run:
|
||||||
ingress_resp = self.networking_api.create_namespaced_ingress(
|
ingress_resp = self.networking_api.create_namespaced_ingress(
|
||||||
namespace=self.k8s_namespace,
|
namespace=self.k8s_namespace, body=ingress
|
||||||
body=ingress
|
|
||||||
)
|
)
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print("Ingress created:")
|
print("Ingress created:")
|
||||||
@ -257,8 +353,7 @@ class K8sDeployer(Deployer):
|
|||||||
print(f"Sending this nodeport: {nodeport}")
|
print(f"Sending this nodeport: {nodeport}")
|
||||||
if not opts.o.dry_run:
|
if not opts.o.dry_run:
|
||||||
nodeport_resp = self.core_api.create_namespaced_service(
|
nodeport_resp = self.core_api.create_namespaced_service(
|
||||||
namespace=self.k8s_namespace,
|
namespace=self.k8s_namespace, body=nodeport
|
||||||
body=nodeport
|
|
||||||
)
|
)
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print("NodePort created:")
|
print("NodePort created:")
|
||||||
@ -276,11 +371,13 @@ class K8sDeployer(Deployer):
|
|||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"Deleting this pv: {pv}")
|
print(f"Deleting this pv: {pv}")
|
||||||
try:
|
try:
|
||||||
pv_resp = self.core_api.delete_persistent_volume(name=pv.metadata.name)
|
pv_resp = self.core_api.delete_persistent_volume(
|
||||||
|
name=pv.metadata.name
|
||||||
|
)
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print("PV deleted:")
|
print("PV deleted:")
|
||||||
print(f"{pv_resp}")
|
print(f"{pv_resp}")
|
||||||
except client.exceptions.ApiException as e:
|
except ApiException as e:
|
||||||
_check_delete_exception(e)
|
_check_delete_exception(e)
|
||||||
|
|
||||||
# Figure out the PVCs for this deployment
|
# Figure out the PVCs for this deployment
|
||||||
@ -295,7 +392,7 @@ class K8sDeployer(Deployer):
|
|||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print("PVCs deleted:")
|
print("PVCs deleted:")
|
||||||
print(f"{pvc_resp}")
|
print(f"{pvc_resp}")
|
||||||
except client.exceptions.ApiException as e:
|
except ApiException as e:
|
||||||
_check_delete_exception(e)
|
_check_delete_exception(e)
|
||||||
|
|
||||||
# Figure out the ConfigMaps for this deployment
|
# Figure out the ConfigMaps for this deployment
|
||||||
@ -310,39 +407,40 @@ class K8sDeployer(Deployer):
|
|||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print("ConfigMap deleted:")
|
print("ConfigMap deleted:")
|
||||||
print(f"{cfg_map_resp}")
|
print(f"{cfg_map_resp}")
|
||||||
except client.exceptions.ApiException as e:
|
except ApiException as e:
|
||||||
_check_delete_exception(e)
|
_check_delete_exception(e)
|
||||||
|
|
||||||
deployment = self.cluster_info.get_deployment()
|
deployment = self.cluster_info.get_deployment()
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"Deleting this deployment: {deployment}")
|
print(f"Deleting this deployment: {deployment}")
|
||||||
try:
|
if deployment and deployment.metadata and deployment.metadata.name:
|
||||||
self.apps_api.delete_namespaced_deployment(
|
try:
|
||||||
name=deployment.metadata.name, namespace=self.k8s_namespace
|
self.apps_api.delete_namespaced_deployment(
|
||||||
)
|
name=deployment.metadata.name, namespace=self.k8s_namespace
|
||||||
except client.exceptions.ApiException as e:
|
)
|
||||||
_check_delete_exception(e)
|
except ApiException as e:
|
||||||
|
_check_delete_exception(e)
|
||||||
|
|
||||||
service: client.V1Service = self.cluster_info.get_service()
|
service = self.cluster_info.get_service()
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"Deleting service: {service}")
|
print(f"Deleting service: {service}")
|
||||||
try:
|
if service and service.metadata and service.metadata.name:
|
||||||
self.core_api.delete_namespaced_service(
|
try:
|
||||||
namespace=self.k8s_namespace,
|
self.core_api.delete_namespaced_service(
|
||||||
name=service.metadata.name
|
namespace=self.k8s_namespace, name=service.metadata.name
|
||||||
)
|
)
|
||||||
except client.exceptions.ApiException as e:
|
except ApiException as e:
|
||||||
_check_delete_exception(e)
|
_check_delete_exception(e)
|
||||||
|
|
||||||
ingress: client.V1Ingress = self.cluster_info.get_ingress(use_tls=not self.is_kind())
|
ingress = self.cluster_info.get_ingress(use_tls=not self.is_kind())
|
||||||
if ingress:
|
if ingress and ingress.metadata and ingress.metadata.name:
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"Deleting this ingress: {ingress}")
|
print(f"Deleting this ingress: {ingress}")
|
||||||
try:
|
try:
|
||||||
self.networking_api.delete_namespaced_ingress(
|
self.networking_api.delete_namespaced_ingress(
|
||||||
name=ingress.metadata.name, namespace=self.k8s_namespace
|
name=ingress.metadata.name, namespace=self.k8s_namespace
|
||||||
)
|
)
|
||||||
except client.exceptions.ApiException as e:
|
except ApiException as e:
|
||||||
_check_delete_exception(e)
|
_check_delete_exception(e)
|
||||||
else:
|
else:
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
@ -352,13 +450,13 @@ class K8sDeployer(Deployer):
|
|||||||
for nodeport in nodeports:
|
for nodeport in nodeports:
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"Deleting this nodeport: {nodeport}")
|
print(f"Deleting this nodeport: {nodeport}")
|
||||||
try:
|
if nodeport.metadata and nodeport.metadata.name:
|
||||||
self.core_api.delete_namespaced_service(
|
try:
|
||||||
namespace=self.k8s_namespace,
|
self.core_api.delete_namespaced_service(
|
||||||
name=nodeport.metadata.name
|
namespace=self.k8s_namespace, name=nodeport.metadata.name
|
||||||
)
|
)
|
||||||
except client.exceptions.ApiException as e:
|
except ApiException as e:
|
||||||
_check_delete_exception(e)
|
_check_delete_exception(e)
|
||||||
else:
|
else:
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print("No nodeport to delete")
|
print("No nodeport to delete")
|
||||||
@ -375,8 +473,9 @@ class K8sDeployer(Deployer):
|
|||||||
|
|
||||||
if all_pods.items:
|
if all_pods.items:
|
||||||
for p in all_pods.items:
|
for p in all_pods.items:
|
||||||
if f"{self.cluster_info.app_name}-deployment" in p.metadata.name:
|
if p.metadata and p.metadata.name:
|
||||||
pods.append(p)
|
if f"{self.cluster_info.app_name}-deployment" in p.metadata.name:
|
||||||
|
pods.append(p)
|
||||||
|
|
||||||
if not pods:
|
if not pods:
|
||||||
return
|
return
|
||||||
@ -385,21 +484,40 @@ class K8sDeployer(Deployer):
|
|||||||
ip = "?"
|
ip = "?"
|
||||||
tls = "?"
|
tls = "?"
|
||||||
try:
|
try:
|
||||||
ingress = self.networking_api.read_namespaced_ingress(namespace=self.k8s_namespace,
|
cluster_ingress = self.cluster_info.get_ingress()
|
||||||
name=self.cluster_info.get_ingress().metadata.name)
|
if cluster_ingress is None or cluster_ingress.metadata is None:
|
||||||
|
return
|
||||||
|
ingress = cast(
|
||||||
|
client.V1Ingress,
|
||||||
|
self.networking_api.read_namespaced_ingress(
|
||||||
|
namespace=self.k8s_namespace,
|
||||||
|
name=cluster_ingress.metadata.name,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
if not ingress.spec or not ingress.spec.tls or not ingress.spec.rules:
|
||||||
|
return
|
||||||
|
|
||||||
cert = self.custom_obj_api.get_namespaced_custom_object(
|
cert = cast(
|
||||||
group="cert-manager.io",
|
Dict[str, Any],
|
||||||
version="v1",
|
self.custom_obj_api.get_namespaced_custom_object(
|
||||||
namespace=self.k8s_namespace,
|
group="cert-manager.io",
|
||||||
plural="certificates",
|
version="v1",
|
||||||
name=ingress.spec.tls[0].secret_name
|
namespace=self.k8s_namespace,
|
||||||
|
plural="certificates",
|
||||||
|
name=ingress.spec.tls[0].secret_name,
|
||||||
|
),
|
||||||
)
|
)
|
||||||
|
|
||||||
hostname = ingress.spec.rules[0].host
|
hostname = ingress.spec.rules[0].host
|
||||||
ip = ingress.status.load_balancer.ingress[0].ip
|
if ingress.status and ingress.status.load_balancer:
|
||||||
|
lb_ingress = ingress.status.load_balancer.ingress
|
||||||
|
if lb_ingress:
|
||||||
|
ip = lb_ingress[0].ip or "?"
|
||||||
|
cert_status = cert.get("status", {})
|
||||||
tls = "notBefore: %s; notAfter: %s; names: %s" % (
|
tls = "notBefore: %s; notAfter: %s; names: %s" % (
|
||||||
cert["status"]["notBefore"], cert["status"]["notAfter"], ingress.spec.tls[0].hosts
|
cert_status.get("notBefore", "?"),
|
||||||
|
cert_status.get("notAfter", "?"),
|
||||||
|
ingress.spec.tls[0].hosts,
|
||||||
)
|
)
|
||||||
except: # noqa: E722
|
except: # noqa: E722
|
||||||
pass
|
pass
|
||||||
@ -412,10 +530,16 @@ class K8sDeployer(Deployer):
|
|||||||
print("Pods:")
|
print("Pods:")
|
||||||
|
|
||||||
for p in pods:
|
for p in pods:
|
||||||
|
if not p.metadata:
|
||||||
|
continue
|
||||||
|
ns = p.metadata.namespace
|
||||||
|
name = p.metadata.name
|
||||||
if p.metadata.deletion_timestamp:
|
if p.metadata.deletion_timestamp:
|
||||||
print(f"\t{p.metadata.namespace}/{p.metadata.name}: Terminating ({p.metadata.deletion_timestamp})")
|
ts = p.metadata.deletion_timestamp
|
||||||
|
print(f"\t{ns}/{name}: Terminating ({ts})")
|
||||||
else:
|
else:
|
||||||
print(f"\t{p.metadata.namespace}/{p.metadata.name}: Running ({p.metadata.creation_timestamp})")
|
ts = p.metadata.creation_timestamp
|
||||||
|
print(f"\t{ns}/{name}: Running ({ts})")
|
||||||
|
|
||||||
def ps(self):
|
def ps(self):
|
||||||
self.connect_api()
|
self.connect_api()
|
||||||
@ -430,19 +554,22 @@ class K8sDeployer(Deployer):
|
|||||||
for c in p.spec.containers:
|
for c in p.spec.containers:
|
||||||
if c.ports:
|
if c.ports:
|
||||||
for prt in c.ports:
|
for prt in c.ports:
|
||||||
ports[str(prt.container_port)] = [AttrDict({
|
ports[str(prt.container_port)] = [
|
||||||
"HostIp": pod_ip,
|
AttrDict(
|
||||||
"HostPort": prt.container_port
|
{"HostIp": pod_ip, "HostPort": prt.container_port}
|
||||||
})]
|
)
|
||||||
|
]
|
||||||
|
|
||||||
ret.append(AttrDict({
|
ret.append(
|
||||||
"id": f"{p.metadata.namespace}/{p.metadata.name}",
|
AttrDict(
|
||||||
"name": p.metadata.name,
|
{
|
||||||
"namespace": p.metadata.namespace,
|
"id": f"{p.metadata.namespace}/{p.metadata.name}",
|
||||||
"network_settings": AttrDict({
|
"name": p.metadata.name,
|
||||||
"ports": ports
|
"namespace": p.metadata.namespace,
|
||||||
})
|
"network_settings": AttrDict({"ports": ports}),
|
||||||
}))
|
}
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
return ret
|
return ret
|
||||||
|
|
||||||
@ -465,15 +592,17 @@ class K8sDeployer(Deployer):
|
|||||||
else:
|
else:
|
||||||
k8s_pod_name = pods[0]
|
k8s_pod_name = pods[0]
|
||||||
containers = containers_in_pod(self.core_api, k8s_pod_name)
|
containers = containers_in_pod(self.core_api, k8s_pod_name)
|
||||||
# If the pod is not yet started, the logs request below will throw an exception
|
# If pod not started, logs request below will throw an exception
|
||||||
try:
|
try:
|
||||||
log_data = ""
|
log_data = ""
|
||||||
for container in containers:
|
for container in containers:
|
||||||
container_log = self.core_api.read_namespaced_pod_log(k8s_pod_name, namespace="default", container=container)
|
container_log = self.core_api.read_namespaced_pod_log(
|
||||||
|
k8s_pod_name, namespace="default", container=container
|
||||||
|
)
|
||||||
container_log_lines = container_log.splitlines()
|
container_log_lines = container_log.splitlines()
|
||||||
for line in container_log_lines:
|
for line in container_log_lines:
|
||||||
log_data += f"{container}: {line}\n"
|
log_data += f"{container}: {line}\n"
|
||||||
except client.exceptions.ApiException as e:
|
except ApiException as e:
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"Error from read_namespaced_pod_log: {e}")
|
print(f"Error from read_namespaced_pod_log: {e}")
|
||||||
log_data = "******* No logs available ********\n"
|
log_data = "******* No logs available ********\n"
|
||||||
@ -482,34 +611,85 @@ class K8sDeployer(Deployer):
|
|||||||
def update(self):
|
def update(self):
|
||||||
self.connect_api()
|
self.connect_api()
|
||||||
ref_deployment = self.cluster_info.get_deployment()
|
ref_deployment = self.cluster_info.get_deployment()
|
||||||
|
if not ref_deployment or not ref_deployment.metadata:
|
||||||
|
return
|
||||||
|
ref_name = ref_deployment.metadata.name
|
||||||
|
if not ref_name:
|
||||||
|
return
|
||||||
|
|
||||||
deployment = self.apps_api.read_namespaced_deployment(
|
deployment = cast(
|
||||||
name=ref_deployment.metadata.name,
|
client.V1Deployment,
|
||||||
namespace=self.k8s_namespace
|
self.apps_api.read_namespaced_deployment(
|
||||||
|
name=ref_name, namespace=self.k8s_namespace
|
||||||
|
),
|
||||||
)
|
)
|
||||||
|
if not deployment.spec or not deployment.spec.template:
|
||||||
|
return
|
||||||
|
template_spec = deployment.spec.template.spec
|
||||||
|
if not template_spec or not template_spec.containers:
|
||||||
|
return
|
||||||
|
|
||||||
new_env = ref_deployment.spec.template.spec.containers[0].env
|
ref_spec = ref_deployment.spec
|
||||||
for container in deployment.spec.template.spec.containers:
|
if ref_spec and ref_spec.template and ref_spec.template.spec:
|
||||||
old_env = container.env
|
ref_containers = ref_spec.template.spec.containers
|
||||||
if old_env != new_env:
|
if ref_containers:
|
||||||
container.env = new_env
|
new_env = ref_containers[0].env
|
||||||
|
for container in template_spec.containers:
|
||||||
|
old_env = container.env
|
||||||
|
if old_env != new_env:
|
||||||
|
container.env = new_env
|
||||||
|
|
||||||
deployment.spec.template.metadata.annotations = {
|
template_meta = deployment.spec.template.metadata
|
||||||
"kubectl.kubernetes.io/restartedAt": datetime.utcnow()
|
if template_meta:
|
||||||
.replace(tzinfo=timezone.utc)
|
template_meta.annotations = {
|
||||||
.isoformat()
|
"kubectl.kubernetes.io/restartedAt": datetime.utcnow()
|
||||||
}
|
.replace(tzinfo=timezone.utc)
|
||||||
|
.isoformat()
|
||||||
|
}
|
||||||
|
|
||||||
self.apps_api.patch_namespaced_deployment(
|
self.apps_api.patch_namespaced_deployment(
|
||||||
name=ref_deployment.metadata.name,
|
name=ref_name,
|
||||||
namespace=self.k8s_namespace,
|
namespace=self.k8s_namespace,
|
||||||
body=deployment
|
body=deployment,
|
||||||
)
|
)
|
||||||
|
|
||||||
def run(self, image: str, command=None, user=None, volumes=None, entrypoint=None, env={}, ports=[], detach=False):
|
def run(
|
||||||
|
self,
|
||||||
|
image: str,
|
||||||
|
command=None,
|
||||||
|
user=None,
|
||||||
|
volumes=None,
|
||||||
|
entrypoint=None,
|
||||||
|
env={},
|
||||||
|
ports=[],
|
||||||
|
detach=False,
|
||||||
|
):
|
||||||
# We need to figure out how to do this -- check why we're being called first
|
# We need to figure out how to do this -- check why we're being called first
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
def run_job(self, job_name: str, helm_release: Optional[str] = None):
|
||||||
|
if not opts.o.dry_run:
|
||||||
|
from stack_orchestrator.deploy.k8s.helm.job_runner import run_helm_job
|
||||||
|
|
||||||
|
# Check if this is a helm-based deployment
|
||||||
|
chart_dir = self.deployment_dir / "chart"
|
||||||
|
if not chart_dir.exists():
|
||||||
|
# TODO: Implement job support for compose-based K8s deployments
|
||||||
|
raise Exception(
|
||||||
|
f"Job support is only available for helm-based "
|
||||||
|
f"deployments. Chart directory not found: {chart_dir}"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Run the job using the helm job runner
|
||||||
|
run_helm_job(
|
||||||
|
chart_dir=chart_dir,
|
||||||
|
job_name=job_name,
|
||||||
|
release=helm_release,
|
||||||
|
namespace=self.k8s_namespace,
|
||||||
|
timeout=600,
|
||||||
|
verbose=opts.o.verbose,
|
||||||
|
)
|
||||||
|
|
||||||
def is_kind(self):
|
def is_kind(self):
|
||||||
return self.type == "k8s-kind"
|
return self.type == "k8s-kind"
|
||||||
|
|
||||||
@ -525,6 +705,20 @@ class K8sDeployerConfigGenerator(DeployerConfigGenerator):
|
|||||||
def generate(self, deployment_dir: Path):
|
def generate(self, deployment_dir: Path):
|
||||||
# No need to do this for the remote k8s case
|
# No need to do this for the remote k8s case
|
||||||
if self.type == "k8s-kind":
|
if self.type == "k8s-kind":
|
||||||
|
# Generate high-memlock-spec.json if unlimited_memlock is enabled.
|
||||||
|
# Must be done before generate_kind_config() which references it.
|
||||||
|
if self.deployment_context.spec.get_unlimited_memlock():
|
||||||
|
spec_content = generate_high_memlock_spec_json()
|
||||||
|
spec_file = deployment_dir.joinpath(
|
||||||
|
constants.high_memlock_spec_filename
|
||||||
|
)
|
||||||
|
if opts.o.debug:
|
||||||
|
print(
|
||||||
|
f"Creating high-memlock spec for unlimited memlock: {spec_file}"
|
||||||
|
)
|
||||||
|
with open(spec_file, "w") as output_file:
|
||||||
|
output_file.write(spec_content)
|
||||||
|
|
||||||
# Check the file isn't already there
|
# Check the file isn't already there
|
||||||
# Get the config file contents
|
# Get the config file contents
|
||||||
content = generate_kind_config(deployment_dir, self.deployment_context)
|
content = generate_kind_config(deployment_dir, self.deployment_context)
|
||||||
|
|||||||
14
stack_orchestrator/deploy/k8s/helm/__init__.py
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
# Copyright © 2025 Vulcanize
|
||||||
|
|
||||||
|
# This program is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Affero General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
|
||||||
|
# This program is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Affero General Public License for more details.
|
||||||
|
|
||||||
|
# You should have received a copy of the GNU Affero General Public License
|
||||||
|
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
||||||
336
stack_orchestrator/deploy/k8s/helm/chart_generator.py
Normal file
@ -0,0 +1,336 @@
|
|||||||
|
# Copyright © 2025 Vulcanize
|
||||||
|
|
||||||
|
# This program is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Affero General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
|
||||||
|
# This program is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Affero General Public License for more details.
|
||||||
|
|
||||||
|
# You should have received a copy of the GNU Affero General Public License
|
||||||
|
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
from stack_orchestrator import constants
|
||||||
|
from stack_orchestrator.opts import opts
|
||||||
|
from stack_orchestrator.util import (
|
||||||
|
get_parsed_stack_config,
|
||||||
|
get_pod_list,
|
||||||
|
get_pod_file_path,
|
||||||
|
get_job_list,
|
||||||
|
get_job_file_path,
|
||||||
|
error_exit,
|
||||||
|
)
|
||||||
|
from stack_orchestrator.deploy.k8s.helm.kompose_wrapper import (
|
||||||
|
check_kompose_available,
|
||||||
|
get_kompose_version,
|
||||||
|
convert_to_helm_chart,
|
||||||
|
)
|
||||||
|
from stack_orchestrator.util import get_yaml
|
||||||
|
|
||||||
|
|
||||||
|
def _wrap_job_templates_with_conditionals(chart_dir: Path, jobs: list) -> None:
|
||||||
|
"""
|
||||||
|
Wrap job templates with conditional checks so they are not created by default.
|
||||||
|
Jobs will only be created when explicitly enabled via --set jobs.<name>.enabled=true
|
||||||
|
"""
|
||||||
|
templates_dir = chart_dir / "templates"
|
||||||
|
if not templates_dir.exists():
|
||||||
|
return
|
||||||
|
|
||||||
|
for job_name in jobs:
|
||||||
|
# Find job template file (kompose generates <service-name>-job.yaml)
|
||||||
|
job_template_file = templates_dir / f"{job_name}-job.yaml"
|
||||||
|
|
||||||
|
if not job_template_file.exists():
|
||||||
|
if opts.o.debug:
|
||||||
|
print(f"Warning: Job template not found: {job_template_file}")
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Read the template content
|
||||||
|
content = job_template_file.read_text()
|
||||||
|
|
||||||
|
# Wrap with conditional (default false)
|
||||||
|
# Use 'index' function to handle job names with dashes
|
||||||
|
# Provide default dict for .Values.jobs to handle case where it doesn't exist
|
||||||
|
condition = (
|
||||||
|
f"{{{{- if (index (.Values.jobs | default dict) "
|
||||||
|
f'"{job_name}" | default dict).enabled | default false }}}}'
|
||||||
|
)
|
||||||
|
wrapped_content = f"""{condition}
|
||||||
|
{content}{{{{- end }}}}
|
||||||
|
"""
|
||||||
|
|
||||||
|
# Write back
|
||||||
|
job_template_file.write_text(wrapped_content)
|
||||||
|
|
||||||
|
if opts.o.debug:
|
||||||
|
print(f"Wrapped job template with conditional: {job_template_file.name}")
|
||||||
|
|
||||||
|
|
||||||
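For a hypothetical job named `migrate`, the wrapper above guards the generated template as sketched below; this is a runnable reproduction of the same string construction, not part of the diff.

```python
# Illustration only: reproduces the condition built in
# _wrap_job_templates_with_conditionals for an assumed job name.
job_name = "migrate"
content = "<original contents of templates/migrate-job.yaml>\n"
condition = (
    f"{{{{- if (index (.Values.jobs | default dict) "
    f'"{job_name}" | default dict).enabled | default false }}}}'
)
wrapped = f"{condition}\n{content}{{{{- end }}}}\n"
print(wrapped)
# The job only renders when the chart is installed with, e.g.:
#   helm install <release> ./chart --set jobs.migrate.enabled=true
```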
|
def _post_process_chart(chart_dir: Path, chart_name: str, jobs: list) -> None:
|
||||||
|
"""
|
||||||
|
Post-process Kompose-generated chart to fix common issues.
|
||||||
|
|
||||||
|
Fixes:
|
||||||
|
1. Chart.yaml name, description and keywords
|
||||||
|
2. Add conditional wrappers to job templates (default: disabled)
|
||||||
|
|
||||||
|
TODO:
|
||||||
|
- Add defaultMode: 0755 to ConfigMap volumes containing scripts (.sh files)
|
||||||
|
"""
|
||||||
|
yaml = get_yaml()
|
||||||
|
|
||||||
|
# Fix Chart.yaml
|
||||||
|
chart_yaml_path = chart_dir / "Chart.yaml"
|
||||||
|
if chart_yaml_path.exists():
|
||||||
|
chart_yaml = yaml.load(open(chart_yaml_path, "r"))
|
||||||
|
|
||||||
|
# Fix name
|
||||||
|
chart_yaml["name"] = chart_name
|
||||||
|
|
||||||
|
# Fix description
|
||||||
|
chart_yaml["description"] = f"Generated Helm chart for {chart_name} stack"
|
||||||
|
|
||||||
|
# Fix keywords
|
||||||
|
if "keywords" in chart_yaml and isinstance(chart_yaml["keywords"], list):
|
||||||
|
chart_yaml["keywords"] = [chart_name]
|
||||||
|
|
||||||
|
with open(chart_yaml_path, "w") as f:
|
||||||
|
yaml.dump(chart_yaml, f)
|
||||||
|
|
||||||
|
# Process job templates: wrap with conditionals (default disabled)
|
||||||
|
if jobs:
|
||||||
|
_wrap_job_templates_with_conditionals(chart_dir, jobs)
|
||||||
|
|
||||||
|
|
||||||
|
def generate_helm_chart(
|
||||||
|
stack_path: str, spec_file: str, deployment_dir_path: Path
|
||||||
|
) -> None:
|
||||||
|
"""
|
||||||
|
Generate a self-sufficient Helm chart from stack compose files using Kompose.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
stack_path: Path to the stack directory
|
||||||
|
spec_file: Path to the deployment spec file
|
||||||
|
deployment_dir_path: Deployment directory path
|
||||||
|
(already created with deployment.yml)
|
||||||
|
|
||||||
|
Output structure:
|
||||||
|
deployment-dir/
|
||||||
|
├── deployment.yml # Contains cluster-id
|
||||||
|
├── spec.yml # Reference
|
||||||
|
├── stack.yml # Reference
|
||||||
|
└── chart/ # Self-sufficient Helm chart
|
||||||
|
├── Chart.yaml
|
||||||
|
├── README.md
|
||||||
|
└── templates/
|
||||||
|
└── *.yaml
|
||||||
|
|
||||||
|
TODO: Enhancements:
|
||||||
|
- Convert Deployments to StatefulSets for stateful services (zenithd, postgres)
|
||||||
|
- Add _helpers.tpl with common label/selector functions
|
||||||
|
- Enhance Chart.yaml with proper metadata (version, description, etc.)
|
||||||
|
"""
|
||||||
|
|
||||||
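A hedged usage sketch of this entry point; the argument values are placeholders, and per the checks in the function body it requires kompose on PATH and a deployment directory that already contains deployment.yml.

```python
# Illustration only: hypothetical paths.
from pathlib import Path

from stack_orchestrator.deploy.k8s.helm.chart_generator import generate_helm_chart

generate_helm_chart(
    stack_path="stacks/my-stack",
    spec_file="spec.yml",
    deployment_dir_path=Path("deployments/my-deployment"),
)
# Writes a self-sufficient chart under deployments/my-deployment/chart/
```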
|
parsed_stack = get_parsed_stack_config(stack_path)
|
||||||
|
if parsed_stack is None:
|
||||||
|
error_exit(f"Failed to parse stack config: {stack_path}")
|
||||||
|
stack_name = parsed_stack.get("name", stack_path)
|
||||||
|
|
||||||
|
# 1. Check Kompose availability
|
||||||
|
if not check_kompose_available():
|
||||||
|
error_exit("kompose not found in PATH.\n")
|
||||||
|
|
||||||
|
# 2. Read cluster-id from deployment.yml
|
||||||
|
deployment_file = deployment_dir_path / constants.deployment_file_name
|
||||||
|
if not deployment_file.exists():
|
||||||
|
error_exit(f"Deployment file not found: {deployment_file}")
|
||||||
|
|
||||||
|
yaml = get_yaml()
|
||||||
|
deployment_config = yaml.load(open(deployment_file, "r"))
|
||||||
|
cluster_id = deployment_config.get(constants.cluster_id_key)
|
||||||
|
if not cluster_id:
|
||||||
|
error_exit(f"cluster-id not found in {deployment_file}")
|
||||||
|
|
||||||
|
# 3. Derive chart name from stack name + cluster-id suffix
|
||||||
|
# Sanitize stack name for use in chart name
|
||||||
|
sanitized_stack_name = stack_name.replace("_", "-").replace(" ", "-")
|
||||||
|
|
||||||
|
# Extract hex suffix from cluster-id (after the prefix)
|
||||||
|
# cluster-id format: "laconic-<hex>" -> extract the hex part
|
||||||
|
cluster_id_suffix = cluster_id.split("-", 1)[1] if "-" in cluster_id else cluster_id
|
||||||
|
|
||||||
|
# Combine to create human-readable + unique chart name
|
||||||
|
chart_name = f"{sanitized_stack_name}-{cluster_id_suffix}"
|
||||||
|
|
||||||
|
if opts.o.debug:
|
||||||
|
print(f"Cluster ID: {cluster_id}")
|
||||||
|
print(f"Chart name: {chart_name}")
|
||||||
|
|
||||||
|
# 4. Get compose files from stack (pods + jobs)
|
||||||
|
pods = get_pod_list(parsed_stack)
|
||||||
|
if not pods:
|
||||||
|
error_exit(f"No pods found in stack: {stack_path}")
|
||||||
|
|
||||||
|
jobs = get_job_list(parsed_stack)
|
||||||
|
|
||||||
|
if opts.o.debug:
|
||||||
|
print(f"Found {len(pods)} pod(s) in stack: {pods}")
|
||||||
|
if jobs:
|
||||||
|
print(f"Found {len(jobs)} job(s) in stack: {jobs}")
|
||||||
|
|
||||||
|
compose_files = []
|
||||||
|
for pod in pods:
|
||||||
|
pod_file = get_pod_file_path(stack_path, parsed_stack, pod)
|
||||||
|
if pod_file is None:
|
||||||
|
error_exit(f"Pod file path not found for pod: {pod}")
|
||||||
|
pod_file_path = Path(pod_file) if isinstance(pod_file, str) else pod_file
|
||||||
|
if not pod_file_path.exists():
|
||||||
|
error_exit(f"Pod file not found: {pod_file_path}")
|
||||||
|
compose_files.append(pod_file_path)
|
||||||
|
if opts.o.debug:
|
||||||
|
print(f"Found compose file: {pod_file_path.name}")
|
||||||
|
|
||||||
|
# Add job compose files
|
||||||
|
job_files = []
|
||||||
|
for job in jobs:
|
||||||
|
job_file = get_job_file_path(stack_path, parsed_stack, job)
|
||||||
|
if job_file is None:
|
||||||
|
error_exit(f"Job file path not found for job: {job}")
|
||||||
|
job_file_path = Path(job_file) if isinstance(job_file, str) else job_file
|
||||||
|
if not job_file_path.exists():
|
||||||
|
error_exit(f"Job file not found: {job_file_path}")
|
||||||
|
compose_files.append(job_file_path)
|
||||||
|
job_files.append(job_file_path)
|
||||||
|
if opts.o.debug:
|
||||||
|
print(f"Found job compose file: {job_file_path.name}")
|
||||||
|
|
||||||
|
try:
|
||||||
|
version = get_kompose_version()
|
||||||
|
print(f"Using kompose version: {version}")
|
||||||
|
except Exception as e:
|
||||||
|
error_exit(f"Failed to get kompose version: {e}")
|
||||||
|
|
||||||
|
# 5. Create chart directory and invoke Kompose
|
||||||
|
chart_dir = deployment_dir_path / "chart"
|
||||||
|
|
||||||
|
print(
|
||||||
|
f"Converting {len(compose_files)} compose file(s) to Helm chart "
|
||||||
|
"using Kompose..."
|
||||||
|
)
|
||||||
|
|
||||||
|
try:
|
||||||
|
output = convert_to_helm_chart(
|
||||||
|
compose_files=compose_files, output_dir=chart_dir, chart_name=chart_name
|
||||||
|
)
|
||||||
|
if opts.o.debug:
|
||||||
|
print(f"Kompose output:\n{output}")
|
||||||
|
except Exception as e:
|
||||||
|
error_exit(f"Helm chart generation failed: {e}")
|
||||||
|
|
||||||
|
# 6. Post-process generated chart
|
||||||
|
_post_process_chart(chart_dir, chart_name, jobs)
|
||||||
|
|
||||||
|
# 7. Generate README.md with basic installation instructions
|
||||||
|
readme_content = f"""# {chart_name} Helm Chart
|
||||||
|
|
||||||
|
Generated by laconic-so from stack: `{stack_path}`
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
- Kubernetes cluster (v1.27+)
|
||||||
|
- Helm (v3.12+)
|
||||||
|
- kubectl configured to access your cluster
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Install the chart
|
||||||
|
helm install {chart_name} {chart_dir}
|
||||||
|
|
||||||
|
# Alternatively, install with your own release name
|
||||||
|
# helm install <your-release-name> {chart_dir}
|
||||||
|
|
||||||
|
# Check deployment status
|
||||||
|
kubectl get pods
|
||||||
|
```
|
||||||
|
|
||||||
|
## Upgrade
|
||||||
|
|
||||||
|
To apply changes made to the chart, run an upgrade:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
helm upgrade {chart_name} {chart_dir}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Uninstallation
|
||||||
|
|
||||||
|
```bash
|
||||||
|
helm uninstall {chart_name}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
The chart was generated from Docker Compose files using Kompose.
|
||||||
|
|
||||||
|
### Customization
|
||||||
|
|
||||||
|
Edit the generated template files in `templates/` to customize:
|
||||||
|
- Image repositories and tags
|
||||||
|
- Resource limits (CPU, memory)
|
||||||
|
- Persistent volume sizes
|
||||||
|
- Replica counts
|
||||||
|
"""
|
||||||
|
|
||||||
|
readme_path = chart_dir / "README.md"
|
||||||
|
readme_path.write_text(readme_content)
|
||||||
|
|
||||||
|
if opts.o.debug:
|
||||||
|
print(f"Generated README: {readme_path}")
|
||||||
|
|
||||||
|
# 8. Success message
|
||||||
|
print(f"\n{'=' * 60}")
|
||||||
|
print("✓ Helm chart generated successfully!")
|
||||||
|
print(f"{'=' * 60}")
|
||||||
|
print("\nChart details:")
|
||||||
|
print(f" Name: {chart_name}")
|
||||||
|
print(f" Location: {chart_dir.absolute()}")
|
||||||
|
print(f" Stack: {stack_path}")
|
||||||
|
|
||||||
|
# Count generated files
|
||||||
|
template_files = (
|
||||||
|
list((chart_dir / "templates").glob("*.yaml"))
|
||||||
|
if (chart_dir / "templates").exists()
|
||||||
|
else []
|
||||||
|
)
|
||||||
|
print(f" Files: {len(template_files)} template(s) generated")
|
||||||
|
|
||||||
|
print("\nDeployment directory structure:")
|
||||||
|
print(f" {deployment_dir_path}/")
|
||||||
|
print(" ├── deployment.yml (cluster-id)")
|
||||||
|
print(" ├── spec.yml (reference)")
|
||||||
|
print(" ├── stack.yml (reference)")
|
||||||
|
print(" └── chart/ (self-sufficient Helm chart)")
|
||||||
|
|
||||||
|
print("\nNext steps:")
|
||||||
|
print(" 1. Review the chart:")
|
||||||
|
print(f" cd {chart_dir}")
|
||||||
|
print(" cat Chart.yaml")
|
||||||
|
print("")
|
||||||
|
print(" 2. Review generated templates:")
|
||||||
|
print(" ls templates/")
|
||||||
|
print("")
|
||||||
|
print(" 3. Install to Kubernetes:")
|
||||||
|
print(f" helm install {chart_name} {chart_dir}")
|
||||||
|
print("")
|
||||||
|
print(" # Or use your own release name")
|
||||||
|
print(f" helm install <your-release-name> {chart_dir}")
|
||||||
|
print("")
|
||||||
|
print(" 4. Check deployment:")
|
||||||
|
print(" kubectl get pods")
|
||||||
|
print("")
|
||||||
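For orientation, a minimal sketch of driving this generator directly from Python follows; the module path and input paths are placeholders, since the CLI wiring for this function is not part of this hunk.

```python
# Sketch only: module path and input paths are assumptions, not confirmed here.
from pathlib import Path

from stack_orchestrator.deploy.k8s.helm.chart_generator import generate_helm_chart

generate_helm_chart(
    stack_path="stacks/example-stack",               # placeholder stack directory
    spec_file="example-spec.yml",                    # placeholder spec file
    deployment_dir_path=Path("example-deployment"),  # must already hold deployment.yml
)
# On success the chart is written to example-deployment/chart/ and can be
# installed with: helm install <chart-name> example-deployment/chart
```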
171
stack_orchestrator/deploy/k8s/helm/job_runner.py
Normal file
@ -0,0 +1,171 @@
|
|||||||
|
# Copyright © 2025 Vulcanize
|
||||||
|
|
||||||
|
# This program is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Affero General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
|
||||||
|
# This program is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Affero General Public License for more details.
|
||||||
|
|
||||||
|
# You should have received a copy of the GNU Affero General Public License
|
||||||
|
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import subprocess
|
||||||
|
import tempfile
|
||||||
|
import os
|
||||||
|
import json
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional
|
||||||
|
from stack_orchestrator.util import get_yaml
|
||||||
|
|
||||||
|
|
||||||
|
def get_release_name_from_chart(chart_dir: Path) -> str:
|
||||||
|
"""
|
||||||
|
Read the chart name from Chart.yaml to use as the release name.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
chart_dir: Path to the Helm chart directory
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Chart name from Chart.yaml
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
Exception if Chart.yaml not found or name is missing
|
||||||
|
"""
|
||||||
|
chart_yaml_path = chart_dir / "Chart.yaml"
|
||||||
|
if not chart_yaml_path.exists():
|
||||||
|
raise Exception(f"Chart.yaml not found: {chart_yaml_path}")
|
||||||
|
|
||||||
|
yaml = get_yaml()
|
||||||
|
chart_yaml = yaml.load(open(chart_yaml_path, "r"))
|
||||||
|
|
||||||
|
if "name" not in chart_yaml:
|
||||||
|
raise Exception(f"Chart name not found in {chart_yaml_path}")
|
||||||
|
|
||||||
|
return chart_yaml["name"]
|
||||||
|
|
||||||
|
|
||||||
|
def run_helm_job(
|
||||||
|
chart_dir: Path,
|
||||||
|
job_name: str,
|
||||||
|
release: Optional[str] = None,
|
||||||
|
namespace: str = "default",
|
||||||
|
timeout: int = 600,
|
||||||
|
verbose: bool = False,
|
||||||
|
) -> None:
|
||||||
|
"""
|
||||||
|
Run a one-time job from a Helm chart.
|
||||||
|
|
||||||
|
This function:
|
||||||
|
1. Uses provided release name, or reads it from Chart.yaml if not provided
|
||||||
|
2. Uses helm template to render the job manifest with the job enabled
|
||||||
|
3. Applies the job manifest to the cluster
|
||||||
|
4. Waits for the job to complete
|
||||||
|
|
||||||
|
Args:
|
||||||
|
chart_dir: Path to the Helm chart directory
|
||||||
|
job_name: Name of the job to run (without -job suffix)
|
||||||
|
release: Optional Helm release name (defaults to chart name from Chart.yaml)
|
||||||
|
namespace: Kubernetes namespace
|
||||||
|
timeout: Timeout in seconds for job completion (default: 600)
|
||||||
|
verbose: Enable verbose output
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
Exception if the job fails or times out
|
||||||
|
"""
|
||||||
|
if not chart_dir.exists():
|
||||||
|
raise Exception(f"Chart directory not found: {chart_dir}")
|
||||||
|
|
||||||
|
# Use provided release name, or get it from Chart.yaml
|
||||||
|
if release is None:
|
||||||
|
release = get_release_name_from_chart(chart_dir)
|
||||||
|
if verbose:
|
||||||
|
print(f"Using release name from Chart.yaml: {release}")
|
||||||
|
else:
|
||||||
|
if verbose:
|
||||||
|
print(f"Using provided release name: {release}")
|
||||||
|
|
||||||
|
job_template_file = f"templates/{job_name}-job.yaml"
|
||||||
|
|
||||||
|
if verbose:
|
||||||
|
print(f"Running job '{job_name}' from helm chart: {chart_dir}")
|
||||||
|
|
||||||
|
# Use helm template to render the job manifest
|
||||||
|
with tempfile.NamedTemporaryFile(
|
||||||
|
mode="w", suffix=".yaml", delete=False
|
||||||
|
) as tmp_file:
|
||||||
|
try:
|
||||||
|
# Render job template with job enabled
|
||||||
|
# Use --set-json to properly handle job names with dashes
|
||||||
|
jobs_dict = {job_name: {"enabled": True}}
|
||||||
|
values_json = json.dumps(jobs_dict)
|
||||||
|
helm_cmd = [
|
||||||
|
"helm",
|
||||||
|
"template",
|
||||||
|
release,
|
||||||
|
str(chart_dir),
|
||||||
|
"--show-only",
|
||||||
|
job_template_file,
|
||||||
|
"--set-json",
|
||||||
|
f"jobs={values_json}",
|
||||||
|
]
|
||||||
|
|
||||||
|
if verbose:
|
||||||
|
print(f"Running: {' '.join(helm_cmd)}")
|
||||||
|
|
||||||
|
result = subprocess.run(
|
||||||
|
helm_cmd, check=True, capture_output=True, text=True
|
||||||
|
)
|
||||||
|
tmp_file.write(result.stdout)
|
||||||
|
tmp_file.flush()
|
||||||
|
|
||||||
|
if verbose:
|
||||||
|
print(f"Generated job manifest:\n{result.stdout}")
|
||||||
|
|
||||||
|
# Parse the manifest to get the actual job name
|
||||||
|
yaml = get_yaml()
|
||||||
|
manifest = yaml.load(result.stdout)
|
||||||
|
actual_job_name = manifest.get("metadata", {}).get("name", job_name)
|
||||||
|
|
||||||
|
# Apply the job manifest
|
||||||
|
kubectl_apply_cmd = [
|
||||||
|
"kubectl",
|
||||||
|
"apply",
|
||||||
|
"-f",
|
||||||
|
tmp_file.name,
|
||||||
|
"-n",
|
||||||
|
namespace,
|
||||||
|
]
|
||||||
|
subprocess.run(
|
||||||
|
kubectl_apply_cmd, check=True, capture_output=True, text=True
|
||||||
|
)
|
||||||
|
|
||||||
|
if verbose:
|
||||||
|
print(f"Job {actual_job_name} created, waiting for completion...")
|
||||||
|
|
||||||
|
# Wait for job completion
|
||||||
|
wait_cmd = [
|
||||||
|
"kubectl",
|
||||||
|
"wait",
|
||||||
|
"--for=condition=complete",
|
||||||
|
f"job/{actual_job_name}",
|
||||||
|
f"--timeout={timeout}s",
|
||||||
|
"-n",
|
||||||
|
namespace,
|
||||||
|
]
|
||||||
|
|
||||||
|
subprocess.run(wait_cmd, check=True, capture_output=True, text=True)
|
||||||
|
|
||||||
|
if verbose:
|
||||||
|
print(f"Job {job_name} completed successfully")
|
||||||
|
|
||||||
|
except subprocess.CalledProcessError as e:
|
||||||
|
error_msg = e.stderr if e.stderr else str(e)
|
||||||
|
raise Exception(f"Job failed: {error_msg}")
|
||||||
|
finally:
|
||||||
|
# Clean up temp file
|
||||||
|
if os.path.exists(tmp_file.name):
|
||||||
|
os.unlink(tmp_file.name)
|
||||||
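A minimal sketch of calling `run_helm_job` against a generated chart is below; the job name is a placeholder and assumes the chart contains a matching `templates/<job>-job.yaml` wrapped in the `.Values.jobs.<job>.enabled` conditional described earlier.

```python
# Sketch only: "db-migrate" is a placeholder job name.
from pathlib import Path

from stack_orchestrator.deploy.k8s.helm.job_runner import run_helm_job

run_helm_job(
    chart_dir=Path("example-deployment/chart"),
    job_name="db-migrate",
    namespace="default",
    timeout=300,
    verbose=True,
)
# Renders templates/db-migrate-job.yaml with the job enabled, applies it with
# kubectl, and blocks until the Job reports condition=complete (or times out).
```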
103
stack_orchestrator/deploy/k8s/helm/kompose_wrapper.py
Normal file
@ -0,0 +1,103 @@
|
|||||||
|
# Copyright © 2025 Vulcanize
|
||||||
|
|
||||||
|
# This program is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Affero General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
|
||||||
|
# This program is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Affero General Public License for more details.
|
||||||
|
|
||||||
|
# You should have received a copy of the GNU Affero General Public License
|
||||||
|
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import subprocess
|
||||||
|
import shutil
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import List, Optional
|
||||||
|
|
||||||
|
|
||||||
|
def check_kompose_available() -> bool:
|
||||||
|
"""Check if kompose binary is available in PATH."""
|
||||||
|
return shutil.which("kompose") is not None
|
||||||
|
|
||||||
|
|
||||||
|
def get_kompose_version() -> str:
|
||||||
|
"""
|
||||||
|
Get the installed kompose version.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Version string (e.g., "1.34.0")
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
Exception if kompose is not available
|
||||||
|
"""
|
||||||
|
if not check_kompose_available():
|
||||||
|
raise Exception("kompose not found in PATH")
|
||||||
|
|
||||||
|
result = subprocess.run(
|
||||||
|
["kompose", "version"], capture_output=True, text=True, timeout=10
|
||||||
|
)
|
||||||
|
|
||||||
|
if result.returncode != 0:
|
||||||
|
raise Exception(f"Failed to get kompose version: {result.stderr}")
|
||||||
|
|
||||||
|
# Parse version from output like "1.34.0 (HEAD)"
|
||||||
|
# Output format: "1.34.0 (HEAD)" or just "1.34.0"
|
||||||
|
version_line = result.stdout.strip()
|
||||||
|
version = version_line.split()[0] if version_line else "unknown"
|
||||||
|
|
||||||
|
return version
|
||||||
|
|
||||||
|
|
||||||
|
def convert_to_helm_chart(
|
||||||
|
compose_files: List[Path], output_dir: Path, chart_name: Optional[str] = None
|
||||||
|
) -> str:
|
||||||
|
"""
|
||||||
|
Invoke kompose to convert Docker Compose files to a Helm chart.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
compose_files: List of paths to docker-compose.yml files
|
||||||
|
output_dir: Directory where the Helm chart will be generated
|
||||||
|
chart_name: Optional name for the chart (defaults to directory name)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
stdout from kompose command
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
Exception if kompose conversion fails
|
||||||
|
"""
|
||||||
|
if not check_kompose_available():
|
||||||
|
raise Exception(
|
||||||
|
"kompose not found in PATH. "
|
||||||
|
"Install from: https://kompose.io/installation/"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Ensure output directory exists
|
||||||
|
output_dir.mkdir(parents=True, exist_ok=True)
|
||||||
|
|
||||||
|
# Build kompose command
|
||||||
|
cmd = ["kompose", "convert"]
|
||||||
|
|
||||||
|
# Add all compose files
|
||||||
|
for compose_file in compose_files:
|
||||||
|
if not compose_file.exists():
|
||||||
|
raise Exception(f"Compose file not found: {compose_file}")
|
||||||
|
cmd.extend(["-f", str(compose_file)])
|
||||||
|
|
||||||
|
# Add chart flag and output directory
|
||||||
|
cmd.extend(["--chart", "-o", str(output_dir)])
|
||||||
|
|
||||||
|
# Execute kompose
|
||||||
|
result = subprocess.run(cmd, capture_output=True, text=True, timeout=60)
|
||||||
|
|
||||||
|
if result.returncode != 0:
|
||||||
|
raise Exception(
|
||||||
|
f"Kompose conversion failed:\n"
|
||||||
|
f"Command: {' '.join(cmd)}\n"
|
||||||
|
f"Error: {result.stderr}"
|
||||||
|
)
|
||||||
|
|
||||||
|
return result.stdout
|
||||||
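Used on its own, the wrapper looks roughly like this; the compose file, output directory, and chart name are placeholders.

```python
# Sketch only: paths and chart name are placeholders.
from pathlib import Path

from stack_orchestrator.deploy.k8s.helm.kompose_wrapper import (
    check_kompose_available,
    convert_to_helm_chart,
    get_kompose_version,
)

if check_kompose_available():
    print(f"Using kompose {get_kompose_version()}")
    kompose_output = convert_to_helm_chart(
        compose_files=[Path("compose/docker-compose-example.yml")],
        output_dir=Path("example-deployment/chart"),
        chart_name="example-chart",
    )
    print(kompose_output)
```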
@ -18,12 +18,31 @@ import os
|
|||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
import subprocess
|
import subprocess
|
||||||
import re
|
import re
|
||||||
from typing import Set, Mapping, List
|
from typing import Set, Mapping, List, Optional, cast
|
||||||
|
|
||||||
from stack_orchestrator.util import get_k8s_dir, error_exit
|
from stack_orchestrator.util import get_k8s_dir, error_exit
|
||||||
from stack_orchestrator.opts import opts
|
from stack_orchestrator.opts import opts
|
||||||
from stack_orchestrator.deploy.deploy_util import parsed_pod_files_map_from_file_names
|
from stack_orchestrator.deploy.deploy_util import parsed_pod_files_map_from_file_names
|
||||||
from stack_orchestrator.deploy.deployer import DeployerException
|
from stack_orchestrator.deploy.deployer import DeployerException
|
||||||
|
from stack_orchestrator import constants
|
||||||
|
|
||||||
|
|
||||||
|
def get_kind_cluster():
|
||||||
|
"""Get an existing kind cluster, if any.
|
||||||
|
|
||||||
|
Uses `kind get clusters` to find existing clusters.
|
||||||
|
Returns the cluster name or None if no cluster exists.
|
||||||
|
"""
|
||||||
|
result = subprocess.run(
|
||||||
|
"kind get clusters", shell=True, capture_output=True, text=True
|
||||||
|
)
|
||||||
|
if result.returncode != 0:
|
||||||
|
return None
|
||||||
|
|
||||||
|
clusters = result.stdout.strip().splitlines()
|
||||||
|
if clusters:
|
||||||
|
return clusters[0] # Return the first cluster found
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
def _run_command(command: str):
|
def _run_command(command: str):
|
||||||
@ -50,38 +69,53 @@ def wait_for_ingress_in_kind():
|
|||||||
for i in range(20):
|
for i in range(20):
|
||||||
warned_waiting = False
|
warned_waiting = False
|
||||||
w = watch.Watch()
|
w = watch.Watch()
|
||||||
for event in w.stream(func=core_v1.list_namespaced_pod,
|
for event in w.stream(
|
||||||
namespace="ingress-nginx",
|
func=core_v1.list_namespaced_pod,
|
||||||
label_selector="app.kubernetes.io/component=controller",
|
namespace="caddy-system",
|
||||||
timeout_seconds=30):
|
label_selector=(
|
||||||
if event['object'].status.container_statuses:
|
"app.kubernetes.io/name=caddy-ingress-controller,"
|
||||||
if event['object'].status.container_statuses[0].ready is True:
|
"app.kubernetes.io/component=controller"
|
||||||
|
),
|
||||||
|
timeout_seconds=30,
|
||||||
|
):
|
||||||
|
event_dict = cast(dict, event)
|
||||||
|
pod = cast(client.V1Pod, event_dict.get("object"))
|
||||||
|
if pod and pod.status and pod.status.container_statuses:
|
||||||
|
if pod.status.container_statuses[0].ready is True:
|
||||||
if warned_waiting:
|
if warned_waiting:
|
||||||
print("Ingress controller is ready")
|
print("Caddy ingress controller is ready")
|
||||||
return
|
return
|
||||||
print("Waiting for ingress controller to become ready...")
|
print("Waiting for Caddy ingress controller to become ready...")
|
||||||
warned_waiting = True
|
warned_waiting = True
|
||||||
error_exit("ERROR: Timed out waiting for ingress to become ready")
|
error_exit("ERROR: Timed out waiting for Caddy ingress to become ready")
|
||||||
|
|
||||||
|
|
||||||
def install_ingress_for_kind():
|
def install_ingress_for_kind():
|
||||||
api_client = client.ApiClient()
|
api_client = client.ApiClient()
|
||||||
ingress_install = os.path.abspath(get_k8s_dir().joinpath("components", "ingress", "ingress-nginx-kind-deploy.yaml"))
|
ingress_install = os.path.abspath(
|
||||||
|
get_k8s_dir().joinpath(
|
||||||
|
"components", "ingress", "ingress-caddy-kind-deploy.yaml"
|
||||||
|
)
|
||||||
|
)
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print("Installing nginx ingress controller in kind cluster")
|
print("Installing Caddy ingress controller in kind cluster")
|
||||||
utils.create_from_yaml(api_client, yaml_file=ingress_install)
|
utils.create_from_yaml(api_client, yaml_file=ingress_install)
|
||||||
|
|
||||||
|
|
||||||
def load_images_into_kind(kind_cluster_name: str, image_set: Set[str]):
|
def load_images_into_kind(kind_cluster_name: str, image_set: Set[str]):
|
||||||
for image in image_set:
|
for image in image_set:
|
||||||
result = _run_command(f"kind load docker-image {image} --name {kind_cluster_name}")
|
result = _run_command(
|
||||||
|
f"kind load docker-image {image} --name {kind_cluster_name}"
|
||||||
|
)
|
||||||
if result.returncode != 0:
|
if result.returncode != 0:
|
||||||
raise DeployerException(f"kind create cluster failed: {result}")
|
raise DeployerException(f"kind load docker-image failed: {result}")
|
||||||
|
|
||||||
|
|
||||||
def pods_in_deployment(core_api: client.CoreV1Api, deployment_name: str):
|
def pods_in_deployment(core_api: client.CoreV1Api, deployment_name: str):
|
||||||
pods = []
|
pods = []
|
||||||
pod_response = core_api.list_namespaced_pod(namespace="default", label_selector=f"app={deployment_name}")
|
pod_response = core_api.list_namespaced_pod(
|
||||||
|
namespace="default", label_selector=f"app={deployment_name}"
|
||||||
|
)
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"pod_response: {pod_response}")
|
print(f"pod_response: {pod_response}")
|
||||||
for pod_info in pod_response.items:
|
for pod_info in pod_response.items:
|
||||||
@ -90,14 +124,18 @@ def pods_in_deployment(core_api: client.CoreV1Api, deployment_name: str):
|
|||||||
return pods
|
return pods
|
||||||
|
|
||||||
|
|
||||||
def containers_in_pod(core_api: client.CoreV1Api, pod_name: str):
|
def containers_in_pod(core_api: client.CoreV1Api, pod_name: str) -> List[str]:
|
||||||
containers = []
|
containers: List[str] = []
|
||||||
pod_response = core_api.read_namespaced_pod(pod_name, namespace="default")
|
pod_response = cast(
|
||||||
|
client.V1Pod, core_api.read_namespaced_pod(pod_name, namespace="default")
|
||||||
|
)
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"pod_response: {pod_response}")
|
print(f"pod_response: {pod_response}")
|
||||||
pod_containers = pod_response.spec.containers
|
if not pod_response.spec or not pod_response.spec.containers:
|
||||||
for pod_container in pod_containers:
|
return containers
|
||||||
containers.append(pod_container.name)
|
for pod_container in pod_response.spec.containers:
|
||||||
|
if pod_container.name:
|
||||||
|
containers.append(pod_container.name)
|
||||||
return containers
|
return containers
|
||||||
|
|
||||||
|
|
||||||
@ -137,13 +175,16 @@ def volume_mounts_for_service(parsed_pod_files, service):
|
|||||||
if "volumes" in service_obj:
|
if "volumes" in service_obj:
|
||||||
volumes = service_obj["volumes"]
|
volumes = service_obj["volumes"]
|
||||||
for mount_string in volumes:
|
for mount_string in volumes:
|
||||||
# Looks like: test-data:/data or test-data:/data:ro or test-data:/data:rw
|
# Looks like: test-data:/data
|
||||||
|
# or test-data:/data:ro or test-data:/data:rw
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"mount_string: {mount_string}")
|
print(f"mount_string: {mount_string}")
|
||||||
mount_split = mount_string.split(":")
|
mount_split = mount_string.split(":")
|
||||||
volume_name = mount_split[0]
|
volume_name = mount_split[0]
|
||||||
mount_path = mount_split[1]
|
mount_path = mount_split[1]
|
||||||
mount_options = mount_split[2] if len(mount_split) == 3 else None
|
mount_options = (
|
||||||
|
mount_split[2] if len(mount_split) == 3 else None
|
||||||
|
)
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"volume_name: {volume_name}")
|
print(f"volume_name: {volume_name}")
|
||||||
print(f"mount path: {mount_path}")
|
print(f"mount path: {mount_path}")
|
||||||
@ -151,7 +192,7 @@ def volume_mounts_for_service(parsed_pod_files, service):
|
|||||||
volume_device = client.V1VolumeMount(
|
volume_device = client.V1VolumeMount(
|
||||||
mount_path=mount_path,
|
mount_path=mount_path,
|
||||||
name=volume_name,
|
name=volume_name,
|
||||||
read_only="ro" == mount_options
|
read_only="ro" == mount_options,
|
||||||
)
|
)
|
||||||
result.append(volume_device)
|
result.append(volume_device)
|
||||||
return result
|
return result
|
||||||
@ -165,12 +206,19 @@ def volumes_for_pod_files(parsed_pod_files, spec, app_name):
|
|||||||
volumes = parsed_pod_file["volumes"]
|
volumes = parsed_pod_file["volumes"]
|
||||||
for volume_name in volumes.keys():
|
for volume_name in volumes.keys():
|
||||||
if volume_name in spec.get_configmaps():
|
if volume_name in spec.get_configmaps():
|
||||||
config_map = client.V1ConfigMapVolumeSource(name=f"{app_name}-{volume_name}")
|
# Set defaultMode=0o755 to make scripts executable
|
||||||
|
config_map = client.V1ConfigMapVolumeSource(
|
||||||
|
name=f"{app_name}-{volume_name}", default_mode=0o755
|
||||||
|
)
|
||||||
volume = client.V1Volume(name=volume_name, config_map=config_map)
|
volume = client.V1Volume(name=volume_name, config_map=config_map)
|
||||||
result.append(volume)
|
result.append(volume)
|
||||||
else:
|
else:
|
||||||
claim = client.V1PersistentVolumeClaimVolumeSource(claim_name=f"{app_name}-{volume_name}")
|
claim = client.V1PersistentVolumeClaimVolumeSource(
|
||||||
volume = client.V1Volume(name=volume_name, persistent_volume_claim=claim)
|
claim_name=f"{app_name}-{volume_name}"
|
||||||
|
)
|
||||||
|
volume = client.V1Volume(
|
||||||
|
name=volume_name, persistent_volume_claim=claim
|
||||||
|
)
|
||||||
result.append(volume)
|
result.append(volume)
|
||||||
return result
|
return result
|
||||||
|
|
||||||
@ -202,7 +250,8 @@ def _generate_kind_mounts(parsed_pod_files, deployment_dir, deployment_context):
|
|||||||
if "volumes" in service_obj:
|
if "volumes" in service_obj:
|
||||||
volumes = service_obj["volumes"]
|
volumes = service_obj["volumes"]
|
||||||
for mount_string in volumes:
|
for mount_string in volumes:
|
||||||
# Looks like: test-data:/data or test-data:/data:ro or test-data:/data:rw
|
# Looks like: test-data:/data
|
||||||
|
# or test-data:/data:ro or test-data:/data:rw
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"mount_string: {mount_string}")
|
print(f"mount_string: {mount_string}")
|
||||||
mount_split = mount_string.split(":")
|
mount_split = mount_string.split(":")
|
||||||
@ -214,15 +263,21 @@ def _generate_kind_mounts(parsed_pod_files, deployment_dir, deployment_context):
|
|||||||
print(f"mount path: {mount_path}")
|
print(f"mount path: {mount_path}")
|
||||||
if volume_name not in deployment_context.spec.get_configmaps():
|
if volume_name not in deployment_context.spec.get_configmaps():
|
||||||
if volume_host_path_map[volume_name]:
|
if volume_host_path_map[volume_name]:
|
||||||
|
host_path = _make_absolute_host_path(
|
||||||
|
volume_host_path_map[volume_name],
|
||||||
|
deployment_dir,
|
||||||
|
)
|
||||||
|
container_path = get_kind_pv_bind_mount_path(
|
||||||
|
volume_name
|
||||||
|
)
|
||||||
volume_definitions.append(
|
volume_definitions.append(
|
||||||
f" - hostPath: {_make_absolute_host_path(volume_host_path_map[volume_name], deployment_dir)}\n"
|
f" - hostPath: {host_path}\n"
|
||||||
f" containerPath: {get_kind_pv_bind_mount_path(volume_name)}\n"
|
f" containerPath: {container_path}\n"
|
||||||
)
|
)
|
||||||
return (
|
return (
|
||||||
"" if len(volume_definitions) == 0 else (
|
""
|
||||||
" extraMounts:\n"
|
if len(volume_definitions) == 0
|
||||||
f"{''.join(volume_definitions)}"
|
else (" extraMounts:\n" f"{''.join(volume_definitions)}")
|
||||||
)
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@ -240,25 +295,233 @@ def _generate_kind_port_mappings_from_services(parsed_pod_files):
|
|||||||
for port_string in ports:
|
for port_string in ports:
|
||||||
# TODO handle the complex cases
|
# TODO handle the complex cases
|
||||||
# Looks like: 80 or something more complicated
|
# Looks like: 80 or something more complicated
|
||||||
port_definitions.append(f" - containerPort: {port_string}\n hostPort: {port_string}\n")
|
port_definitions.append(
|
||||||
|
f" - containerPort: {port_string}\n"
|
||||||
|
f" hostPort: {port_string}\n"
|
||||||
|
)
|
||||||
return (
|
return (
|
||||||
"" if len(port_definitions) == 0 else (
|
""
|
||||||
" extraPortMappings:\n"
|
if len(port_definitions) == 0
|
||||||
f"{''.join(port_definitions)}"
|
else (" extraPortMappings:\n" f"{''.join(port_definitions)}")
|
||||||
)
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
def _generate_kind_port_mappings(parsed_pod_files):
|
def _generate_kind_port_mappings(parsed_pod_files):
|
||||||
port_definitions = []
|
port_definitions = []
|
||||||
# For now we just map port 80 for the nginx ingress controller we install in kind
|
# Map port 80 and 443 for the Caddy ingress controller (HTTPS support)
|
||||||
port_string = "80"
|
for port_string in ["80", "443"]:
|
||||||
port_definitions.append(f" - containerPort: {port_string}\n hostPort: {port_string}\n")
|
port_definitions.append(
|
||||||
return (
|
f" - containerPort: {port_string}\n hostPort: {port_string}\n"
|
||||||
"" if len(port_definitions) == 0 else (
|
|
||||||
" extraPortMappings:\n"
|
|
||||||
f"{''.join(port_definitions)}"
|
|
||||||
)
|
)
|
||||||
|
return (
|
||||||
|
""
|
||||||
|
if len(port_definitions) == 0
|
||||||
|
else (" extraPortMappings:\n" f"{''.join(port_definitions)}")
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _generate_high_memlock_spec_mount(deployment_dir: Path):
|
||||||
|
"""Generate the extraMount entry for high-memlock-spec.json.
|
||||||
|
|
||||||
|
The spec file must be mounted at the same path inside the kind node
|
||||||
|
as it appears on the host, because containerd's base_runtime_spec
|
||||||
|
references an absolute path.
|
||||||
|
"""
|
||||||
|
spec_path = deployment_dir.joinpath(constants.high_memlock_spec_filename).resolve()
|
||||||
|
return f" - hostPath: {spec_path}\n" f" containerPath: {spec_path}\n"
|
||||||
|
|
||||||
|
|
||||||
|
def generate_high_memlock_spec_json():
|
||||||
|
"""Generate OCI spec JSON with unlimited RLIMIT_MEMLOCK.
|
||||||
|
|
||||||
|
This is needed for workloads like Solana validators that require large
|
||||||
|
amounts of locked memory for memory-mapped files during snapshot decompression.
|
||||||
|
|
||||||
|
The IPC_LOCK capability alone doesn't raise the RLIMIT_MEMLOCK limit - it only
|
||||||
|
allows mlock() calls. We need to set the rlimit in the OCI runtime spec.
|
||||||
|
|
||||||
|
IMPORTANT: This must be a complete OCI runtime spec, not just the rlimits
|
||||||
|
section. The spec is based on kind's default cri-base.json with rlimits added.
|
||||||
|
"""
|
||||||
|
import json
|
||||||
|
|
||||||
|
# Use maximum 64-bit signed integer value for unlimited
|
||||||
|
max_rlimit = 9223372036854775807
|
||||||
|
# Based on kind's /etc/containerd/cri-base.json with rlimits added
|
||||||
|
spec = {
|
||||||
|
"ociVersion": "1.1.0-rc.1",
|
||||||
|
"process": {
|
||||||
|
"user": {"uid": 0, "gid": 0},
|
||||||
|
"cwd": "/",
|
||||||
|
"capabilities": {
|
||||||
|
"bounding": [
|
||||||
|
"CAP_CHOWN",
|
||||||
|
"CAP_DAC_OVERRIDE",
|
||||||
|
"CAP_FSETID",
|
||||||
|
"CAP_FOWNER",
|
||||||
|
"CAP_MKNOD",
|
||||||
|
"CAP_NET_RAW",
|
||||||
|
"CAP_SETGID",
|
||||||
|
"CAP_SETUID",
|
||||||
|
"CAP_SETFCAP",
|
||||||
|
"CAP_SETPCAP",
|
||||||
|
"CAP_NET_BIND_SERVICE",
|
||||||
|
"CAP_SYS_CHROOT",
|
||||||
|
"CAP_KILL",
|
||||||
|
"CAP_AUDIT_WRITE",
|
||||||
|
],
|
||||||
|
"effective": [
|
||||||
|
"CAP_CHOWN",
|
||||||
|
"CAP_DAC_OVERRIDE",
|
||||||
|
"CAP_FSETID",
|
||||||
|
"CAP_FOWNER",
|
||||||
|
"CAP_MKNOD",
|
||||||
|
"CAP_NET_RAW",
|
||||||
|
"CAP_SETGID",
|
||||||
|
"CAP_SETUID",
|
||||||
|
"CAP_SETFCAP",
|
||||||
|
"CAP_SETPCAP",
|
||||||
|
"CAP_NET_BIND_SERVICE",
|
||||||
|
"CAP_SYS_CHROOT",
|
||||||
|
"CAP_KILL",
|
||||||
|
"CAP_AUDIT_WRITE",
|
||||||
|
],
|
||||||
|
"permitted": [
|
||||||
|
"CAP_CHOWN",
|
||||||
|
"CAP_DAC_OVERRIDE",
|
||||||
|
"CAP_FSETID",
|
||||||
|
"CAP_FOWNER",
|
||||||
|
"CAP_MKNOD",
|
||||||
|
"CAP_NET_RAW",
|
||||||
|
"CAP_SETGID",
|
||||||
|
"CAP_SETUID",
|
||||||
|
"CAP_SETFCAP",
|
||||||
|
"CAP_SETPCAP",
|
||||||
|
"CAP_NET_BIND_SERVICE",
|
||||||
|
"CAP_SYS_CHROOT",
|
||||||
|
"CAP_KILL",
|
||||||
|
"CAP_AUDIT_WRITE",
|
||||||
|
],
|
||||||
|
},
|
||||||
|
"rlimits": [
|
||||||
|
{"type": "RLIMIT_MEMLOCK", "hard": max_rlimit, "soft": max_rlimit},
|
||||||
|
{"type": "RLIMIT_NOFILE", "hard": 1048576, "soft": 1048576},
|
||||||
|
],
|
||||||
|
"noNewPrivileges": True,
|
||||||
|
},
|
||||||
|
"root": {"path": "rootfs"},
|
||||||
|
"mounts": [
|
||||||
|
{
|
||||||
|
"destination": "/proc",
|
||||||
|
"type": "proc",
|
||||||
|
"source": "proc",
|
||||||
|
"options": ["nosuid", "noexec", "nodev"],
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"destination": "/dev",
|
||||||
|
"type": "tmpfs",
|
||||||
|
"source": "tmpfs",
|
||||||
|
"options": ["nosuid", "strictatime", "mode=755", "size=65536k"],
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"destination": "/dev/pts",
|
||||||
|
"type": "devpts",
|
||||||
|
"source": "devpts",
|
||||||
|
"options": [
|
||||||
|
"nosuid",
|
||||||
|
"noexec",
|
||||||
|
"newinstance",
|
||||||
|
"ptmxmode=0666",
|
||||||
|
"mode=0620",
|
||||||
|
"gid=5",
|
||||||
|
],
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"destination": "/dev/shm",
|
||||||
|
"type": "tmpfs",
|
||||||
|
"source": "shm",
|
||||||
|
"options": ["nosuid", "noexec", "nodev", "mode=1777", "size=65536k"],
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"destination": "/dev/mqueue",
|
||||||
|
"type": "mqueue",
|
||||||
|
"source": "mqueue",
|
||||||
|
"options": ["nosuid", "noexec", "nodev"],
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"destination": "/sys",
|
||||||
|
"type": "sysfs",
|
||||||
|
"source": "sysfs",
|
||||||
|
"options": ["nosuid", "noexec", "nodev", "ro"],
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"destination": "/run",
|
||||||
|
"type": "tmpfs",
|
||||||
|
"source": "tmpfs",
|
||||||
|
"options": ["nosuid", "strictatime", "mode=755", "size=65536k"],
|
||||||
|
},
|
||||||
|
],
|
||||||
|
"linux": {
|
||||||
|
"resources": {"devices": [{"allow": False, "access": "rwm"}]},
|
||||||
|
"cgroupsPath": "/default",
|
||||||
|
"namespaces": [
|
||||||
|
{"type": "pid"},
|
||||||
|
{"type": "ipc"},
|
||||||
|
{"type": "uts"},
|
||||||
|
{"type": "mount"},
|
||||||
|
{"type": "network"},
|
||||||
|
],
|
||||||
|
"maskedPaths": [
|
||||||
|
"/proc/acpi",
|
||||||
|
"/proc/asound",
|
||||||
|
"/proc/kcore",
|
||||||
|
"/proc/keys",
|
||||||
|
"/proc/latency_stats",
|
||||||
|
"/proc/timer_list",
|
||||||
|
"/proc/timer_stats",
|
||||||
|
"/proc/sched_debug",
|
||||||
|
"/sys/firmware",
|
||||||
|
"/proc/scsi",
|
||||||
|
],
|
||||||
|
"readonlyPaths": [
|
||||||
|
"/proc/bus",
|
||||||
|
"/proc/fs",
|
||||||
|
"/proc/irq",
|
||||||
|
"/proc/sys",
|
||||||
|
"/proc/sysrq-trigger",
|
||||||
|
],
|
||||||
|
},
|
||||||
|
"hooks": {"createContainer": [{"path": "/kind/bin/mount-product-files.sh"}]},
|
||||||
|
}
|
||||||
|
return json.dumps(spec, indent=2)
|
||||||
|
|
||||||
|
|
||||||
|
# Keep old name as alias for backward compatibility
|
||||||
|
def generate_cri_base_json():
|
||||||
|
"""Deprecated: Use generate_high_memlock_spec_json() instead."""
|
||||||
|
return generate_high_memlock_spec_json()
|
||||||
|
|
||||||
|
|
||||||
|
def _generate_containerd_config_patches(
|
||||||
|
deployment_dir: Path, has_high_memlock: bool
|
||||||
|
) -> str:
|
||||||
|
"""Generate containerdConfigPatches YAML for custom runtime handlers.
|
||||||
|
|
||||||
|
This configures containerd to have a runtime handler named 'high-memlock'
|
||||||
|
that uses a custom OCI base spec with unlimited RLIMIT_MEMLOCK.
|
||||||
|
"""
|
||||||
|
if not has_high_memlock:
|
||||||
|
return ""
|
||||||
|
|
||||||
|
spec_path = deployment_dir.joinpath(constants.high_memlock_spec_filename).resolve()
|
||||||
|
runtime_name = constants.high_memlock_runtime
|
||||||
|
plugin_path = 'plugins."io.containerd.grpc.v1.cri".containerd.runtimes'
|
||||||
|
return (
|
||||||
|
"containerdConfigPatches:\n"
|
||||||
|
" - |-\n"
|
||||||
|
f" [{plugin_path}.{runtime_name}]\n"
|
||||||
|
' runtime_type = "io.containerd.runc.v2"\n'
|
||||||
|
f' base_runtime_spec = "{spec_path}"\n'
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@ -268,28 +531,45 @@ def merge_envs(a: Mapping[str, str], b: Mapping[str, str]) -> Mapping[str, str]:
|
|||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
def _expand_shell_vars(raw_val: str) -> str:
|
def _expand_shell_vars(
|
||||||
# could be: <string> or ${<env-var-name>} or ${<env-var-name>:-<default-value>}
|
raw_val: str, env_map: Optional[Mapping[str, str]] = None
|
||||||
# TODO: implement support for variable substitution and default values
|
) -> str:
|
||||||
# if raw_val is like ${<something>} print a warning and substitute an empty string
|
# Expand docker-compose style variable substitution:
|
||||||
# otherwise return raw_val
|
# ${VAR} - use VAR value or empty string
|
||||||
match = re.search(r"^\$\{(.*)\}$", raw_val)
|
# ${VAR:-default} - use VAR value or default if unset/empty
|
||||||
|
# ${VAR-default} - use VAR value or default if unset
|
||||||
|
if env_map is None:
|
||||||
|
env_map = {}
|
||||||
|
if raw_val is None:
|
||||||
|
return ""
|
||||||
|
match = re.search(r"^\$\{([^}]+)\}$", raw_val)
|
||||||
if match:
|
if match:
|
||||||
print(f"WARNING: found unimplemented environment variable substitution: {raw_val}")
|
inner = match.group(1)
|
||||||
else:
|
# Check for default value syntax
|
||||||
return raw_val
|
if ":-" in inner:
|
||||||
|
var_name, default_val = inner.split(":-", 1)
|
||||||
|
return env_map.get(var_name, "") or default_val
|
||||||
|
elif "-" in inner:
|
||||||
|
var_name, default_val = inner.split("-", 1)
|
||||||
|
return env_map.get(var_name, default_val)
|
||||||
|
else:
|
||||||
|
return env_map.get(inner, "")
|
||||||
|
return raw_val
|
||||||
|
|
||||||
|
|
||||||
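A few worked examples of the new expansion behaviour as implemented above. Note that the `${VAR-default}` branch splits on the first dash, so a variable whose own name contains a dash would be parsed as having a default; compose variable names conventionally avoid dashes, and these examples do too.

```python
# Worked examples for _expand_shell_vars (values are illustrative).
env = {"CERC_HOST": "example.local"}

_expand_shell_vars("${CERC_HOST}", env)              # -> "example.local"
_expand_shell_vars("${MISSING_VAR}", env)            # -> ""
_expand_shell_vars("${MISSING_VAR:-fallback}", env)  # -> "fallback"
_expand_shell_vars("${MISSING_VAR-fallback}", env)   # -> "fallback"
_expand_shell_vars("plain-value", env)               # -> "plain-value"
```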
# TODO: handle the case where the same env var is defined in multiple places
|
def envs_from_compose_file(
|
||||||
def envs_from_compose_file(compose_file_envs: Mapping[str, str]) -> Mapping[str, str]:
|
compose_file_envs: Mapping[str, str], env_map: Optional[Mapping[str, str]] = None
|
||||||
|
) -> Mapping[str, str]:
|
||||||
result = {}
|
result = {}
|
||||||
for env_var, env_val in compose_file_envs.items():
|
for env_var, env_val in compose_file_envs.items():
|
||||||
expanded_env_val = _expand_shell_vars(env_val)
|
expanded_env_val = _expand_shell_vars(env_val, env_map)
|
||||||
result.update({env_var: expanded_env_val})
|
result.update({env_var: expanded_env_val})
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
def envs_from_environment_variables_map(map: Mapping[str, str]) -> List[client.V1EnvVar]:
|
def envs_from_environment_variables_map(
|
||||||
|
map: Mapping[str, str]
|
||||||
|
) -> List[client.V1EnvVar]:
|
||||||
result = []
|
result = []
|
||||||
for env_var, env_val in map.items():
|
for env_var, env_val in map.items():
|
||||||
result.append(client.V1EnvVar(env_var, env_val))
|
result.append(client.V1EnvVar(env_var, env_val))
|
||||||
@ -320,10 +600,34 @@ def generate_kind_config(deployment_dir: Path, deployment_context):
|
|||||||
pod_files = [p for p in compose_file_dir.iterdir() if p.is_file()]
|
pod_files = [p for p in compose_file_dir.iterdir() if p.is_file()]
|
||||||
parsed_pod_files_map = parsed_pod_files_map_from_file_names(pod_files)
|
parsed_pod_files_map = parsed_pod_files_map_from_file_names(pod_files)
|
||||||
port_mappings_yml = _generate_kind_port_mappings(parsed_pod_files_map)
|
port_mappings_yml = _generate_kind_port_mappings(parsed_pod_files_map)
|
||||||
mounts_yml = _generate_kind_mounts(parsed_pod_files_map, deployment_dir, deployment_context)
|
mounts_yml = _generate_kind_mounts(
|
||||||
return (
|
parsed_pod_files_map, deployment_dir, deployment_context
|
||||||
"kind: Cluster\n"
|
)
|
||||||
"apiVersion: kind.x-k8s.io/v1alpha4\n"
|
|
||||||
|
# Check if unlimited_memlock is enabled
|
||||||
|
unlimited_memlock = deployment_context.spec.get_unlimited_memlock()
|
||||||
|
|
||||||
|
# Generate containerdConfigPatches for RuntimeClass support
|
||||||
|
containerd_patches_yml = _generate_containerd_config_patches(
|
||||||
|
deployment_dir, unlimited_memlock
|
||||||
|
)
|
||||||
|
|
||||||
|
# Add high-memlock spec file mount if needed
|
||||||
|
if unlimited_memlock:
|
||||||
|
spec_mount = _generate_high_memlock_spec_mount(deployment_dir)
|
||||||
|
if mounts_yml:
|
||||||
|
# Append to existing mounts
|
||||||
|
mounts_yml = mounts_yml.rstrip() + "\n" + spec_mount
|
||||||
|
else:
|
||||||
|
mounts_yml = f" extraMounts:\n{spec_mount}"
|
||||||
|
|
||||||
|
# Build the config - containerdConfigPatches must be at cluster level (before nodes)
|
||||||
|
config = "kind: Cluster\n" "apiVersion: kind.x-k8s.io/v1alpha4\n"
|
||||||
|
|
||||||
|
if containerd_patches_yml:
|
||||||
|
config += containerd_patches_yml
|
||||||
|
|
||||||
|
config += (
|
||||||
"nodes:\n"
|
"nodes:\n"
|
||||||
"- role: control-plane\n"
|
"- role: control-plane\n"
|
||||||
" kubeadmConfigPatches:\n"
|
" kubeadmConfigPatches:\n"
|
||||||
@ -331,7 +635,9 @@ def generate_kind_config(deployment_dir: Path, deployment_context):
|
|||||||
" kind: InitConfiguration\n"
|
" kind: InitConfiguration\n"
|
||||||
" nodeRegistration:\n"
|
" nodeRegistration:\n"
|
||||||
" kubeletExtraArgs:\n"
|
" kubeletExtraArgs:\n"
|
||||||
" node-labels: \"ingress-ready=true\"\n"
|
' node-labels: "ingress-ready=true"\n'
|
||||||
f"{port_mappings_yml}\n"
|
f"{port_mappings_yml}\n"
|
||||||
f"{mounts_yml}\n"
|
f"{mounts_yml}\n"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
return config
|
||||||
|
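Putting the pieces together, the generated kind config carries `containerdConfigPatches` at cluster level, followed by the single control-plane node with its port mappings and mounts. A rough example of the shape (values illustrative, patch text as shown earlier):

```python
# Illustrative shape of generate_kind_config() output for a deployment exposing
# ports 80/443 with unlimited memlock enabled (paths are examples):
#
# kind: Cluster
# apiVersion: kind.x-k8s.io/v1alpha4
# containerdConfigPatches:
#   - |-
#     ... (runtime handler patch as shown earlier)
# nodes:
# - role: control-plane
#   kubeadmConfigPatches:
#     ... (ingress-ready node label)
#   extraPortMappings:
#     - containerPort: 80
#       hostPort: 80
#     - containerPort: 443
#       hostPort: 443
#   extraMounts:
#     - hostPath: /abs/path/example-deployment/high-memlock-spec.json
#       containerPath: /abs/path/example-deployment/high-memlock-spec.json
```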
|||||||
43
stack_orchestrator/deploy/k8s/k8s_command.py
Normal file
@ -0,0 +1,43 @@
|
|||||||
|
# Copyright © 2024 Vulcanize
|
||||||
|
|
||||||
|
# This program is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Affero General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
|
||||||
|
# This program is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Affero General Public License for more details.
|
||||||
|
|
||||||
|
# You should have received a copy of the GNU Affero General Public License
|
||||||
|
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import click
|
||||||
|
|
||||||
|
from stack_orchestrator.deploy.k8s.helpers import get_kind_cluster
|
||||||
|
|
||||||
|
|
||||||
|
@click.group()
|
||||||
|
@click.pass_context
|
||||||
|
def command(ctx):
|
||||||
|
"""k8s cluster management commands"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
@command.group()
|
||||||
|
@click.pass_context
|
||||||
|
def list(ctx):
|
||||||
|
"""list k8s resources"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
@list.command()
|
||||||
|
@click.pass_context
|
||||||
|
def cluster(ctx):
|
||||||
|
"""Show the existing kind cluster"""
|
||||||
|
existing_cluster = get_kind_cluster()
|
||||||
|
if existing_cluster:
|
||||||
|
print(existing_cluster)
|
||||||
|
else:
|
||||||
|
print("No cluster found")
|
||||||
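The new command group can be exercised in isolation with Click's test runner; how it is mounted into the main `laconic-so` CLI is not shown in this diff.

```python
# Sketch only: exercises the group directly, outside the main CLI entry point.
from click.testing import CliRunner

from stack_orchestrator.deploy.k8s.k8s_command import command

result = CliRunner().invoke(command, ["list", "cluster"])
print(result.output)  # kind cluster name, or "No cluster found"
```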
@ -14,6 +14,7 @@
|
|||||||
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
||||||
|
|
||||||
import typing
|
import typing
|
||||||
|
from typing import Optional
|
||||||
import humanfriendly
|
import humanfriendly
|
||||||
|
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
@ -23,9 +24,9 @@ from stack_orchestrator import constants
|
|||||||
|
|
||||||
|
|
||||||
class ResourceLimits:
|
class ResourceLimits:
|
||||||
cpus: float = None
|
cpus: Optional[float] = None
|
||||||
memory: int = None
|
memory: Optional[int] = None
|
||||||
storage: int = None
|
storage: Optional[int] = None
|
||||||
|
|
||||||
def __init__(self, obj=None):
|
def __init__(self, obj=None):
|
||||||
if obj is None:
|
if obj is None:
|
||||||
@ -49,8 +50,8 @@ class ResourceLimits:
|
|||||||
|
|
||||||
|
|
||||||
class Resources:
|
class Resources:
|
||||||
limits: ResourceLimits = None
|
limits: Optional[ResourceLimits] = None
|
||||||
reservations: ResourceLimits = None
|
reservations: Optional[ResourceLimits] = None
|
||||||
|
|
||||||
def __init__(self, obj=None):
|
def __init__(self, obj=None):
|
||||||
if obj is None:
|
if obj is None:
|
||||||
@ -72,11 +73,10 @@ class Resources:
|
|||||||
|
|
||||||
|
|
||||||
class Spec:
|
class Spec:
|
||||||
|
|
||||||
obj: typing.Any
|
obj: typing.Any
|
||||||
file_path: Path
|
file_path: Optional[Path]
|
||||||
|
|
||||||
def __init__(self, file_path: Path = None, obj=None) -> None:
|
def __init__(self, file_path: Optional[Path] = None, obj=None) -> None:
|
||||||
if obj is None:
|
if obj is None:
|
||||||
obj = {}
|
obj = {}
|
||||||
self.file_path = file_path
|
self.file_path = file_path
|
||||||
@ -92,9 +92,8 @@ class Spec:
|
|||||||
return self.obj.get(item, default)
|
return self.obj.get(item, default)
|
||||||
|
|
||||||
def init_from_file(self, file_path: Path):
|
def init_from_file(self, file_path: Path):
|
||||||
with file_path:
|
self.obj = get_yaml().load(open(file_path, "r"))
|
||||||
self.obj = get_yaml().load(open(file_path, "r"))
|
self.file_path = file_path
|
||||||
self.file_path = file_path
|
|
||||||
|
|
||||||
def get_image_registry(self):
|
def get_image_registry(self):
|
||||||
return self.obj.get(constants.image_registry_key)
|
return self.obj.get(constants.image_registry_key)
|
||||||
@ -106,10 +105,14 @@ class Spec:
|
|||||||
return self.obj.get(constants.configmaps_key, {})
|
return self.obj.get(constants.configmaps_key, {})
|
||||||
|
|
||||||
def get_container_resources(self):
|
def get_container_resources(self):
|
||||||
return Resources(self.obj.get(constants.resources_key, {}).get("containers", {}))
|
return Resources(
|
||||||
|
self.obj.get(constants.resources_key, {}).get("containers", {})
|
||||||
|
)
|
||||||
|
|
||||||
def get_volume_resources(self):
|
def get_volume_resources(self):
|
||||||
return Resources(self.obj.get(constants.resources_key, {}).get(constants.volumes_key, {}))
|
return Resources(
|
||||||
|
self.obj.get(constants.resources_key, {}).get(constants.volumes_key, {})
|
||||||
|
)
|
||||||
|
|
||||||
def get_http_proxy(self):
|
def get_http_proxy(self):
|
||||||
return self.obj.get(constants.network_key, {}).get(constants.http_proxy_key, [])
|
return self.obj.get(constants.network_key, {}).get(constants.http_proxy_key, [])
|
||||||
@ -130,17 +133,57 @@ class Spec:
|
|||||||
return self.obj.get(constants.labels_key, {})
|
return self.obj.get(constants.labels_key, {})
|
||||||
|
|
||||||
def get_privileged(self):
|
def get_privileged(self):
|
||||||
return "true" == str(self.obj.get(constants.security_key, {}).get("privileged", "false")).lower()
|
return (
|
||||||
|
"true"
|
||||||
|
== str(
|
||||||
|
self.obj.get(constants.security_key, {}).get("privileged", "false")
|
||||||
|
).lower()
|
||||||
|
)
|
||||||
|
|
||||||
def get_capabilities(self):
|
def get_capabilities(self):
|
||||||
return self.obj.get(constants.security_key, {}).get("capabilities", [])
|
return self.obj.get(constants.security_key, {}).get("capabilities", [])
|
||||||
|
|
||||||
|
def get_unlimited_memlock(self):
|
||||||
|
return (
|
||||||
|
"true"
|
||||||
|
== str(
|
||||||
|
self.obj.get(constants.security_key, {}).get(
|
||||||
|
constants.unlimited_memlock_key, "false"
|
||||||
|
)
|
||||||
|
).lower()
|
||||||
|
)
|
||||||
|
|
||||||
|
def get_runtime_class(self):
|
||||||
|
"""Get runtime class name from spec, or derive from security settings.
|
||||||
|
|
||||||
|
The runtime class determines which containerd runtime handler to use,
|
||||||
|
allowing different pods to have different rlimit profiles (e.g., for
|
||||||
|
unlimited RLIMIT_MEMLOCK).
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Runtime class name string, or None to use default runtime.
|
||||||
|
"""
|
||||||
|
# Explicit runtime class takes precedence
|
||||||
|
explicit = self.obj.get(constants.security_key, {}).get(
|
||||||
|
constants.runtime_class_key, None
|
||||||
|
)
|
||||||
|
if explicit:
|
||||||
|
return explicit
|
||||||
|
|
||||||
|
# Auto-derive from unlimited-memlock setting
|
||||||
|
if self.get_unlimited_memlock():
|
||||||
|
return constants.high_memlock_runtime
|
||||||
|
|
||||||
|
return None # Use default runtime
|
||||||
|
|
||||||
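As a quick sketch of how these resolve: the literal key names below stand in for `constants.security_key`, `constants.unlimited_memlock_key`, and `constants.runtime_class_key`, whose exact string values (and the module path of `Spec`) are not visible in this hunk.

```python
# Sketch only: key strings and module path are assumptions.
from stack_orchestrator.deploy.spec import Spec

spec = Spec(obj={"security": {"unlimited-memlock": "true"}})
spec.get_unlimited_memlock()  # -> True
spec.get_runtime_class()      # -> constants.high_memlock_runtime (e.g. "high-memlock")

# An explicit runtime class in the spec takes precedence over the derived one:
spec = Spec(obj={"security": {"runtime-class": "my-custom-runtime"}})
spec.get_runtime_class()      # -> "my-custom-runtime"
```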
def get_deployment_type(self):
|
def get_deployment_type(self):
|
||||||
return self.obj.get(constants.deploy_to_key)
|
return self.obj.get(constants.deploy_to_key)
|
||||||
|
|
||||||
def is_kubernetes_deployment(self):
|
def is_kubernetes_deployment(self):
|
||||||
return self.get_deployment_type() in [constants.k8s_kind_deploy_type,
|
return self.get_deployment_type() in [
|
||||||
constants.k8s_deploy_type]
|
constants.k8s_kind_deploy_type,
|
||||||
|
constants.k8s_deploy_type,
|
||||||
|
]
|
||||||
|
|
||||||
def is_kind_deployment(self):
|
def is_kind_deployment(self):
|
||||||
return self.get_deployment_type() in [constants.k8s_kind_deploy_type]
|
return self.get_deployment_type() in [constants.k8s_kind_deploy_type]
|
||||||
|
|||||||
@ -19,7 +19,6 @@ from stack_orchestrator.util import get_yaml
|
|||||||
|
|
||||||
|
|
||||||
class Stack:
|
class Stack:
|
||||||
|
|
||||||
name: str
|
name: str
|
||||||
obj: typing.Any
|
obj: typing.Any
|
||||||
|
|
||||||
@ -27,5 +26,4 @@ class Stack:
|
|||||||
self.name = name
|
self.name = name
|
||||||
|
|
||||||
def init_from_file(self, file_path: Path):
|
def init_from_file(self, file_path: Path):
|
||||||
with file_path:
|
self.obj = get_yaml().load(open(file_path, "r"))
|
||||||
self.obj = get_yaml().load(open(file_path, "r"))
|
|
||||||
|
|||||||
@ -27,7 +27,9 @@ from stack_orchestrator.deploy.deploy_types import DeployCommandContext
|
|||||||
|
|
||||||
def _fixup_container_tag(deployment_dir: str, image: str):
|
def _fixup_container_tag(deployment_dir: str, image: str):
|
||||||
deployment_dir_path = Path(deployment_dir)
|
deployment_dir_path = Path(deployment_dir)
|
||||||
compose_file = deployment_dir_path.joinpath("compose", "docker-compose-webapp-template.yml")
|
compose_file = deployment_dir_path.joinpath(
|
||||||
|
"compose", "docker-compose-webapp-template.yml"
|
||||||
|
)
|
||||||
# replace "cerc/webapp-container:local" in the file with our image tag
|
# replace "cerc/webapp-container:local" in the file with our image tag
|
||||||
with open(compose_file) as rfile:
|
with open(compose_file) as rfile:
|
||||||
contents = rfile.read()
|
contents = rfile.read()
|
||||||
@ -39,13 +41,13 @@ def _fixup_container_tag(deployment_dir: str, image: str):
|
|||||||
def _fixup_url_spec(spec_file_name: str, url: str):
|
def _fixup_url_spec(spec_file_name: str, url: str):
|
||||||
# url is like: https://example.com/path
|
# url is like: https://example.com/path
|
||||||
parsed_url = urlparse(url)
|
parsed_url = urlparse(url)
|
||||||
http_proxy_spec = f'''
|
http_proxy_spec = f"""
|
||||||
http-proxy:
|
http-proxy:
|
||||||
- host-name: {parsed_url.hostname}
|
- host-name: {parsed_url.hostname}
|
||||||
routes:
|
routes:
|
||||||
- path: '{parsed_url.path if parsed_url.path else "/"}'
|
- path: '{parsed_url.path if parsed_url.path else "/"}'
|
||||||
proxy-to: webapp:80
|
proxy-to: webapp:80
|
||||||
'''
|
"""
|
||||||
spec_file_path = Path(spec_file_name)
|
spec_file_path = Path(spec_file_name)
|
||||||
with open(spec_file_path) as rfile:
|
with open(spec_file_path) as rfile:
|
||||||
contents = rfile.read()
|
contents = rfile.read()
|
||||||
@ -54,11 +56,15 @@ def _fixup_url_spec(spec_file_name: str, url: str):
|
|||||||
wfile.write(contents)
|
wfile.write(contents)
|
||||||
|
|
||||||
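For example, a URL of `https://app.example.com/admin` yields an http-proxy block along these lines (indentation trimmed for readability):

```python
# Illustration of the spec fragment produced above for one sample URL.
from urllib.parse import urlparse

parsed_url = urlparse("https://app.example.com/admin")
# http-proxy:
#   - host-name: app.example.com     # parsed_url.hostname
#     routes:
#       - path: '/admin'             # parsed_url.path, or "/" if empty
#         proxy-to: webapp:80
```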
|
|
||||||
def create_deployment(ctx, deployment_dir, image, url, kube_config, image_registry, env_file):
|
def create_deployment(
|
||||||
|
ctx, deployment_dir, image, url, kube_config, image_registry, env_file
|
||||||
|
):
|
||||||
# Do the equivalent of:
|
# Do the equivalent of:
|
||||||
# 1. laconic-so --stack webapp-template deploy --deploy-to k8s init --output webapp-spec.yml
|
# 1. laconic-so --stack webapp-template deploy --deploy-to k8s init \
|
||||||
|
# --output webapp-spec.yml
|
||||||
# --config (eqivalent of the contents of my-config.env)
|
# --config (eqivalent of the contents of my-config.env)
|
||||||
# 2. laconic-so --stack webapp-template deploy --deploy-to k8s create --deployment-dir test-deployment
|
# 2. laconic-so --stack webapp-template deploy --deploy-to k8s create \
|
||||||
|
# --deployment-dir test-deployment
|
||||||
# --spec-file webapp-spec.yml
|
# --spec-file webapp-spec.yml
|
||||||
# 3. Replace the container image tag with the specified image
|
# 3. Replace the container image tag with the specified image
|
||||||
deployment_dir_path = Path(deployment_dir)
|
deployment_dir_path = Path(deployment_dir)
|
||||||
@ -83,16 +89,12 @@ def create_deployment(ctx, deployment_dir, image, url, kube_config, image_regist
|
|||||||
kube_config,
|
kube_config,
|
||||||
image_registry,
|
image_registry,
|
||||||
spec_file_name,
|
spec_file_name,
|
||||||
None
|
None,
|
||||||
)
|
)
|
||||||
# Add the TLS and DNS spec
|
# Add the TLS and DNS spec
|
||||||
_fixup_url_spec(spec_file_name, url)
|
_fixup_url_spec(spec_file_name, url)
|
||||||
create_operation(
|
create_operation(
|
||||||
deploy_command_context,
|
deploy_command_context, spec_file_name, deployment_dir, False, None, None
|
||||||
spec_file_name,
|
|
||||||
deployment_dir,
|
|
||||||
None,
|
|
||||||
None
|
|
||||||
)
|
)
|
||||||
# Fix up the container tag inside the deployment compose file
|
# Fix up the container tag inside the deployment compose file
|
||||||
_fixup_container_tag(deployment_dir, image)
|
_fixup_container_tag(deployment_dir, image)
|
||||||
@ -102,7 +104,7 @@ def create_deployment(ctx, deployment_dir, image, url, kube_config, image_regist
|
|||||||
@click.group()
|
@click.group()
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(ctx):
|
def command(ctx):
|
||||||
'''manage a webapp deployment'''
|
"""manage a webapp deployment"""
|
||||||
|
|
||||||
# Check that --stack wasn't supplied
|
# Check that --stack wasn't supplied
|
||||||
if ctx.parent.obj.stack:
|
if ctx.parent.obj.stack:
|
||||||
@ -111,13 +113,20 @@ def command(ctx):
|
|||||||
|
|
||||||
@command.command()
|
@command.command()
|
||||||
@click.option("--kube-config", help="Provide a config file for a k8s deployment")
|
@click.option("--kube-config", help="Provide a config file for a k8s deployment")
|
||||||
@click.option("--image-registry", help="Provide a container image registry url for this k8s cluster")
|
@click.option(
|
||||||
@click.option("--deployment-dir", help="Create deployment files in this directory", required=True)
|
"--image-registry",
|
||||||
|
help="Provide a container image registry url for this k8s cluster",
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--deployment-dir", help="Create deployment files in this directory", required=True
|
||||||
|
)
|
||||||
@click.option("--image", help="image to deploy", required=True)
|
@click.option("--image", help="image to deploy", required=True)
|
||||||
@click.option("--url", help="url to serve", required=True)
|
@click.option("--url", help="url to serve", required=True)
|
||||||
@click.option("--env-file", help="environment file for webapp")
|
@click.option("--env-file", help="environment file for webapp")
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def create(ctx, deployment_dir, image, url, kube_config, image_registry, env_file):
|
def create(ctx, deployment_dir, image, url, kube_config, image_registry, env_file):
|
||||||
'''create a deployment for the specified webapp container'''
|
"""create a deployment for the specified webapp container"""
|
||||||
|
|
||||||
return create_deployment(ctx, deployment_dir, image, url, kube_config, image_registry, env_file)
|
return create_deployment(
|
||||||
|
ctx, deployment_dir, image, url, kube_config, image_registry, env_file
|
||||||
|
)
|
||||||
|
|||||||
@ -33,6 +33,7 @@ from stack_orchestrator.deploy.webapp.util import (
|
|||||||
LaconicRegistryClient,
|
LaconicRegistryClient,
|
||||||
TimedLogger,
|
TimedLogger,
|
||||||
build_container_image,
|
build_container_image,
|
||||||
|
confirm_auction,
|
||||||
push_container_image,
|
push_container_image,
|
||||||
file_hash,
|
file_hash,
|
||||||
deploy_to_k8s,
|
deploy_to_k8s,
|
||||||
@ -42,6 +43,7 @@ from stack_orchestrator.deploy.webapp.util import (
|
|||||||
match_owner,
|
match_owner,
|
||||||
skip_by_tag,
|
skip_by_tag,
|
||||||
confirm_payment,
|
confirm_payment,
|
||||||
|
load_known_requests,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@ -52,6 +54,7 @@ def process_app_deployment_request(
|
|||||||
deployment_record_namespace,
|
deployment_record_namespace,
|
||||||
dns_record_namespace,
|
dns_record_namespace,
|
||||||
default_dns_suffix,
|
default_dns_suffix,
|
||||||
|
dns_value,
|
||||||
deployment_parent_dir,
|
deployment_parent_dir,
|
||||||
kube_config,
|
kube_config,
|
||||||
image_registry,
|
image_registry,
|
||||||
@ -70,6 +73,7 @@ def process_app_deployment_request(
|
|||||||
app = laconic.get_record(
|
app = laconic.get_record(
|
||||||
app_deployment_request.attributes.application, require=True
|
app_deployment_request.attributes.application, require=True
|
||||||
)
|
)
|
||||||
|
assert app is not None # require=True ensures this
|
||||||
logger.log(f"Retrieved app record {app_deployment_request.attributes.application}")
|
logger.log(f"Retrieved app record {app_deployment_request.attributes.application}")
|
||||||
|
|
||||||
# 2. determine dns
|
# 2. determine dns
|
||||||
@ -109,7 +113,8 @@ def process_app_deployment_request(
|
|||||||
)
|
)
|
||||||
elif "preexisting" == fqdn_policy:
|
elif "preexisting" == fqdn_policy:
|
||||||
raise Exception(
|
raise Exception(
|
||||||
f"No pre-existing DnsRecord {dns_lrn} could be found for request {app_deployment_request.id}."
|
f"No pre-existing DnsRecord {dns_lrn} could be found for "
|
||||||
|
f"request {app_deployment_request.id}."
|
||||||
)
|
)
|
||||||
|
|
||||||
# 4. get build and runtime config from request
|
# 4. get build and runtime config from request
|
||||||
@ -125,7 +130,8 @@ def process_app_deployment_request(
|
|||||||
parsed = AttrDict(yaml.safe_load(decrypted.data))
|
parsed = AttrDict(yaml.safe_load(decrypted.data))
|
||||||
if record_owner not in parsed.authorized:
|
if record_owner not in parsed.authorized:
|
||||||
raise Exception(
|
raise Exception(
|
||||||
f"{record_owner} not authorized to access config {app_deployment_request.attributes.config.ref}"
|
f"{record_owner} not authorized to access config "
|
||||||
|
f"{app_deployment_request.attributes.config.ref}"
|
||||||
)
|
)
|
||||||
if "env" in parsed.config:
|
if "env" in parsed.config:
|
||||||
env.update(parsed.config.env)
|
env.update(parsed.config.env)
|
||||||
@ -153,8 +159,10 @@ def process_app_deployment_request(
|
|||||||
|
|
||||||
deployment_record = laconic.get_record(app_deployment_lrn)
|
deployment_record = laconic.get_record(app_deployment_lrn)
|
||||||
deployment_dir = os.path.join(deployment_parent_dir, fqdn)
|
deployment_dir = os.path.join(deployment_parent_dir, fqdn)
|
||||||
# At present we use this to generate a unique but stable ID for the app's host container
|
# At present we use this to generate a unique but stable ID for the
|
||||||
# TODO: implement support to derive this transparently from the already-unique deployment id
|
# app's host container
|
||||||
|
# TODO: implement support to derive this transparently from the
|
||||||
|
# already-unique deployment id
|
||||||
unique_deployment_id = hashlib.md5(fqdn.encode()).hexdigest()[:16]
|
unique_deployment_id = hashlib.md5(fqdn.encode()).hexdigest()[:16]
|
||||||
deployment_config_file = os.path.join(deployment_dir, "config.env")
|
deployment_config_file = os.path.join(deployment_dir, "config.env")
|
||||||
deployment_container_tag = "laconic-webapp/%s:local" % unique_deployment_id
|
deployment_container_tag = "laconic-webapp/%s:local" % unique_deployment_id
|
||||||
@ -163,11 +171,12 @@ def process_app_deployment_request(
|
|||||||
if not os.path.exists(deployment_dir):
|
if not os.path.exists(deployment_dir):
|
||||||
if deployment_record:
|
if deployment_record:
|
||||||
raise Exception(
|
raise Exception(
|
||||||
"Deployment record %s exists, but not deployment dir %s. Please remove name."
|
"Deployment record %s exists, but not deployment dir %s. "
|
||||||
% (app_deployment_lrn, deployment_dir)
|
"Please remove name." % (app_deployment_lrn, deployment_dir)
|
||||||
)
|
)
|
||||||
logger.log(
|
logger.log(
|
||||||
f"Creating webapp deployment in: {deployment_dir} with container id: {deployment_container_tag}"
|
f"Creating webapp deployment in: {deployment_dir} "
|
||||||
|
f"with container id: {deployment_container_tag}"
|
||||||
)
|
)
|
||||||
deploy_webapp.create_deployment(
|
deploy_webapp.create_deployment(
|
||||||
ctx,
|
ctx,
|
||||||
@ -184,7 +193,8 @@ def process_app_deployment_request(
|
|||||||
needs_k8s_deploy = False
|
needs_k8s_deploy = False
|
||||||
if force_rebuild:
|
if force_rebuild:
|
||||||
logger.log(
|
logger.log(
|
||||||
"--force-rebuild is enabled so the container will always be built now, even if nothing has changed in the app"
|
"--force-rebuild is enabled so the container will always be "
|
||||||
|
"built now, even if nothing has changed in the app"
|
||||||
)
|
)
|
||||||
# 6. build container (if needed)
|
# 6. build container (if needed)
|
||||||
# TODO: add a comment that explains what this code is doing (not clear to me)
|
# TODO: add a comment that explains what this code is doing (not clear to me)
|
||||||
@ -196,11 +206,12 @@ def process_app_deployment_request(
|
|||||||
needs_k8s_deploy = True
|
needs_k8s_deploy = True
|
||||||
# check if the image already exists
|
# check if the image already exists
|
||||||
shared_tag_exists = remote_image_exists(image_registry, app_image_shared_tag)
|
shared_tag_exists = remote_image_exists(image_registry, app_image_shared_tag)
|
||||||
# Note: in the code below, calls to add_tags_to_image() won't work at present.
|
# Note: in the code below, calls to add_tags_to_image() won't
|
||||||
# This is because SO deployment code in general re-names the container image
|
# work at present. This is because SO deployment code in general
|
||||||
# to be unique to the deployment. This is done transparently
|
# re-names the container image to be unique to the deployment.
|
||||||
# and so when we call add_tags_to_image() here and try to add tags to the remote image,
|
# This is done transparently and so when we call add_tags_to_image()
|
||||||
# we get the image name wrong. Accordingly I've disabled the relevant code for now.
|
# here and try to add tags to the remote image, we get the image
|
||||||
|
# name wrong. Accordingly I've disabled the relevant code for now.
|
||||||
# This is safe because we are running with --force-rebuild at present
|
# This is safe because we are running with --force-rebuild at present
|
||||||
if shared_tag_exists and not force_rebuild:
|
if shared_tag_exists and not force_rebuild:
|
||||||
# simply add our unique tag to the existing image and we are done
|
# simply add our unique tag to the existing image and we are done
|
||||||
@ -208,7 +219,9 @@ def process_app_deployment_request(
|
|||||||
f"(SKIPPED) Existing image found for this app: {app_image_shared_tag} "
|
f"(SKIPPED) Existing image found for this app: {app_image_shared_tag} "
|
||||||
"tagging it with: {deployment_container_tag} to use in this deployment"
|
"tagging it with: {deployment_container_tag} to use in this deployment"
|
||||||
)
|
)
|
||||||
# add_tags_to_image(image_registry, app_image_shared_tag, deployment_container_tag)
|
# add_tags_to_image(
|
||||||
|
# image_registry, app_image_shared_tag, deployment_container_tag
|
||||||
|
# )
|
||||||
logger.log("Tag complete")
|
logger.log("Tag complete")
|
||||||
else:
|
else:
|
||||||
extra_build_args = [] # TODO: pull from request
|
extra_build_args = [] # TODO: pull from request
|
||||||
@ -220,11 +233,15 @@ def process_app_deployment_request(
|
|||||||
logger.log(f"Pushing container image: {deployment_container_tag}")
|
logger.log(f"Pushing container image: {deployment_container_tag}")
|
||||||
push_container_image(deployment_dir, logger)
|
push_container_image(deployment_dir, logger)
|
||||||
logger.log("Push complete")
|
logger.log("Push complete")
|
||||||
# The build/push commands above will use the unique deployment tag, so now we need to add the shared tag.
|
# The build/push commands above will use the unique deployment
|
||||||
|
# tag, so now we need to add the shared tag.
|
||||||
logger.log(
|
logger.log(
|
||||||
f"(SKIPPED) Adding global app image tag: {app_image_shared_tag} to newly built image: {deployment_container_tag}"
|
f"(SKIPPED) Adding global app image tag: {app_image_shared_tag} "
|
||||||
|
f"to newly built image: {deployment_container_tag}"
|
||||||
)
|
)
|
||||||
# add_tags_to_image(image_registry, deployment_container_tag, app_image_shared_tag)
|
# add_tags_to_image(
|
||||||
|
# image_registry, deployment_container_tag, app_image_shared_tag
|
||||||
|
# )
|
||||||
logger.log("Tag complete")
|
logger.log("Tag complete")
|
||||||
else:
|
else:
|
||||||
logger.log("Requested app is already deployed, skipping build and image push")
|
logger.log("Requested app is already deployed, skipping build and image push")
|
||||||
@ -249,6 +266,7 @@ def process_app_deployment_request(
|
|||||||
dns_record,
|
dns_record,
|
||||||
dns_lrn,
|
dns_lrn,
|
||||||
deployment_dir,
|
deployment_dir,
|
||||||
|
dns_value,
|
||||||
app_deployment_request,
|
app_deployment_request,
|
||||||
webapp_deployer_record,
|
webapp_deployer_record,
|
||||||
logger,
|
logger,
|
||||||
@ -257,12 +275,6 @@ def process_app_deployment_request(
|
|||||||
logger.log("END - process_app_deployment_request")
|
logger.log("END - process_app_deployment_request")
|
||||||
|
|
||||||
|
|
||||||
def load_known_requests(filename):
|
|
||||||
if filename and os.path.exists(filename):
|
|
||||||
return json.load(open(filename, "r"))
|
|
||||||
return {}
|
|
||||||
|
|
||||||
|
|
||||||
def dump_known_requests(filename, requests, status="SEEN"):
|
def dump_known_requests(filename, requests, status="SEEN"):
|
||||||
if not filename:
|
if not filename:
|
||||||
return
|
return
|
||||||
@ -308,6 +320,11 @@ def dump_known_requests(filename, requests, status="SEEN"):
|
|||||||
help="How to handle requests with an FQDN: prohibit, allow, preexisting",
|
help="How to handle requests with an FQDN: prohibit, allow, preexisting",
|
||||||
default="prohibit",
|
default="prohibit",
|
||||||
)
|
)
|
||||||
|
@click.option(
|
||||||
|
"--ip",
|
||||||
|
help="IP address of the k8s deployment (to be set in DNS record)",
|
||||||
|
default=None,
|
||||||
|
)
|
||||||
@click.option("--record-namespace-dns", help="eg, lrn://laconic/dns", required=True)
|
@click.option("--record-namespace-dns", help="eg, lrn://laconic/dns", required=True)
|
||||||
@click.option(
|
@click.option(
|
||||||
"--record-namespace-deployments",
|
"--record-namespace-deployments",
|
||||||
@ -350,6 +367,12 @@ def dump_known_requests(filename, requests, status="SEEN"):
|
|||||||
"my payment address are examined).",
|
"my payment address are examined).",
|
||||||
is_flag=True,
|
is_flag=True,
|
||||||
)
|
)
|
||||||
|
@click.option(
|
||||||
|
"--auction-requests",
|
||||||
|
help="Handle requests with auction id set (skips payment confirmation).",
|
||||||
|
is_flag=True,
|
||||||
|
default=False,
|
||||||
|
)
|
||||||
@click.option(
|
@click.option(
|
||||||
"--config-upload-dir",
|
"--config-upload-dir",
|
||||||
help="The directory containing uploaded config.",
|
help="The directory containing uploaded config.",
|
||||||
@ -358,6 +381,11 @@ def dump_known_requests(filename, requests, status="SEEN"):
|
|||||||
@click.option(
|
@click.option(
|
||||||
"--private-key-file", help="The private key for decrypting config.", required=True
|
"--private-key-file", help="The private key for decrypting config.", required=True
|
||||||
)
|
)
|
||||||
|
@click.option(
|
||||||
|
"--registry-lock-file",
|
||||||
|
help="File path to use for registry mutex lock",
|
||||||
|
default=None,
|
||||||
|
)
|
||||||
@click.option(
|
@click.option(
|
||||||
"--private-key-passphrase",
|
"--private-key-passphrase",
|
||||||
help="The passphrase for the private key.",
|
help="The passphrase for the private key.",
|
||||||
@ -376,6 +404,7 @@ def command( # noqa: C901
|
|||||||
only_update_state,
|
only_update_state,
|
||||||
dns_suffix,
|
dns_suffix,
|
||||||
fqdn_policy,
|
fqdn_policy,
|
||||||
|
ip,
|
||||||
record_namespace_dns,
|
record_namespace_dns,
|
||||||
record_namespace_deployments,
|
record_namespace_deployments,
|
||||||
dry_run,
|
dry_run,
|
||||||
@ -390,6 +419,8 @@ def command( # noqa: C901
|
|||||||
private_key_file,
|
private_key_file,
|
||||||
private_key_passphrase,
|
private_key_passphrase,
|
||||||
all_requests,
|
all_requests,
|
||||||
|
auction_requests,
|
||||||
|
registry_lock_file,
|
||||||
):
|
):
|
||||||
if request_id and discover:
|
if request_id and discover:
|
||||||
print("Cannot specify both --request-id and --discover", file=sys.stderr)
|
print("Cannot specify both --request-id and --discover", file=sys.stderr)
|
||||||
@ -410,7 +441,8 @@ def command( # noqa: C901
|
|||||||
or not dns_suffix
|
or not dns_suffix
|
||||||
):
|
):
|
||||||
print(
|
print(
|
||||||
"--dns-suffix, --record-namespace-dns, and --record-namespace-deployments are all required",
|
"--dns-suffix, --record-namespace-dns, and "
|
||||||
|
"--record-namespace-deployments are all required",
|
||||||
file=sys.stderr,
|
file=sys.stderr,
|
||||||
)
|
)
|
||||||
sys.exit(2)
|
sys.exit(2)
|
||||||
@ -422,6 +454,13 @@ def command( # noqa: C901
|
|||||||
)
|
)
|
||||||
sys.exit(2)
|
sys.exit(2)
|
||||||
|
|
||||||
|
if fqdn_policy == "allow" and not ip:
|
||||||
|
print(
|
||||||
|
"--ip is required with 'allow' fqdn-policy",
|
||||||
|
file=sys.stderr,
|
||||||
|
)
|
||||||
|
sys.exit(2)
|
||||||
|
|
||||||
tempdir = tempfile.mkdtemp()
|
tempdir = tempfile.mkdtemp()
|
||||||
gpg = gnupg.GPG(gnupghome=tempdir)
|
gpg = gnupg.GPG(gnupghome=tempdir)
|
||||||
|
|
||||||
@ -441,19 +480,25 @@ def command( # noqa: C901
|
|||||||
include_tags = [tag.strip() for tag in include_tags.split(",") if tag]
|
include_tags = [tag.strip() for tag in include_tags.split(",") if tag]
|
||||||
exclude_tags = [tag.strip() for tag in exclude_tags.split(",") if tag]
|
exclude_tags = [tag.strip() for tag in exclude_tags.split(",") if tag]
|
||||||
|
|
||||||
laconic = LaconicRegistryClient(laconic_config, log_file=sys.stderr)
|
laconic = LaconicRegistryClient(
|
||||||
|
laconic_config, log_file=sys.stderr, mutex_lock_file=registry_lock_file
|
||||||
|
)
|
||||||
webapp_deployer_record = laconic.get_record(lrn, require=True)
|
webapp_deployer_record = laconic.get_record(lrn, require=True)
|
||||||
|
assert webapp_deployer_record is not None # require=True ensures this
|
||||||
|
assert webapp_deployer_record.attributes is not None
|
||||||
payment_address = webapp_deployer_record.attributes.paymentAddress
|
payment_address = webapp_deployer_record.attributes.paymentAddress
|
||||||
main_logger.log(f"Payment address: {payment_address}")
|
main_logger.log(f"Payment address: {payment_address}")
|
||||||
|
|
||||||
if min_required_payment and not payment_address:
|
if min_required_payment and not payment_address:
|
||||||
print(
|
print(
|
||||||
f"Minimum payment required, but no payment address listed for deployer: {lrn}.",
|
f"Minimum payment required, but no payment address listed "
|
||||||
|
f"for deployer: {lrn}.",
|
||||||
file=sys.stderr,
|
file=sys.stderr,
|
||||||
)
|
)
|
||||||
sys.exit(2)
|
sys.exit(2)
|
||||||
|
|
||||||
# Find deployment requests.
|
# Find deployment requests.
|
||||||
|
requests = []
|
||||||
# single request
|
# single request
|
||||||
if request_id:
|
if request_id:
|
||||||
main_logger.log(f"Retrieving request {request_id}...")
|
main_logger.log(f"Retrieving request {request_id}...")
|
||||||
@ -477,25 +522,35 @@ def command( # noqa: C901
|
|||||||
previous_requests = load_known_requests(state_file)
|
previous_requests = load_known_requests(state_file)
|
||||||
|
|
||||||
# Collapse related requests.
|
# Collapse related requests.
|
||||||
requests.sort(key=lambda r: r.createTime)
|
# Filter out None values and sort
|
||||||
requests.reverse()
|
valid_requests = [r for r in requests if r is not None]
|
||||||
|
valid_requests.sort(key=lambda r: r.createTime if r else "")
|
||||||
|
valid_requests.reverse()
|
||||||
requests_by_name = {}
|
requests_by_name = {}
|
||||||
skipped_by_name = {}
|
skipped_by_name = {}
|
||||||
for r in requests:
|
for r in valid_requests:
|
||||||
main_logger.log(f"BEGIN: Examining request {r.id}")
|
if not r:
|
||||||
|
continue
|
||||||
|
r_id = r.id if r else "unknown"
|
||||||
|
main_logger.log(f"BEGIN: Examining request {r_id}")
|
||||||
result = "PENDING"
|
result = "PENDING"
|
||||||
try:
|
try:
|
||||||
if (
|
if (
|
||||||
r.id in previous_requests
|
r_id in previous_requests
|
||||||
and previous_requests[r.id].get("status", "") != "RETRY"
|
and previous_requests[r_id].get("status", "") != "RETRY"
|
||||||
):
|
):
|
||||||
main_logger.log(f"Skipping request {r.id}, we've already seen it.")
|
main_logger.log(f"Skipping request {r_id}, we've already seen it.")
|
||||||
result = "SKIP"
|
result = "SKIP"
|
||||||
continue
|
continue
|
||||||
|
|
||||||
|
if not r.attributes:
|
||||||
|
main_logger.log(f"Skipping request {r_id}, no attributes.")
|
||||||
|
result = "ERROR"
|
||||||
|
continue
|
||||||
|
|
||||||
app = laconic.get_record(r.attributes.application)
|
app = laconic.get_record(r.attributes.application)
|
||||||
if not app:
|
if not app:
|
||||||
main_logger.log(f"Skipping request {r.id}, cannot locate app.")
|
main_logger.log(f"Skipping request {r_id}, cannot locate app.")
|
||||||
result = "ERROR"
|
result = "ERROR"
|
||||||
continue
|
continue
|
||||||
|
|
||||||
@ -503,7 +558,7 @@ def command( # noqa: C901
|
|||||||
if not requested_name:
|
if not requested_name:
|
||||||
requested_name = generate_hostname_for_app(app)
|
requested_name = generate_hostname_for_app(app)
|
||||||
main_logger.log(
|
main_logger.log(
|
||||||
"Generating name %s for request %s." % (requested_name, r.id)
|
"Generating name %s for request %s." % (requested_name, r_id)
|
||||||
)
|
)
|
||||||
|
|
||||||
if (
|
if (
|
||||||
@ -511,30 +566,33 @@ def command( # noqa: C901
|
|||||||
or requested_name in requests_by_name
|
or requested_name in requests_by_name
|
||||||
):
|
):
|
||||||
main_logger.log(
|
main_logger.log(
|
||||||
"Ignoring request %s, it has been superseded." % r.id
|
"Ignoring request %s, it has been superseded." % r_id
|
||||||
)
|
)
|
||||||
result = "SKIP"
|
result = "SKIP"
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if skip_by_tag(r, include_tags, exclude_tags):
|
if skip_by_tag(r, include_tags, exclude_tags):
|
||||||
|
r_tags = r.attributes.tags if r.attributes else None
|
||||||
main_logger.log(
|
main_logger.log(
|
||||||
"Skipping request %s, filtered by tag (include %s, exclude %s, present %s)"
|
"Skipping request %s, filtered by tag "
|
||||||
% (r.id, include_tags, exclude_tags, r.attributes.tags)
|
"(include %s, exclude %s, present %s)"
|
||||||
|
% (r_id, include_tags, exclude_tags, r_tags)
|
||||||
)
|
)
|
||||||
skipped_by_name[requested_name] = r
|
skipped_by_name[requested_name] = r
|
||||||
result = "SKIP"
|
result = "SKIP"
|
||||||
continue
|
continue
|
||||||
|
|
||||||
|
r_app = r.attributes.application if r.attributes else "unknown"
|
||||||
main_logger.log(
|
main_logger.log(
|
||||||
"Found pending request %s to run application %s on %s."
|
"Found pending request %s to run application %s on %s."
|
||||||
% (r.id, r.attributes.application, requested_name)
|
% (r_id, r_app, requested_name)
|
||||||
)
|
)
|
||||||
requests_by_name[requested_name] = r
|
requests_by_name[requested_name] = r
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
result = "ERROR"
|
result = "ERROR"
|
||||||
main_logger.log(f"ERROR examining request {r.id}: " + str(e))
|
main_logger.log(f"ERROR examining request {r_id}: " + str(e))
|
||||||
finally:
|
finally:
|
||||||
main_logger.log(f"DONE Examining request {r.id} with result {result}.")
|
main_logger.log(f"DONE Examining request {r_id} with result {result}.")
|
||||||
if result in ["ERROR"]:
|
if result in ["ERROR"]:
|
||||||
dump_known_requests(state_file, [r], status=result)
|
dump_known_requests(state_file, [r], status=result)
|
||||||
|
|
||||||
@ -563,11 +621,13 @@ def command( # noqa: C901
|
|||||||
cancellation_requests[r.id], r
|
cancellation_requests[r.id], r
|
||||||
):
|
):
|
||||||
main_logger.log(
|
main_logger.log(
|
||||||
f"Found deployment cancellation request for {r.id} at {cancellation_requests[r.id].id}"
|
f"Found deployment cancellation request for {r.id} "
|
||||||
|
f"at {cancellation_requests[r.id].id}"
|
||||||
)
|
)
|
||||||
elif r.id in deployments_by_request:
|
elif r.id in deployments_by_request:
|
||||||
main_logger.log(
|
main_logger.log(
|
||||||
f"Found satisfied request for {r.id} at {deployments_by_request[r.id].id}"
|
f"Found satisfied request for {r.id} "
|
||||||
|
f"at {deployments_by_request[r.id].id}"
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
if (
|
if (
|
||||||
@ -575,15 +635,31 @@ def command( # noqa: C901
|
|||||||
and previous_requests[r.id].get("status", "") != "RETRY"
|
and previous_requests[r.id].get("status", "") != "RETRY"
|
||||||
):
|
):
|
||||||
main_logger.log(
|
main_logger.log(
|
||||||
f"Skipping unsatisfied request {r.id} because we have seen it before."
|
f"Skipping unsatisfied request {r.id} "
|
||||||
|
"because we have seen it before."
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
main_logger.log(f"Request {r.id} needs to processed.")
|
main_logger.log(f"Request {r.id} needs to processed.")
|
||||||
requests_to_check_for_payment.append(r)
|
requests_to_check_for_payment.append(r)
|
||||||
|
|
||||||
requests_to_execute = []
|
requests_to_execute = []
|
||||||
if min_required_payment:
|
for r in requests_to_check_for_payment:
|
||||||
for r in requests_to_check_for_payment:
|
if r.attributes.auction:
|
||||||
|
if auction_requests:
|
||||||
|
if confirm_auction(laconic, r, lrn, payment_address, main_logger):
|
||||||
|
main_logger.log(f"{r.id}: Auction confirmed.")
|
||||||
|
requests_to_execute.append(r)
|
||||||
|
else:
|
||||||
|
main_logger.log(
|
||||||
|
f"Skipping request {r.id}: unable to verify auction."
|
||||||
|
)
|
||||||
|
dump_known_requests(state_file, [r], status="SKIP")
|
||||||
|
else:
|
||||||
|
main_logger.log(
|
||||||
|
f"Skipping request {r.id}: not handling requests with auction."
|
||||||
|
)
|
||||||
|
dump_known_requests(state_file, [r], status="SKIP")
|
||||||
|
elif min_required_payment:
|
||||||
main_logger.log(f"{r.id}: Confirming payment...")
|
main_logger.log(f"{r.id}: Confirming payment...")
|
||||||
if confirm_payment(
|
if confirm_payment(
|
||||||
laconic,
|
laconic,
|
||||||
@ -599,8 +675,8 @@ def command( # noqa: C901
|
|||||||
f"Skipping request {r.id}: unable to verify payment."
|
f"Skipping request {r.id}: unable to verify payment."
|
||||||
)
|
)
|
||||||
dump_known_requests(state_file, [r], status="UNPAID")
|
dump_known_requests(state_file, [r], status="UNPAID")
|
||||||
else:
|
else:
|
||||||
requests_to_execute = requests_to_check_for_payment
|
requests_to_execute.append(r)
|
||||||
|
|
||||||
main_logger.log(
|
main_logger.log(
|
||||||
"Found %d unsatisfied request(s) to process." % len(requests_to_execute)
|
"Found %d unsatisfied request(s) to process." % len(requests_to_execute)
|
||||||
@ -613,8 +689,12 @@ def command( # noqa: C901
|
|||||||
status = "ERROR"
|
status = "ERROR"
|
||||||
run_log_file = None
|
run_log_file = None
|
||||||
run_reg_client = laconic
|
run_reg_client = laconic
|
||||||
|
build_logger = None
|
||||||
try:
|
try:
|
||||||
run_id = f"{r.id}-{str(time.time()).split('.')[0]}-{str(uuid.uuid4()).split('-')[0]}"
|
run_id = (
|
||||||
|
f"{r.id}-{str(time.time()).split('.')[0]}-"
|
||||||
|
f"{str(uuid.uuid4()).split('-')[0]}"
|
||||||
|
)
|
||||||
if log_dir:
|
if log_dir:
|
||||||
run_log_dir = os.path.join(log_dir, r.id)
|
run_log_dir = os.path.join(log_dir, r.id)
|
||||||
if not os.path.exists(run_log_dir):
|
if not os.path.exists(run_log_dir):
|
||||||
@ -625,7 +705,9 @@ def command( # noqa: C901
|
|||||||
)
|
)
|
||||||
run_log_file = open(run_log_file_path, "wt")
|
run_log_file = open(run_log_file_path, "wt")
|
||||||
run_reg_client = LaconicRegistryClient(
|
run_reg_client = LaconicRegistryClient(
|
||||||
laconic_config, log_file=run_log_file
|
laconic_config,
|
||||||
|
log_file=run_log_file,
|
||||||
|
mutex_lock_file=registry_lock_file,
|
||||||
)
|
)
|
||||||
|
|
||||||
build_logger = TimedLogger(run_id, run_log_file)
|
build_logger = TimedLogger(run_id, run_log_file)
|
||||||
@ -637,6 +719,7 @@ def command( # noqa: C901
|
|||||||
record_namespace_deployments,
|
record_namespace_deployments,
|
||||||
record_namespace_dns,
|
record_namespace_dns,
|
||||||
dns_suffix,
|
dns_suffix,
|
||||||
|
ip,
|
||||||
os.path.abspath(deployment_parent_dir),
|
os.path.abspath(deployment_parent_dir),
|
||||||
kube_config,
|
kube_config,
|
||||||
image_registry,
|
image_registry,
|
||||||
@ -652,7 +735,8 @@ def command( # noqa: C901
|
|||||||
status = "DEPLOYED"
|
status = "DEPLOYED"
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
main_logger.log(f"ERROR {r.id}:" + str(e))
|
main_logger.log(f"ERROR {r.id}:" + str(e))
|
||||||
build_logger.log("ERROR: " + str(e))
|
if build_logger:
|
||||||
|
build_logger.log("ERROR: " + str(e))
|
||||||
finally:
|
finally:
|
||||||
main_logger.log(f"DEPLOYING {r.id}: END - {status}")
|
main_logger.log(f"DEPLOYING {r.id}: END - {status}")
|
||||||
if build_logger:
|
if build_logger:
|
||||||
|
|||||||
249
stack_orchestrator/deploy/webapp/handle_deployment_auction.py
Normal file
@ -0,0 +1,249 @@
|
|||||||
|
# Copyright ©2023 Vulcanize
|
||||||
|
# This program is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Affero General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
|
||||||
|
# This program is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Affero General Public License for more details.
|
||||||
|
|
||||||
|
# You should have received a copy of the GNU Affero General Public License
|
||||||
|
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import json
|
||||||
|
|
||||||
|
import click
|
||||||
|
|
||||||
|
from stack_orchestrator.deploy.webapp.util import (
|
||||||
|
AttrDict,
|
||||||
|
LaconicRegistryClient,
|
||||||
|
TimedLogger,
|
||||||
|
load_known_requests,
|
||||||
|
AUCTION_KIND_PROVIDER,
|
||||||
|
AuctionStatus,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def process_app_deployment_auction(
|
||||||
|
ctx,
|
||||||
|
laconic: LaconicRegistryClient,
|
||||||
|
request,
|
||||||
|
current_status,
|
||||||
|
reveal_file_path,
|
||||||
|
bid_amount,
|
||||||
|
logger,
|
||||||
|
):
|
||||||
|
# Fetch auction details
|
||||||
|
auction_id = request.attributes.auction
|
||||||
|
auction = laconic.get_auction(auction_id)
|
||||||
|
if not auction:
|
||||||
|
raise Exception(f"Unable to locate auction: {auction_id}")
|
||||||
|
|
||||||
|
# Check auction kind
|
||||||
|
if auction.kind != AUCTION_KIND_PROVIDER:
|
||||||
|
raise Exception(
|
||||||
|
f"Auction kind needs to be ${AUCTION_KIND_PROVIDER}, got {auction.kind}"
|
||||||
|
)
|
||||||
|
|
||||||
|
if current_status == "PENDING":
|
||||||
|
# Skip if pending auction not in commit state
|
||||||
|
if auction.status != AuctionStatus.COMMIT:
|
||||||
|
logger.log(
|
||||||
|
f"Skipping pending request, auction {auction_id} "
|
||||||
|
f"status: {auction.status}"
|
||||||
|
)
|
||||||
|
return "SKIP", ""
|
||||||
|
|
||||||
|
# Check max_price
|
||||||
|
bid_amount_int = int(bid_amount)
|
||||||
|
max_price_int = int(auction.maxPrice.quantity)
|
||||||
|
if max_price_int < bid_amount_int:
|
||||||
|
logger.log(
|
||||||
|
f"Skipping auction {auction_id} with max_price ({max_price_int}) "
|
||||||
|
f"less than bid_amount ({bid_amount_int})"
|
||||||
|
)
|
||||||
|
return "SKIP", ""
|
||||||
|
|
||||||
|
# Bid on the auction
|
||||||
|
reveal_file_path = laconic.commit_bid(auction_id, bid_amount_int)
|
||||||
|
logger.log(f"Commited bid on auction {auction_id} with amount {bid_amount_int}")
|
||||||
|
|
||||||
|
return "COMMIT", reveal_file_path
|
||||||
|
|
||||||
|
if current_status == "COMMIT":
|
||||||
|
# Return if auction still in commit state
|
||||||
|
if auction.status == AuctionStatus.COMMIT:
|
||||||
|
logger.log(f"Auction {auction_id} status: {auction.status}")
|
||||||
|
return current_status, reveal_file_path
|
||||||
|
|
||||||
|
# Reveal bid
|
||||||
|
if auction.status == AuctionStatus.REVEAL:
|
||||||
|
laconic.reveal_bid(auction_id, reveal_file_path)
|
||||||
|
logger.log(f"Revealed bid on auction {auction_id}")
|
||||||
|
|
||||||
|
return "REVEAL", reveal_file_path
|
||||||
|
|
||||||
|
raise Exception(f"Unexpected auction {auction_id} status: {auction.status}")
|
||||||
|
|
||||||
|
if current_status == "REVEAL":
|
||||||
|
# Return if auction still in reveal state
|
||||||
|
if auction.status == AuctionStatus.REVEAL:
|
||||||
|
logger.log(f"Auction {auction_id} status: {auction.status}")
|
||||||
|
return current_status, reveal_file_path
|
||||||
|
|
||||||
|
# Return if auction is completed
|
||||||
|
if auction.status == AuctionStatus.COMPLETED:
|
||||||
|
logger.log(f"Auction {auction_id} completed")
|
||||||
|
return "COMPLETED", ""
|
||||||
|
|
||||||
|
raise Exception(f"Unexpected auction {auction_id} status: {auction.status}")
|
||||||
|
|
||||||
|
raise Exception(f"Got request with unexpected status: {current_status}")
|
||||||
|
|
||||||
|
|
||||||
|
def dump_known_auction_requests(filename, requests, status="SEEN"):
|
||||||
|
if not filename:
|
||||||
|
return
|
||||||
|
known_requests = load_known_requests(filename)
|
||||||
|
for r in requests:
|
||||||
|
known_requests[r.id] = {"revealFile": r.revealFile, "status": status}
|
||||||
|
with open(filename, "w") as f:
|
||||||
|
json.dump(known_requests, f)
|
||||||
|
|
||||||
|
|
||||||
|
@click.command()
|
||||||
|
@click.option(
|
||||||
|
"--laconic-config", help="Provide a config file for laconicd", required=True
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--state-file",
|
||||||
|
help="File to store state about previously seen auction requests.",
|
||||||
|
required=True,
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--bid-amount",
|
||||||
|
help="Bid to place on application deployment auctions (in alnt)",
|
||||||
|
required=True,
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--registry-lock-file",
|
||||||
|
help="File path to use for registry mutex lock",
|
||||||
|
default=None,
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--dry-run", help="Don't do anything, just report what would be done.", is_flag=True
|
||||||
|
)
|
||||||
|
@click.pass_context
|
||||||
|
def command(
|
||||||
|
ctx,
|
||||||
|
laconic_config,
|
||||||
|
state_file,
|
||||||
|
bid_amount,
|
||||||
|
registry_lock_file,
|
||||||
|
dry_run,
|
||||||
|
):
|
||||||
|
if int(bid_amount) < 0:
|
||||||
|
print("--bid-amount cannot be less than 0", file=sys.stderr)
|
||||||
|
sys.exit(2)
|
||||||
|
|
||||||
|
logger = TimedLogger(file=sys.stderr)
|
||||||
|
|
||||||
|
try:
|
||||||
|
laconic = LaconicRegistryClient(
|
||||||
|
laconic_config, log_file=sys.stderr, mutex_lock_file=registry_lock_file
|
||||||
|
)
|
||||||
|
auctions_requests = laconic.app_deployment_auctions()
|
||||||
|
|
||||||
|
previous_requests = {}
|
||||||
|
logger.log(f"Loading known auctions from {state_file}...")
|
||||||
|
previous_requests = load_known_requests(state_file)
|
||||||
|
|
||||||
|
# Process new requests first
|
||||||
|
auctions_requests.sort(key=lambda r: r.createTime)
|
||||||
|
auctions_requests.reverse()
|
||||||
|
|
||||||
|
requests_to_execute = []
|
||||||
|
|
||||||
|
for r in auctions_requests:
|
||||||
|
logger.log(f"BEGIN: Examining request {r.id}")
|
||||||
|
result_status = "PENDING"
|
||||||
|
reveal_file_path = ""
|
||||||
|
try:
|
||||||
|
application = r.attributes.application
|
||||||
|
|
||||||
|
# Handle already seen requests
|
||||||
|
if r.id in previous_requests:
|
||||||
|
# If it's not in commit or reveal status, skip the request as we've
|
||||||
|
# already seen it
|
||||||
|
current_status = previous_requests[r.id].get("status", "")
|
||||||
|
result_status = current_status
|
||||||
|
if current_status not in ["COMMIT", "REVEAL"]:
|
||||||
|
logger.log(f"Skipping request {r.id}, we've already seen it.")
|
||||||
|
continue
|
||||||
|
|
||||||
|
reveal_file_path = previous_requests[r.id].get("revealFile", "")
|
||||||
|
logger.log(
|
||||||
|
f"Found existing auction request {r.id} for application "
|
||||||
|
f"{application}, status {current_status}."
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
# It's a fresh request, check application record
|
||||||
|
app = laconic.get_record(application)
|
||||||
|
if not app:
|
||||||
|
logger.log(f"Skipping request {r.id}, cannot locate app.")
|
||||||
|
result_status = "ERROR"
|
||||||
|
continue
|
||||||
|
|
||||||
|
logger.log(
|
||||||
|
f"Found pending auction request {r.id} for application "
|
||||||
|
f"{application}."
|
||||||
|
)
|
||||||
|
|
||||||
|
# Add requests to be processed
|
||||||
|
requests_to_execute.append((r, result_status, reveal_file_path))
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
result_status = "ERROR"
|
||||||
|
logger.log(f"ERROR: examining request {r.id}: " + str(e))
|
||||||
|
finally:
|
||||||
|
logger.log(
|
||||||
|
f"DONE: Examining request {r.id} with result {result_status}."
|
||||||
|
)
|
||||||
|
if result_status in ["ERROR"]:
|
||||||
|
dump_known_auction_requests(
|
||||||
|
state_file,
|
||||||
|
[AttrDict({"id": r.id, "revealFile": reveal_file_path})],
|
||||||
|
result_status,
|
||||||
|
)
|
||||||
|
|
||||||
|
logger.log(f"Found {len(requests_to_execute)} request(s) to process.")
|
||||||
|
|
||||||
|
if not dry_run:
|
||||||
|
for r, current_status, reveal_file_path in requests_to_execute:
|
||||||
|
logger.log(f"Processing {r.id}: BEGIN")
|
||||||
|
result_status = "ERROR"
|
||||||
|
try:
|
||||||
|
result_status, reveal_file_path = process_app_deployment_auction(
|
||||||
|
ctx,
|
||||||
|
laconic,
|
||||||
|
r,
|
||||||
|
current_status,
|
||||||
|
reveal_file_path,
|
||||||
|
bid_amount,
|
||||||
|
logger,
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
logger.log(f"ERROR {r.id}:" + str(e))
|
||||||
|
finally:
|
||||||
|
logger.log(f"Processing {r.id}: END - {result_status}")
|
||||||
|
dump_known_auction_requests(
|
||||||
|
state_file,
|
||||||
|
[AttrDict({"id": r.id, "revealFile": reveal_file_path})],
|
||||||
|
result_status,
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
logger.log("UNCAUGHT ERROR:" + str(e))
|
||||||
|
raise e
|
||||||
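The handler above persists progress in the --state-file between runs: dump_known_auction_requests() keys the JSON by request id and records the reveal file path plus the last status, and load_known_requests() reads it back so only requests still in COMMIT or REVEAL are re-examined. A minimal sketch of that round trip, with hypothetical request ids and paths:
import json
import tempfile

# Shape mirrors dump_known_auction_requests(); ids and paths are placeholders.
state = {
    "request-id-1": {"revealFile": "/srv/reveals/request-id-1.json", "status": "COMMIT"},
    "request-id-2": {"revealFile": "", "status": "SKIP"},
}

with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    json.dump(state, f)
    state_file = f.name

# Equivalent to load_known_requests(state_file) on the next invocation.
with open(state_file) as f:
    print(json.load(f))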
124
stack_orchestrator/deploy/webapp/publish_deployment_auction.py
Normal file
@ -0,0 +1,124 @@
|
|||||||
|
# Copyright ©2023 Vulcanize
|
||||||
|
# This program is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Affero General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
|
||||||
|
# This program is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Affero General Public License for more details.
|
||||||
|
|
||||||
|
# You should have received a copy of the GNU Affero General Public License
|
||||||
|
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import sys
|
||||||
|
|
||||||
|
import click
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
from stack_orchestrator.deploy.webapp.util import (
|
||||||
|
AUCTION_KIND_PROVIDER,
|
||||||
|
TOKEN_DENOM,
|
||||||
|
LaconicRegistryClient,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def fatal(msg: str):
|
||||||
|
print(msg, file=sys.stderr)
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
@click.command()
|
||||||
|
@click.option(
|
||||||
|
"--laconic-config", help="Provide a config file for laconicd", required=True
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--app",
|
||||||
|
help="The LRN of the application to deploy.",
|
||||||
|
required=True,
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--commits-duration",
|
||||||
|
help="Auction commits duration (in seconds) (default: 600).",
|
||||||
|
default=600,
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--reveals-duration",
|
||||||
|
help="Auction reveals duration (in seconds) (default: 600).",
|
||||||
|
default=600,
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--commit-fee",
|
||||||
|
help="Auction bid commit fee (in alnt) (default: 100000).",
|
||||||
|
default=100000,
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--reveal-fee",
|
||||||
|
help="Auction bid reveal fee (in alnt) (default: 100000).",
|
||||||
|
default=100000,
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--max-price",
|
||||||
|
help="Max acceptable bid price (in alnt).",
|
||||||
|
required=True,
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--num-providers",
|
||||||
|
help="Max acceptable bid price (in alnt).",
|
||||||
|
required=True,
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--dry-run",
|
||||||
|
help="Don't publish anything, just report what would be done.",
|
||||||
|
is_flag=True,
|
||||||
|
)
|
||||||
|
@click.pass_context
|
||||||
|
def command(
|
||||||
|
ctx,
|
||||||
|
laconic_config,
|
||||||
|
app,
|
||||||
|
commits_duration,
|
||||||
|
reveals_duration,
|
||||||
|
commit_fee,
|
||||||
|
reveal_fee,
|
||||||
|
max_price,
|
||||||
|
num_providers,
|
||||||
|
dry_run,
|
||||||
|
):
|
||||||
|
laconic = LaconicRegistryClient(laconic_config)
|
||||||
|
|
||||||
|
app_record = laconic.get_record(app)
|
||||||
|
if not app_record:
|
||||||
|
fatal(f"Unable to locate app: {app}")
|
||||||
|
|
||||||
|
provider_auction_params = {
|
||||||
|
"kind": AUCTION_KIND_PROVIDER,
|
||||||
|
"commits_duration": commits_duration,
|
||||||
|
"reveals_duration": reveals_duration,
|
||||||
|
"denom": TOKEN_DENOM,
|
||||||
|
"commit_fee": commit_fee,
|
||||||
|
"reveal_fee": reveal_fee,
|
||||||
|
"max_price": max_price,
|
||||||
|
"num_providers": num_providers,
|
||||||
|
}
|
||||||
|
auction_id = laconic.create_deployment_auction(provider_auction_params)
|
||||||
|
print("Deployment auction created:", auction_id)
|
||||||
|
|
||||||
|
if not auction_id:
|
||||||
|
fatal("Unable to create a provider auction")
|
||||||
|
|
||||||
|
deployment_auction = {
|
||||||
|
"record": {
|
||||||
|
"type": "ApplicationDeploymentAuction",
|
||||||
|
"application": app,
|
||||||
|
"auction": auction_id,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if dry_run:
|
||||||
|
print(yaml.dump(deployment_auction))
|
||||||
|
return
|
||||||
|
|
||||||
|
# Publish the deployment auction record
|
||||||
|
laconic.publish(deployment_auction)
|
||||||
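Taken together, the command creates an on-chain provider auction from the parameters above and then links it to the application with an ApplicationDeploymentAuction record. A sketch of that record as --dry-run would print it; the LRN and auction id below are placeholders, not values from this change:
import yaml

deployment_auction = {
    "record": {
        "type": "ApplicationDeploymentAuction",
        "application": "lrn://example/applications/my-app",  # hypothetical app LRN
        "auction": "d4f8c1...",  # id returned by create_deployment_auction()
    }
}
print(yaml.dump(deployment_auction))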
@ -64,7 +64,11 @@ def command( # noqa: C901
|
|||||||
):
|
):
|
||||||
laconic = LaconicRegistryClient(laconic_config)
|
laconic = LaconicRegistryClient(laconic_config)
|
||||||
if not payment_address:
|
if not payment_address:
|
||||||
payment_address = laconic.whoami().address
|
whoami_result = laconic.whoami()
|
||||||
|
if whoami_result and whoami_result.address:
|
||||||
|
payment_address = whoami_result.address
|
||||||
|
else:
|
||||||
|
raise ValueError("Could not determine payment address from laconic whoami")
|
||||||
|
|
||||||
pub_key = base64.b64encode(open(public_key_file, "rb").read()).decode("ASCII")
|
pub_key = base64.b64encode(open(public_key_file, "rb").read()).decode("ASCII")
|
||||||
hostname = urlparse(api_url).hostname
|
hostname = urlparse(api_url).hostname
|
||||||
|
|||||||
79
stack_orchestrator/deploy/webapp/registry_mutex.py
Normal file
@ -0,0 +1,79 @@
|
|||||||
|
from functools import wraps
|
||||||
|
import os
|
||||||
|
import time
|
||||||
|
|
||||||
|
# Define default file path for the lock
|
||||||
|
DEFAULT_LOCK_FILE_PATH = "/tmp/registry_mutex_lock_file"
|
||||||
|
LOCK_TIMEOUT = 30
|
||||||
|
LOCK_RETRY_INTERVAL = 3
|
||||||
|
|
||||||
|
|
||||||
|
def acquire_lock(client, lock_file_path, timeout):
|
||||||
|
# Lock already acquired by the current client
|
||||||
|
if client.mutex_lock_acquired:
|
||||||
|
return
|
||||||
|
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
# Check if lock file exists and is potentially stale
|
||||||
|
if os.path.exists(lock_file_path):
|
||||||
|
with open(lock_file_path, "r") as lock_file:
|
||||||
|
timestamp = float(lock_file.read().strip())
|
||||||
|
|
||||||
|
# If lock is stale, remove the lock file
|
||||||
|
if time.time() - timestamp > timeout:
|
||||||
|
print(f"Stale lock detected, removing lock file {lock_file_path}")
|
||||||
|
os.remove(lock_file_path)
|
||||||
|
else:
|
||||||
|
print(
|
||||||
|
f"Lock file {lock_file_path} exists and is recent, waiting..."
|
||||||
|
)
|
||||||
|
time.sleep(LOCK_RETRY_INTERVAL)
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Try to create a new lock file with the current timestamp
|
||||||
|
fd = os.open(lock_file_path, os.O_CREAT | os.O_EXCL | os.O_RDWR)
|
||||||
|
with os.fdopen(fd, "w") as lock_file:
|
||||||
|
lock_file.write(str(time.time()))
|
||||||
|
|
||||||
|
client.mutex_lock_acquired = True
|
||||||
|
print(f"Registry lock acquired, {lock_file_path}")
|
||||||
|
|
||||||
|
# Lock successfully acquired
|
||||||
|
return
|
||||||
|
|
||||||
|
except FileExistsError:
|
||||||
|
print(f"Lock file {lock_file_path} exists, waiting...")
|
||||||
|
time.sleep(LOCK_RETRY_INTERVAL)
|
||||||
|
|
||||||
|
|
||||||
|
def release_lock(client, lock_file_path):
|
||||||
|
try:
|
||||||
|
os.remove(lock_file_path)
|
||||||
|
|
||||||
|
client.mutex_lock_acquired = False
|
||||||
|
print(f"Registry lock released, {lock_file_path}")
|
||||||
|
except FileNotFoundError:
|
||||||
|
# Lock file already removed
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def registry_mutex():
|
||||||
|
def decorator(func):
|
||||||
|
@wraps(func)
|
||||||
|
def wrapper(self, *args, **kwargs):
|
||||||
|
lock_file_path = DEFAULT_LOCK_FILE_PATH
|
||||||
|
if self.mutex_lock_file:
|
||||||
|
lock_file_path = self.mutex_lock_file
|
||||||
|
|
||||||
|
# Acquire the lock before running the function
|
||||||
|
acquire_lock(self, lock_file_path, LOCK_TIMEOUT)
|
||||||
|
try:
|
||||||
|
return func(self, *args, **kwargs)
|
||||||
|
finally:
|
||||||
|
# Release the lock after the function completes
|
||||||
|
release_lock(self, lock_file_path)
|
||||||
|
|
||||||
|
return wrapper
|
||||||
|
|
||||||
|
return decorator
|
||||||
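The decorator only assumes the wrapped object exposes mutex_lock_file and mutex_lock_acquired, which is what lets LaconicRegistryClient pick up the new mutex_lock_file argument elsewhere in this change. A minimal usage sketch with a hypothetical client class:
from stack_orchestrator.deploy.webapp.registry_mutex import registry_mutex


class DummyRegistryClient:
    def __init__(self, lock_file=None):
        # Attributes the decorator reads; None falls back to DEFAULT_LOCK_FILE_PATH.
        self.mutex_lock_file = lock_file
        self.mutex_lock_acquired = False

    @registry_mutex()
    def publish(self, record):
        # Only one process at a time reaches this point for a given lock file.
        print(f"publishing {record}")


DummyRegistryClient("/tmp/example-registry.lock").publish({"type": "Example"})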
@ -3,7 +3,6 @@
|
|||||||
# it under the terms of the GNU Affero General Public License as published by
|
# it under the terms of the GNU Affero General Public License as published by
|
||||||
# the Free Software Foundation, either version 3 of the License, or
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
# (at your option) any later version.
|
# (at your option) any later version.
|
||||||
import base64
|
|
||||||
|
|
||||||
# This program is distributed in the hope that it will be useful,
|
# This program is distributed in the hope that it will be useful,
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
@ -17,6 +16,8 @@ import shutil
|
|||||||
import sys
|
import sys
|
||||||
import tempfile
|
import tempfile
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
|
from typing import NoReturn
|
||||||
|
import base64
|
||||||
|
|
||||||
import gnupg
|
import gnupg
|
||||||
import click
|
import click
|
||||||
@ -24,12 +25,14 @@ import requests
|
|||||||
import yaml
|
import yaml
|
||||||
|
|
||||||
from stack_orchestrator.deploy.webapp.util import (
|
from stack_orchestrator.deploy.webapp.util import (
|
||||||
|
AUCTION_KIND_PROVIDER,
|
||||||
|
AuctionStatus,
|
||||||
LaconicRegistryClient,
|
LaconicRegistryClient,
|
||||||
)
|
)
|
||||||
from dotenv import dotenv_values
|
from dotenv import dotenv_values
|
||||||
|
|
||||||
|
|
||||||
def fatal(msg: str):
|
def fatal(msg: str) -> NoReturn:
|
||||||
print(msg, file=sys.stderr)
|
print(msg, file=sys.stderr)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
@ -43,16 +46,22 @@ def fatal(msg: str):
|
|||||||
help="The LRN of the application to deploy.",
|
help="The LRN of the application to deploy.",
|
||||||
required=True,
|
required=True,
|
||||||
)
|
)
|
||||||
|
@click.option(
|
||||||
|
"--auction-id",
|
||||||
|
help="Deployment auction id. Can be used instead of deployer and payment.",
|
||||||
|
)
|
||||||
@click.option(
|
@click.option(
|
||||||
"--deployer",
|
"--deployer",
|
||||||
help="The LRN of the deployer to process this request.",
|
help="The LRN of the deployer to process this request.",
|
||||||
required=True,
|
|
||||||
)
|
)
|
||||||
@click.option("--env-file", help="environment file for webapp")
|
@click.option("--env-file", help="environment file for webapp")
|
||||||
@click.option("--config-ref", help="The ref of an existing config upload to use.")
|
@click.option("--config-ref", help="The ref of an existing config upload to use.")
|
||||||
@click.option(
|
@click.option(
|
||||||
"--make-payment",
|
"--make-payment",
|
||||||
help="The payment to make (in alnt). The value should be a number or 'auto' to use the deployer's minimum required payment.",
|
help=(
|
||||||
|
"The payment to make (in alnt). The value should be a number or "
|
||||||
|
"'auto' to use the deployer's minimum required payment."
|
||||||
|
),
|
||||||
)
|
)
|
||||||
@click.option(
|
@click.option(
|
||||||
"--use-payment", help="The TX id of an existing, unused payment", default=None
|
"--use-payment", help="The TX id of an existing, unused payment", default=None
|
||||||
@ -64,10 +73,11 @@ def fatal(msg: str):
|
|||||||
is_flag=True,
|
is_flag=True,
|
||||||
)
|
)
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(
|
def command( # noqa: C901
|
||||||
ctx,
|
ctx,
|
||||||
laconic_config,
|
laconic_config,
|
||||||
app,
|
app,
|
||||||
|
auction_id,
|
||||||
deployer,
|
deployer,
|
||||||
env_file,
|
env_file,
|
||||||
config_ref,
|
config_ref,
|
||||||
@ -75,70 +85,182 @@ def command(
|
|||||||
use_payment,
|
use_payment,
|
||||||
dns,
|
dns,
|
||||||
dry_run,
|
dry_run,
|
||||||
): # noqa: C901
|
):
|
||||||
tempdir = tempfile.mkdtemp()
|
if auction_id and deployer:
|
||||||
try:
|
print("Cannot specify both --auction-id and --deployer", file=sys.stderr)
|
||||||
laconic = LaconicRegistryClient(laconic_config)
|
sys.exit(2)
|
||||||
|
|
||||||
app_record = laconic.get_record(app)
|
if not auction_id and not deployer:
|
||||||
if not app_record:
|
print("Must specify either --auction-id or --deployer", file=sys.stderr)
|
||||||
fatal(f"Unable to locate app: {app}")
|
sys.exit(2)
|
||||||
|
|
||||||
|
if auction_id and (make_payment or use_payment):
|
||||||
|
print(
|
||||||
|
"Cannot specify --auction-id with --make-payment or --use-payment",
|
||||||
|
file=sys.stderr,
|
||||||
|
)
|
||||||
|
sys.exit(2)
|
||||||
|
|
||||||
|
if env_file and config_ref:
|
||||||
|
fatal("Cannot use --env-file and --config-ref at the same time.")
|
||||||
|
|
||||||
|
laconic = LaconicRegistryClient(laconic_config)
|
||||||
|
|
||||||
|
app_record = laconic.get_record(app)
|
||||||
|
if not app_record:
|
||||||
|
fatal(f"Unable to locate app: {app}")
|
||||||
|
|
||||||
|
# Deployers to send requests to
|
||||||
|
deployer_records = []
|
||||||
|
|
||||||
|
auction = None
|
||||||
|
auction_winners = None
|
||||||
|
if auction_id:
|
||||||
|
# Fetch auction record for given auction
|
||||||
|
auction_records_by_id = laconic.app_deployment_auctions({"auction": auction_id})
|
||||||
|
if len(auction_records_by_id) == 0:
|
||||||
|
fatal(f"Unable to locate record for auction: {auction_id}")
|
||||||
|
|
||||||
|
# Cross check app against application in the auction record
|
||||||
|
auction_app = auction_records_by_id[0].attributes.application
|
||||||
|
if auction_app != app:
|
||||||
|
fatal(
|
||||||
|
f"Requested application {app} does not match application "
|
||||||
|
f"from auction record {auction_app}"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Fetch auction details
|
||||||
|
auction = laconic.get_auction(auction_id)
|
||||||
|
if not auction:
|
||||||
|
fatal(f"Unable to locate auction: {auction_id}")
|
||||||
|
|
||||||
|
# Check auction owner
|
||||||
|
whoami = laconic.whoami()
|
||||||
|
if not whoami or not whoami.address:
|
||||||
|
fatal("Unable to determine current account address")
|
||||||
|
if auction.ownerAddress != whoami.address:
|
||||||
|
fatal(f"Auction {auction_id} owner mismatch")
|
||||||
|
|
||||||
|
# Check auction kind
|
||||||
|
auction_kind = auction.kind if auction else None
|
||||||
|
if auction_kind != AUCTION_KIND_PROVIDER:
|
||||||
|
fatal(
|
||||||
|
f"Auction kind needs to be ${AUCTION_KIND_PROVIDER}, got {auction_kind}"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Check auction status
|
||||||
|
auction_status = auction.status if auction else None
|
||||||
|
if auction_status != AuctionStatus.COMPLETED:
|
||||||
|
fatal(f"Auction {auction_id} not completed yet, status {auction_status}")
|
||||||
|
|
||||||
|
# Check that winner list is not empty
|
||||||
|
winner_addresses = auction.winnerAddresses if auction else []
|
||||||
|
if not winner_addresses or len(winner_addresses) == 0:
|
||||||
|
fatal(f"Auction {auction_id} has no winners")
|
||||||
|
|
||||||
|
auction_winners = winner_addresses
|
||||||
|
|
||||||
|
# Get deployer record for all the auction winners
|
||||||
|
for auction_winner in auction_winners:
|
||||||
|
# TODO: Match auction winner address with provider address?
|
||||||
|
deployer_records_by_owner = laconic.webapp_deployers(
|
||||||
|
{"paymentAddress": auction_winner}
|
||||||
|
)
|
||||||
|
if len(deployer_records_by_owner) == 0:
|
||||||
|
print(
|
||||||
|
f"WARNING: Unable to locate deployer for auction winner "
|
||||||
|
f"{auction_winner}"
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Take first record with name set
|
||||||
|
target_deployer_record = deployer_records_by_owner[0]
|
||||||
|
for r in deployer_records_by_owner:
|
||||||
|
if len(r.names) > 0:
|
||||||
|
target_deployer_record = r
|
||||||
|
break
|
||||||
|
deployer_records.append(target_deployer_record)
|
||||||
|
else:
|
||||||
deployer_record = laconic.get_record(deployer)
|
deployer_record = laconic.get_record(deployer)
|
||||||
if not deployer_record:
|
if not deployer_record:
|
||||||
fatal(f"Unable to locate deployer: {deployer}")
|
fatal(f"Unable to locate deployer: {deployer}")
|
||||||
|
|
||||||
if env_file and config_ref:
|
deployer_records.append(deployer_record)
|
||||||
fatal("Cannot use --env-file and --config-ref at the same time.")
|
|
||||||
|
|
||||||
# If env_file
|
# Create and send request to each deployer
|
||||||
|
deployment_requests = []
|
||||||
|
for deployer_record in deployer_records:
|
||||||
|
# Upload config to deployers if env_file is passed
|
||||||
if env_file:
|
if env_file:
|
||||||
gpg = gnupg.GPG(gnupghome=tempdir)
|
tempdir = tempfile.mkdtemp()
|
||||||
|
try:
|
||||||
|
gpg = gnupg.GPG(gnupghome=tempdir)
|
||||||
|
|
||||||
# Import the deployer's public key
|
# Import the deployer's public key
|
||||||
result = gpg.import_keys(
|
result = gpg.import_keys(
|
||||||
base64.b64decode(deployer_record.attributes.publicKey)
|
base64.b64decode(deployer_record.attributes.publicKey)
|
||||||
)
|
)
|
||||||
if 1 != result.imported:
|
if 1 != result.imported:
|
||||||
fatal("Failed to import deployer's public key.")
|
fatal("Failed to import deployer's public key.")
|
||||||
|
|
||||||
recip = gpg.list_keys()[0]["uids"][0]
|
recip = gpg.list_keys()[0]["uids"][0]
|
||||||
|
|
||||||
# Wrap the config
|
# Wrap the config
|
||||||
config = {
|
whoami_result = laconic.whoami()
|
||||||
# Include account (and payment?) details
|
if not whoami_result or not whoami_result.address:
|
||||||
"authorized": [laconic.whoami().address],
|
fatal("Unable to determine current account address")
|
||||||
"config": {"env": dict(dotenv_values(env_file))},
|
config = {
|
||||||
}
|
# Include account (and payment?) details
|
||||||
serialized = yaml.dump(config)
|
"authorized": [whoami_result.address],
|
||||||
|
"config": {"env": dict(dotenv_values(env_file))},
|
||||||
|
}
|
||||||
|
serialized = yaml.dump(config)
|
||||||
|
|
||||||
# Encrypt
|
# Encrypt
|
||||||
result = gpg.encrypt(serialized, recip, always_trust=True, armor=False)
|
result = gpg.encrypt(serialized, recip, always_trust=True, armor=False)
|
||||||
if not result.ok:
|
if not result.ok:
|
||||||
fatal("Failed to encrypt config.")
|
fatal("Failed to encrypt config.")
|
||||||
|
|
||||||
# Upload it to the deployer's API
|
# Upload it to the deployer's API
|
||||||
response = requests.post(
|
response = requests.post(
|
||||||
f"{deployer_record.attributes.apiUrl}/upload/config",
|
f"{deployer_record.attributes.apiUrl}/upload/config",
|
||||||
data=result.data,
|
data=result.data,
|
||||||
headers={"Content-Type": "application/octet-stream"},
|
headers={"Content-Type": "application/octet-stream"},
|
||||||
)
|
)
|
||||||
if not response.ok:
|
if not response.ok:
|
||||||
response.raise_for_status()
|
response.raise_for_status()
|
||||||
|
|
||||||
config_ref = response.json()["id"]
|
config_ref = response.json()["id"]
|
||||||
|
finally:
|
||||||
|
shutil.rmtree(tempdir, ignore_errors=True)
|
||||||
|
|
||||||
|
target_deployer = deployer
|
||||||
|
if (not deployer) and len(deployer_record.names):
|
||||||
|
target_deployer = deployer_record.names[0]
|
||||||
|
|
||||||
|
app_name = (
|
||||||
|
app_record.attributes.name
|
||||||
|
if app_record and app_record.attributes
|
||||||
|
else "unknown"
|
||||||
|
)
|
||||||
|
app_version = (
|
||||||
|
app_record.attributes.version
|
||||||
|
if app_record and app_record.attributes
|
||||||
|
else "unknown"
|
||||||
|
)
|
||||||
deployment_request = {
|
deployment_request = {
|
||||||
"record": {
|
"record": {
|
||||||
"type": "ApplicationDeploymentRequest",
|
"type": "ApplicationDeploymentRequest",
|
||||||
"application": app,
|
"application": app,
|
||||||
"version": "1.0.0",
|
"version": "1.0.0",
|
||||||
"name": f"{app_record.attributes.name}@{app_record.attributes.version}",
|
"name": f"{app_name}@{app_version}",
|
||||||
"deployer": deployer,
|
"deployer": target_deployer,
|
||||||
"meta": {"when": str(datetime.utcnow())},
|
"meta": {"when": str(datetime.utcnow())},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if auction_id:
|
||||||
|
deployment_request["record"]["auction"] = auction_id
|
||||||
|
|
||||||
if config_ref:
|
if config_ref:
|
||||||
deployment_request["record"]["config"] = {"ref": config_ref}
|
deployment_request["record"]["config"] = {"ref": config_ref}
|
||||||
|
|
||||||
@ -165,11 +287,12 @@ def command(
|
|||||||
elif use_payment:
|
elif use_payment:
|
||||||
deployment_request["record"]["payment"] = use_payment
|
deployment_request["record"]["payment"] = use_payment
|
||||||
|
|
||||||
|
deployment_requests.append(deployment_request)
|
||||||
|
|
||||||
|
# Send all requests
|
||||||
|
for deployment_request in deployment_requests:
|
||||||
if dry_run:
|
if dry_run:
|
||||||
print(yaml.dump(deployment_request))
|
print(yaml.dump(deployment_request))
|
||||||
return
|
continue
|
||||||
|
|
||||||
# Send the request
|
|
||||||
laconic.publish(deployment_request)
|
laconic.publish(deployment_request)
|
||||||
finally:
|
|
||||||
shutil.rmtree(tempdir, ignore_errors=True)
|
|
||||||
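With the new --auction-id path, one ApplicationDeploymentRequest is built per auction winner and carries the auction id instead of a payment. Roughly what a single per-deployer record looks like, with placeholder values:
from datetime import datetime

import yaml

deployment_request = {
    "record": {
        "type": "ApplicationDeploymentRequest",
        "application": "lrn://example/applications/my-app",  # hypothetical LRN
        "version": "1.0.0",
        "name": "my-app@0.1.0",
        "deployer": "lrn://example/deployers/auction-winner",  # one request per winner
        "meta": {"when": str(datetime.utcnow())},
        "auction": "d4f8c1...",  # replaces --make-payment / --use-payment
    }
}
print(yaml.dump(deployment_request))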
|
|||||||
106
stack_orchestrator/deploy/webapp/request_webapp_undeployment.py
Normal file
@ -0,0 +1,106 @@
|
|||||||
|
# Copyright ©2023 Vulcanize
|
||||||
|
# This program is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Affero General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
|
||||||
|
# This program is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Affero General Public License for more details.
|
||||||
|
|
||||||
|
# You should have received a copy of the GNU Affero General Public License
|
||||||
|
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import sys
|
||||||
|
|
||||||
|
import click
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
from stack_orchestrator.deploy.webapp.util import LaconicRegistryClient
|
||||||
|
|
||||||
|
|
||||||
|
def fatal(msg: str) -> None:
|
||||||
|
print(msg, file=sys.stderr)
|
||||||
|
sys.exit(1) # noqa: This function never returns
|
||||||
|
|
||||||
|
|
||||||
|
@click.command()
|
||||||
|
@click.option(
|
||||||
|
"--laconic-config", help="Provide a config file for laconicd", required=True
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--deployer", help="The LRN of the deployer to process this request.", required=True
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--deployment",
|
||||||
|
help="Deployment record (ApplicationDeploymentRecord) id of the deployment.",
|
||||||
|
required=True,
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--make-payment",
|
||||||
|
help=(
|
||||||
|
"The payment to make (in alnt). The value should be a number or "
|
||||||
|
"'auto' to use the deployer's minimum required payment."
|
||||||
|
),
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--use-payment", help="The TX id of an existing, unused payment", default=None
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--dry-run",
|
||||||
|
help="Don't publish anything, just report what would be done.",
|
||||||
|
is_flag=True,
|
||||||
|
)
|
||||||
|
@click.pass_context
|
||||||
|
def command(
|
||||||
|
ctx,
|
||||||
|
laconic_config,
|
||||||
|
deployer,
|
||||||
|
deployment,
|
||||||
|
make_payment,
|
||||||
|
use_payment,
|
||||||
|
dry_run,
|
||||||
|
):
|
||||||
|
if make_payment and use_payment:
|
||||||
|
fatal("Cannot use --make-payment and --use-payment at the same time.")
|
||||||
|
|
||||||
|
laconic = LaconicRegistryClient(laconic_config)
|
||||||
|
|
||||||
|
deployer_record = laconic.get_record(deployer)
|
||||||
|
if not deployer_record:
|
||||||
|
fatal(f"Unable to locate deployer: {deployer}")
|
||||||
|
|
||||||
|
undeployment_request = {
|
||||||
|
"record": {
|
||||||
|
"type": "ApplicationDeploymentRemovalRequest",
|
||||||
|
"version": "1.0.0",
|
||||||
|
"deployer": deployer,
|
||||||
|
"deployment": deployment,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if make_payment:
|
||||||
|
amount = 0
|
||||||
|
if dry_run:
|
||||||
|
undeployment_request["record"]["payment"] = "DRY_RUN"
|
||||||
|
elif "auto" == make_payment:
|
||||||
|
attrs = deployer_record.attributes if deployer_record else None
|
||||||
|
if attrs and "minimumPayment" in attrs:
|
||||||
|
amount = int(attrs.minimumPayment.replace("alnt", ""))
|
||||||
|
else:
|
||||||
|
amount = make_payment
|
||||||
|
if amount:
|
||||||
|
attrs = deployer_record.attributes if deployer_record else None
|
||||||
|
if attrs and attrs.paymentAddress:
|
||||||
|
receipt = laconic.send_tokens(attrs.paymentAddress, amount)
|
||||||
|
undeployment_request["record"]["payment"] = receipt.tx.hash
|
||||||
|
print("Payment TX:", receipt.tx.hash)
|
||||||
|
elif use_payment:
|
||||||
|
undeployment_request["record"]["payment"] = use_payment
|
||||||
|
|
||||||
|
if dry_run:
|
||||||
|
print(yaml.dump(undeployment_request))
|
||||||
|
return
|
||||||
|
|
||||||
|
laconic.publish(undeployment_request)
|
||||||
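The --make-payment "auto" path above derives the amount from the deployer's advertised minimumPayment attribute, which is a string such as "10000alnt". A small sketch of that resolution with illustrative names:

from typing import Optional


def resolve_payment_amount(make_payment: str, minimum_payment: Optional[str]) -> int:
    # "auto" means: pay exactly what the deployer advertises as its minimum
    if make_payment == "auto":
        return int(minimum_payment.replace("alnt", "")) if minimum_payment else 0
    return int(make_payment)


assert resolve_payment_amount("auto", "10000alnt") == 10000
assert resolve_payment_amount("2500", None) == 2500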
@@ -18,7 +18,8 @@
 # env vars:
 # CERC_REPO_BASE_DIR defaults to ~/cerc

-# TODO: display the available list of containers; allow re-build of either all or specific containers
+# TODO: display the available list of containers; allow re-build of either
+# all or specific containers

 import hashlib
 import click
@@ -36,30 +37,62 @@ WEBAPP_PORT = 80
 @click.option("--port", help="port to use (default random)")
 @click.pass_context
 def command(ctx, image, env_file, port):
-    '''run the specified webapp container'''
+    """run the specified webapp container"""

-    env = {}
+    env: dict[str, str] = {}
     if env_file:
-        env = dotenv_values(env_file)
+        # Filter out None values from dotenv
+        for k, v in dotenv_values(env_file).items():
+            if v is not None:
+                env[k] = v
+
     unique_cluster_descriptor = f"{image},{env}"
     hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()
     cluster = f"laconic-webapp-{hash}"

-    deployer = getDeployer(type=constants.compose_deploy_type,
-                           deployment_context=None,
-                           compose_files=None,
-                           compose_project_name=cluster,
-                           compose_env_file=None)
+    deployer = getDeployer(
+        type=constants.compose_deploy_type,
+        deployment_context=None,
+        compose_files=None,
+        compose_project_name=cluster,
+        compose_env_file=None,
+    )
+
+    if not deployer:
+        print("Failed to create deployer", file=click.get_text_stream("stderr"))
+        ctx.exit(1)
+        return  # Unreachable, but helps type checker
+
     ports = []
     if port:
         ports = [(port, WEBAPP_PORT)]
-    container = deployer.run(image, command=[], user=None, volumes=[], entrypoint=None, env=env, ports=ports, detach=True)
+    container = deployer.run(
+        image,
+        command=[],
+        user=None,
+        volumes=[],
+        entrypoint=None,
+        env=env,
+        ports=ports,
+        detach=True,
+    )

     # Make configurable?
     webappPort = f"{WEBAPP_PORT}/tcp"
     # TODO: This assumes a Docker container object...
-    if webappPort in container.network_settings.ports:
+    # Check if container has network_settings (Docker container object)
+    if (
+        container
+        and hasattr(container, "network_settings")
+        and container.network_settings
+        and hasattr(container.network_settings, "ports")
+        and container.network_settings.ports
+        and webappPort in container.network_settings.ports
+    ):
         mapping = container.network_settings.ports[webappPort][0]
-        print(f"""Image: {image}\nID: {container.id}\nURL: http://localhost:{mapping['HostPort']}""")
+        container_id = getattr(container, "id", "unknown")
+        print(
+            f"Image: {image}\n"
+            f"ID: {container_id}\n"
+            f"URL: http://localhost:{mapping['HostPort']}"
+        )
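The cluster name above is derived deterministically from the image name and the resolved environment, so running the same webapp with the same settings reuses the same compose project. The derivation in isolation (the same md5 construction as the code above; the example image and env values are illustrative):

import hashlib


def webapp_cluster_name(image: str, env: dict) -> str:
    # Note: the dict's repr participates in the hash, so key order matters
    unique_cluster_descriptor = f"{image},{env}"
    digest = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()
    return f"laconic-webapp-{digest}"


print(webapp_cluster_name("cerc/test-progressive-web-app:local", {"CERC_WEBAPP_DEBUG": "1"}))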
@@ -43,7 +43,13 @@ def process_app_removal_request(
     deployment_record = laconic.get_record(
         app_removal_request.attributes.deployment, require=True
     )
+    assert deployment_record is not None  # require=True ensures this
+    assert deployment_record.attributes is not None

     dns_record = laconic.get_record(deployment_record.attributes.dns, require=True)
+    assert dns_record is not None  # require=True ensures this
+    assert dns_record.attributes is not None

     deployment_dir = os.path.join(
         deployment_parent_dir, dns_record.attributes.name.lower()
     )
@@ -51,40 +57,50 @@ def process_app_removal_request(
     if not os.path.exists(deployment_dir):
         raise Exception("Deployment directory %s does not exist." % deployment_dir)

-    # Check if the removal request is from the owner of the DnsRecord or deployment record.
+    # Check if the removal request is from the owner of the DnsRecord or
+    # deployment record.
     matched_owner = match_owner(app_removal_request, deployment_record, dns_record)

     # Or of the original deployment request.
     if not matched_owner and deployment_record.attributes.request:
-        matched_owner = match_owner(
-            app_removal_request,
-            laconic.get_record(deployment_record.attributes.request, require=True),
-        )
+        original_request = laconic.get_record(
+            deployment_record.attributes.request, require=True
+        )
+        assert original_request is not None  # require=True ensures this
+        matched_owner = match_owner(app_removal_request, original_request)

     if matched_owner:
-        main_logger.log("Matched deployment ownership:", matched_owner)
+        main_logger.log(f"Matched deployment ownership: {matched_owner}")
     else:
+        deployment_id = deployment_record.id if deployment_record else "unknown"
+        request_id = app_removal_request.id if app_removal_request else "unknown"
         raise Exception(
             "Unable to confirm ownership of deployment %s for removal request %s"
-            % (deployment_record.id, app_removal_request.id)
+            % (deployment_id, request_id)
         )

-    # TODO(telackey): Call the function directly. The easiest way to build the correct click context is to
-    # exec the process, but it would be better to refactor so we could just call down_operation with the
-    # necessary parameters
+    # TODO(telackey): Call the function directly. The easiest way to build
+    # the correct click context is to exec the process, but it would be better
+    # to refactor so we could just call down_operation with the necessary
+    # parameters
     down_command = [sys.argv[0], "deployment", "--dir", deployment_dir, "down"]
     if delete_volumes:
         down_command.append("--delete-volumes")
     result = subprocess.run(down_command)
     result.check_returncode()

+    deployer_name = (
+        webapp_deployer_record.names[0]
+        if webapp_deployer_record and webapp_deployer_record.names
+        else ""
+    )
     removal_record = {
         "record": {
             "type": "ApplicationDeploymentRemovalRecord",
             "version": "1.0.0",
-            "request": app_removal_request.id,
+            "request": app_removal_request.id if app_removal_request else "",
-            "deployment": deployment_record.id,
+            "deployment": deployment_record.id if deployment_record else "",
-            "deployer": webapp_deployer_record.names[0],
+            "deployer": deployer_name,
         }
     }

@@ -94,11 +110,11 @@ def process_app_removal_request(
     laconic.publish(removal_record)

     if delete_names:
-        if deployment_record.names:
+        if deployment_record and deployment_record.names:
             for name in deployment_record.names:
                 laconic.delete_name(name)

-        if dns_record.names:
+        if dns_record and dns_record.names:
             for name in dns_record.names:
                 laconic.delete_name(name)
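Ownership of a removal request is established by intersecting owner sets: the request against the deployment record, the DnsRecord, and, failing that, the original deployment request. A simplified standalone sketch of that intersection; the real match_owner in util.py takes registry records, so the plain owner lists here are stand-ins:

from typing import Iterable, Optional


def first_common_owner(
    request_owners: Iterable[str], *other_owner_sets: Iterable[str]
) -> Optional[str]:
    # Return the first owner of the request that also owns one of the other records
    for owner in request_owners:
        for owners in other_owner_sets:
            if owner in owners:
                return owner
    return None


assert first_common_owner(["laconic1abc"], ["laconic1abc", "laconic1xyz"]) == "laconic1abc"
assert first_common_owner(["laconic1new"], ["laconic1abc"]) is None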
@ -178,6 +194,11 @@ def dump_known_requests(filename, requests):
|
|||||||
"my payment address are examined).",
|
"my payment address are examined).",
|
||||||
is_flag=True,
|
is_flag=True,
|
||||||
)
|
)
|
||||||
|
@click.option(
|
||||||
|
"--registry-lock-file",
|
||||||
|
help="File path to use for registry mutex lock",
|
||||||
|
default=None,
|
||||||
|
)
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command( # noqa: C901
|
def command( # noqa: C901
|
||||||
ctx,
|
ctx,
|
||||||
@ -195,6 +216,7 @@ def command( # noqa: C901
|
|||||||
min_required_payment,
|
min_required_payment,
|
||||||
lrn,
|
lrn,
|
||||||
all_requests,
|
all_requests,
|
||||||
|
registry_lock_file,
|
||||||
):
|
):
|
||||||
if request_id and discover:
|
if request_id and discover:
|
||||||
print("Cannot specify both --request-id and --discover", file=sys.stderr)
|
print("Cannot specify both --request-id and --discover", file=sys.stderr)
|
||||||
@ -212,19 +234,25 @@ def command( # noqa: C901
|
|||||||
include_tags = [tag.strip() for tag in include_tags.split(",") if tag]
|
include_tags = [tag.strip() for tag in include_tags.split(",") if tag]
|
||||||
exclude_tags = [tag.strip() for tag in exclude_tags.split(",") if tag]
|
exclude_tags = [tag.strip() for tag in exclude_tags.split(",") if tag]
|
||||||
|
|
||||||
laconic = LaconicRegistryClient(laconic_config, log_file=sys.stderr)
|
laconic = LaconicRegistryClient(
|
||||||
|
laconic_config, log_file=sys.stderr, mutex_lock_file=registry_lock_file
|
||||||
|
)
|
||||||
deployer_record = laconic.get_record(lrn, require=True)
|
deployer_record = laconic.get_record(lrn, require=True)
|
||||||
|
assert deployer_record is not None # require=True ensures this
|
||||||
|
assert deployer_record.attributes is not None
|
||||||
payment_address = deployer_record.attributes.paymentAddress
|
payment_address = deployer_record.attributes.paymentAddress
|
||||||
main_logger.log(f"Payment address: {payment_address}")
|
main_logger.log(f"Payment address: {payment_address}")
|
||||||
|
|
||||||
if min_required_payment and not payment_address:
|
if min_required_payment and not payment_address:
|
||||||
print(
|
print(
|
||||||
f"Minimum payment required, but no payment address listed for deployer: {lrn}.",
|
f"Minimum payment required, but no payment address listed "
|
||||||
|
f"for deployer: {lrn}.",
|
||||||
file=sys.stderr,
|
file=sys.stderr,
|
||||||
)
|
)
|
||||||
sys.exit(2)
|
sys.exit(2)
|
||||||
|
|
||||||
# Find deployment removal requests.
|
# Find deployment removal requests.
|
||||||
|
requests = []
|
||||||
# single request
|
# single request
|
||||||
if request_id:
|
if request_id:
|
||||||
main_logger.log(f"Retrieving request {request_id}...")
|
main_logger.log(f"Retrieving request {request_id}...")
|
||||||
@ -248,32 +276,39 @@ def command( # noqa: C901
|
|||||||
main_logger.log(f"Loading known requests from {state_file}...")
|
main_logger.log(f"Loading known requests from {state_file}...")
|
||||||
previous_requests = load_known_requests(state_file)
|
previous_requests = load_known_requests(state_file)
|
||||||
|
|
||||||
requests.sort(key=lambda r: r.createTime)
|
# Filter out None values and sort by createTime
|
||||||
requests.reverse()
|
valid_requests = [r for r in requests if r is not None]
|
||||||
|
valid_requests.sort(key=lambda r: r.createTime if r else "")
|
||||||
|
valid_requests.reverse()
|
||||||
|
|
||||||
# Find deployments.
|
# Find deployments.
|
||||||
named_deployments = {}
|
named_deployments = {}
|
||||||
main_logger.log("Discovering app deployments...")
|
main_logger.log("Discovering app deployments...")
|
||||||
for d in laconic.app_deployments(all=False):
|
for d in laconic.app_deployments(all=False):
|
||||||
named_deployments[d.id] = d
|
if d and d.id:
|
||||||
|
named_deployments[d.id] = d
|
||||||
|
|
||||||
# Find removal requests.
|
# Find removal requests.
|
||||||
removals_by_deployment = {}
|
removals_by_deployment = {}
|
||||||
removals_by_request = {}
|
removals_by_request = {}
|
||||||
main_logger.log("Discovering deployment removals...")
|
main_logger.log("Discovering deployment removals...")
|
||||||
for r in laconic.app_deployment_removals():
|
for r in laconic.app_deployment_removals():
|
||||||
if r.attributes.deployment:
|
if r and r.attributes and r.attributes.deployment:
|
||||||
# TODO: should we handle CRNs?
|
# TODO: should we handle CRNs?
|
||||||
removals_by_deployment[r.attributes.deployment] = r
|
removals_by_deployment[r.attributes.deployment] = r
|
||||||
|
|
||||||
one_per_deployment = {}
|
one_per_deployment = {}
|
||||||
for r in requests:
|
for r in valid_requests:
|
||||||
|
if not r or not r.attributes:
|
||||||
|
continue
|
||||||
if not r.attributes.deployment:
|
if not r.attributes.deployment:
|
||||||
|
r_id = r.id if r else "unknown"
|
||||||
main_logger.log(
|
main_logger.log(
|
||||||
f"Skipping removal request {r.id} since it was a cancellation."
|
f"Skipping removal request {r_id} since it was a cancellation."
|
||||||
)
|
)
|
||||||
elif r.attributes.deployment in one_per_deployment:
|
elif r.attributes.deployment in one_per_deployment:
|
||||||
main_logger.log(f"Skipping removal request {r.id} since it was superseded.")
|
r_id = r.id if r else "unknown"
|
||||||
|
main_logger.log(f"Skipping removal request {r_id} since it was superseded.")
|
||||||
else:
|
else:
|
||||||
one_per_deployment[r.attributes.deployment] = r
|
one_per_deployment[r.attributes.deployment] = r
|
||||||
|
|
||||||
@ -282,21 +317,25 @@ def command( # noqa: C901
|
|||||||
try:
|
try:
|
||||||
if r.attributes.deployment not in named_deployments:
|
if r.attributes.deployment not in named_deployments:
|
||||||
main_logger.log(
|
main_logger.log(
|
||||||
f"Skipping removal request {r.id} for {r.attributes.deployment} because it does"
|
f"Skipping removal request {r.id} for "
|
||||||
f"not appear to refer to a live, named deployment."
|
f"{r.attributes.deployment} because it does not appear to "
|
||||||
|
"refer to a live, named deployment."
|
||||||
)
|
)
|
||||||
elif skip_by_tag(r, include_tags, exclude_tags):
|
elif skip_by_tag(r, include_tags, exclude_tags):
|
||||||
main_logger.log(
|
main_logger.log(
|
||||||
"Skipping removal request %s, filtered by tag (include %s, exclude %s, present %s)"
|
"Skipping removal request %s, filtered by tag "
|
||||||
|
"(include %s, exclude %s, present %s)"
|
||||||
% (r.id, include_tags, exclude_tags, r.attributes.tags)
|
% (r.id, include_tags, exclude_tags, r.attributes.tags)
|
||||||
)
|
)
|
||||||
elif r.id in removals_by_request:
|
elif r.id in removals_by_request:
|
||||||
main_logger.log(
|
main_logger.log(
|
||||||
f"Found satisfied request for {r.id} at {removals_by_request[r.id].id}"
|
f"Found satisfied request for {r.id} "
|
||||||
|
f"at {removals_by_request[r.id].id}"
|
||||||
)
|
)
|
||||||
elif r.attributes.deployment in removals_by_deployment:
|
elif r.attributes.deployment in removals_by_deployment:
|
||||||
main_logger.log(
|
main_logger.log(
|
||||||
f"Found removal record for indicated deployment {r.attributes.deployment} at "
|
f"Found removal record for indicated deployment "
|
||||||
|
f"{r.attributes.deployment} at "
|
||||||
f"{removals_by_deployment[r.attributes.deployment].id}"
|
f"{removals_by_deployment[r.attributes.deployment].id}"
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
@ -305,12 +344,14 @@ def command( # noqa: C901
|
|||||||
requests_to_check_for_payment.append(r)
|
requests_to_check_for_payment.append(r)
|
||||||
else:
|
else:
|
||||||
main_logger.log(
|
main_logger.log(
|
||||||
f"Skipping unsatisfied request {r.id} because we have seen it before."
|
f"Skipping unsatisfied request {r.id} "
|
||||||
|
"because we have seen it before."
|
||||||
)
|
)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
main_logger.log(f"ERROR examining {r.id}: {e}")
|
main_logger.log(f"ERROR examining {r.id}: {e}")
|
||||||
|
|
||||||
requests_to_execute = []
|
requests_to_execute = []
|
||||||
|
# TODO: Handle requests with auction
|
||||||
if min_required_payment:
|
if min_required_payment:
|
||||||
for r in requests_to_check_for_payment:
|
for r in requests_to_check_for_payment:
|
||||||
main_logger.log(f"{r.id}: Confirming payment...")
|
main_logger.log(f"{r.id}: Confirming payment...")
|
||||||
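Before payment is confirmed, removal requests are triaged by tag. A rough sketch of an include/exclude filter of the kind skip_by_tag implements; the exact semantics live in util.py and may differ, and this version simply skips a request that matches no include tag or matches any exclude tag:

from typing import List, Optional


def skip_by_tags(tags: Optional[List[str]], include: List[str], exclude: List[str]) -> bool:
    tags = tags or []
    if include and not any(t in tags for t in include):
        return True  # not in the allow-list
    if exclude and any(t in tags for t in exclude):
        return True  # explicitly excluded
    return False


assert skip_by_tags(["blue"], include=["blue"], exclude=[]) is False
assert skip_by_tags(["red"], include=["blue"], exclude=[]) is True
assert skip_by_tags(["blue", "beta"], include=[], exclude=["beta"]) is True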
|
|||||||
@ -24,29 +24,53 @@ import tempfile
|
|||||||
import uuid
|
import uuid
|
||||||
import yaml
|
import yaml
|
||||||
|
|
||||||
|
from enum import Enum
|
||||||
|
from typing import Any, List, Optional, TextIO
|
||||||
|
|
||||||
|
from stack_orchestrator.deploy.webapp.registry_mutex import registry_mutex
|
||||||
|
|
||||||
|
|
||||||
|
class AuctionStatus(str, Enum):
|
||||||
|
COMMIT = "commit"
|
||||||
|
REVEAL = "reveal"
|
||||||
|
COMPLETED = "completed"
|
||||||
|
EXPIRED = "expired"
|
||||||
|
|
||||||
|
|
||||||
|
TOKEN_DENOM = "alnt"
|
||||||
|
AUCTION_KIND_PROVIDER = "provider"
|
||||||
|
|
||||||
|
|
||||||
class AttrDict(dict):
|
class AttrDict(dict):
|
||||||
def __init__(self, *args, **kwargs):
|
def __init__(self, *args: Any, **kwargs: Any) -> None:
|
||||||
super(AttrDict, self).__init__(*args, **kwargs)
|
super(AttrDict, self).__init__(*args, **kwargs)
|
||||||
self.__dict__ = self
|
self.__dict__ = self
|
||||||
|
|
||||||
def __getattribute__(self, attr):
|
def __getattribute__(self, attr: str) -> Any:
|
||||||
__dict__ = super(AttrDict, self).__getattribute__("__dict__")
|
__dict__ = super(AttrDict, self).__getattribute__("__dict__")
|
||||||
if attr in __dict__:
|
if attr in __dict__:
|
||||||
v = super(AttrDict, self).__getattribute__(attr)
|
v = super(AttrDict, self).__getattribute__(attr)
|
||||||
if isinstance(v, dict):
|
if isinstance(v, dict):
|
||||||
return AttrDict(v)
|
return AttrDict(v)
|
||||||
return v
|
return v
|
||||||
|
return super(AttrDict, self).__getattribute__(attr)
|
||||||
|
|
||||||
|
def __getattr__(self, attr: str) -> Any:
|
||||||
|
# This method is called when attribute is not found
|
||||||
|
# Return None for missing attributes (matches original behavior)
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
class TimedLogger:
|
class TimedLogger:
|
||||||
def __init__(self, id="", file=None):
|
def __init__(self, id: str = "", file: Optional[TextIO] = None) -> None:
|
||||||
self.start = datetime.datetime.now()
|
self.start = datetime.datetime.now()
|
||||||
self.last = self.start
|
self.last = self.start
|
||||||
self.id = id
|
self.id = id
|
||||||
self.file = file
|
self.file = file
|
||||||
|
|
||||||
def log(self, msg, show_step_time=True, show_total_time=False):
|
def log(
|
||||||
|
self, msg: str, show_step_time: bool = True, show_total_time: bool = False
|
||||||
|
) -> None:
|
||||||
prefix = f"{datetime.datetime.utcnow()} - {self.id}"
|
prefix = f"{datetime.datetime.utcnow()} - {self.id}"
|
||||||
if show_step_time:
|
if show_step_time:
|
||||||
prefix += f" - {datetime.datetime.now() - self.last} (step)"
|
prefix += f" - {datetime.datetime.now() - self.last} (step)"
|
||||||
@ -58,7 +82,13 @@ class TimedLogger:
|
|||||||
self.last = datetime.datetime.now()
|
self.last = datetime.datetime.now()
|
||||||
|
|
||||||
|
|
||||||
def logged_cmd(log_file, *vargs):
|
def load_known_requests(filename):
|
||||||
|
if filename and os.path.exists(filename):
|
||||||
|
return json.load(open(filename, "r"))
|
||||||
|
return {}
|
||||||
|
|
||||||
|
|
||||||
|
def logged_cmd(log_file: Optional[TextIO], *vargs: str) -> str:
|
||||||
result = None
|
result = None
|
||||||
try:
|
try:
|
||||||
if log_file:
|
if log_file:
|
||||||
@ -67,17 +97,22 @@ def logged_cmd(log_file, *vargs):
|
|||||||
result.check_returncode()
|
result.check_returncode()
|
||||||
return result.stdout.decode()
|
return result.stdout.decode()
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
if result:
|
if log_file:
|
||||||
print(result.stderr.decode(), file=log_file)
|
if result:
|
||||||
else:
|
print(result.stderr.decode(), file=log_file)
|
||||||
print(str(err), file=log_file)
|
else:
|
||||||
|
print(str(err), file=log_file)
|
||||||
raise err
|
raise err
|
||||||
|
|
||||||
|
|
||||||
def match_owner(recordA, *records):
|
def match_owner(
|
||||||
|
recordA: Optional[AttrDict], *records: Optional[AttrDict]
|
||||||
|
) -> Optional[str]:
|
||||||
|
if not recordA or not recordA.owners:
|
||||||
|
return None
|
||||||
for owner in recordA.owners:
|
for owner in recordA.owners:
|
||||||
for otherRecord in records:
|
for otherRecord in records:
|
||||||
if owner in otherRecord.owners:
|
if otherRecord and otherRecord.owners and owner in otherRecord.owners:
|
||||||
return owner
|
return owner
|
||||||
return None
|
return None
|
||||||
|
|
||||||
@ -92,76 +127,8 @@ def is_id(name_or_id: str):
|
|||||||
return not is_lrn(name_or_id)
|
return not is_lrn(name_or_id)
|
||||||
|
|
||||||
|
|
||||||
def confirm_payment(laconic, record, payment_address, min_amount, logger):
|
|
||||||
req_owner = laconic.get_owner(record)
|
|
||||||
if req_owner == payment_address:
|
|
||||||
# No need to confirm payment if the sender and recipient are the same account.
|
|
||||||
return True
|
|
||||||
|
|
||||||
if not record.attributes.payment:
|
|
||||||
logger.log(f"{record.id}: no payment tx info")
|
|
||||||
return False
|
|
||||||
|
|
||||||
tx = laconic.get_tx(record.attributes.payment)
|
|
||||||
if not tx:
|
|
||||||
logger.log(f"{record.id}: cannot locate payment tx")
|
|
||||||
return False
|
|
||||||
|
|
||||||
if tx.code != 0:
|
|
||||||
logger.log(
|
|
||||||
f"{record.id}: payment tx {tx.hash} was not successful - code: {tx.code}, log: {tx.log}"
|
|
||||||
)
|
|
||||||
return False
|
|
||||||
|
|
||||||
if tx.sender != req_owner:
|
|
||||||
logger.log(
|
|
||||||
f"{record.id}: payment sender {tx.sender} in tx {tx.hash} does not match deployment "
|
|
||||||
f"request owner {req_owner}"
|
|
||||||
)
|
|
||||||
return False
|
|
||||||
|
|
||||||
if tx.recipient != payment_address:
|
|
||||||
logger.log(
|
|
||||||
f"{record.id}: payment recipient {tx.recipient} in tx {tx.hash} does not match {payment_address}"
|
|
||||||
)
|
|
||||||
return False
|
|
||||||
|
|
||||||
pay_denom = "".join([i for i in tx.amount if not i.isdigit()])
|
|
||||||
if pay_denom != "alnt":
|
|
||||||
logger.log(
|
|
||||||
f"{record.id}: {pay_denom} in tx {tx.hash} is not an expected payment denomination"
|
|
||||||
)
|
|
||||||
return False
|
|
||||||
|
|
||||||
pay_amount = int("".join([i for i in tx.amount if i.isdigit()]))
|
|
||||||
if pay_amount < min_amount:
|
|
||||||
logger.log(
|
|
||||||
f"{record.id}: payment amount {tx.amount} is less than minimum {min_amount}"
|
|
||||||
)
|
|
||||||
return False
|
|
||||||
|
|
||||||
# Check if the payment was already used on a
|
|
||||||
used = laconic.app_deployments(
|
|
||||||
{"deployer": payment_address, "payment": tx.hash}, all=True
|
|
||||||
)
|
|
||||||
if len(used):
|
|
||||||
logger.log(f"{record.id}: payment {tx.hash} already used on deployment {used}")
|
|
||||||
return False
|
|
||||||
|
|
||||||
used = laconic.app_deployment_removals(
|
|
||||||
{"deployer": payment_address, "payment": tx.hash}, all=True
|
|
||||||
)
|
|
||||||
if len(used):
|
|
||||||
logger.log(
|
|
||||||
f"{record.id}: payment {tx.hash} already used on deployment removal {used}"
|
|
||||||
)
|
|
||||||
return False
|
|
||||||
|
|
||||||
return True
|
|
||||||
|
|
||||||
|
|
||||||
class LaconicRegistryClient:
|
class LaconicRegistryClient:
|
||||||
def __init__(self, config_file, log_file=None):
|
def __init__(self, config_file, log_file=None, mutex_lock_file=None):
|
||||||
self.config_file = config_file
|
self.config_file = config_file
|
||||||
self.log_file = log_file
|
self.log_file = log_file
|
||||||
self.cache = AttrDict(
|
self.cache = AttrDict(
|
||||||
@ -172,6 +139,9 @@ class LaconicRegistryClient:
|
|||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
self.mutex_lock_file = mutex_lock_file
|
||||||
|
self.mutex_lock_acquired = False
|
||||||
|
|
||||||
def whoami(self, refresh=False):
|
def whoami(self, refresh=False):
|
||||||
if not refresh and "whoami" in self.cache:
|
if not refresh and "whoami" in self.cache:
|
||||||
return self.cache["whoami"]
|
return self.cache["whoami"]
|
||||||
@ -270,25 +240,27 @@ class LaconicRegistryClient:
|
|||||||
]
|
]
|
||||||
|
|
||||||
# Most recent records first
|
# Most recent records first
|
||||||
results.sort(key=lambda r: r.createTime)
|
results.sort(key=lambda r: r.createTime or "")
|
||||||
results.reverse()
|
results.reverse()
|
||||||
self._add_to_cache(results)
|
self._add_to_cache(results)
|
||||||
|
|
||||||
return results
|
return results
|
||||||
|
|
||||||
def _add_to_cache(self, records):
|
def _add_to_cache(self, records: List[AttrDict]) -> None:
|
||||||
if not records:
|
if not records:
|
||||||
return
|
return
|
||||||
|
|
||||||
for p in records:
|
for p in records:
|
||||||
self.cache["name_or_id"][p.id] = p
|
if p.id:
|
||||||
|
self.cache["name_or_id"][p.id] = p
|
||||||
if p.names:
|
if p.names:
|
||||||
for lrn in p.names:
|
for lrn in p.names:
|
||||||
self.cache["name_or_id"][lrn] = p
|
self.cache["name_or_id"][lrn] = p
|
||||||
if p.attributes and p.attributes.type:
|
if p.attributes and p.attributes.type:
|
||||||
if p.attributes.type not in self.cache:
|
attr_type = p.attributes.type
|
||||||
self.cache[p.attributes.type] = []
|
if attr_type not in self.cache:
|
||||||
self.cache[p.attributes.type].append(p)
|
self.cache[attr_type] = []
|
||||||
|
self.cache[attr_type].append(p)
|
||||||
|
|
||||||
def resolve(self, name):
|
def resolve(self, name):
|
||||||
if not name:
|
if not name:
|
||||||
@ -370,6 +342,34 @@ class LaconicRegistryClient:
|
|||||||
if require:
|
if require:
|
||||||
raise Exception("Cannot locate tx:", hash)
|
raise Exception("Cannot locate tx:", hash)
|
||||||
|
|
||||||
|
def get_auction(self, auction_id, require=False):
|
||||||
|
args = [
|
||||||
|
"laconic",
|
||||||
|
"-c",
|
||||||
|
self.config_file,
|
||||||
|
"registry",
|
||||||
|
"auction",
|
||||||
|
"get",
|
||||||
|
"--id",
|
||||||
|
auction_id,
|
||||||
|
]
|
||||||
|
|
||||||
|
results = None
|
||||||
|
try:
|
||||||
|
results = [
|
||||||
|
AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args)) if r
|
||||||
|
]
|
||||||
|
except: # noqa: E722
|
||||||
|
pass
|
||||||
|
|
||||||
|
if results and len(results):
|
||||||
|
return results[0]
|
||||||
|
|
||||||
|
if require:
|
||||||
|
raise Exception("Cannot locate auction:", auction_id)
|
||||||
|
|
||||||
|
return None
|
||||||
|
|
||||||
def app_deployment_requests(self, criteria=None, all=True):
|
def app_deployment_requests(self, criteria=None, all=True):
|
||||||
if criteria is None:
|
if criteria is None:
|
||||||
criteria = {}
|
criteria = {}
|
||||||
@ -398,6 +398,21 @@ class LaconicRegistryClient:
|
|||||||
criteria["type"] = "ApplicationDeploymentRemovalRecord"
|
criteria["type"] = "ApplicationDeploymentRemovalRecord"
|
||||||
return self.list_records(criteria, all)
|
return self.list_records(criteria, all)
|
||||||
|
|
||||||
|
def webapp_deployers(self, criteria=None, all=True):
|
||||||
|
if criteria is None:
|
||||||
|
criteria = {}
|
||||||
|
criteria = criteria.copy()
|
||||||
|
criteria["type"] = "WebappDeployer"
|
||||||
|
return self.list_records(criteria, all)
|
||||||
|
|
||||||
|
def app_deployment_auctions(self, criteria=None, all=True):
|
||||||
|
if criteria is None:
|
||||||
|
criteria = {}
|
||||||
|
criteria = criteria.copy()
|
||||||
|
criteria["type"] = "ApplicationDeploymentAuction"
|
||||||
|
return self.list_records(criteria, all)
|
||||||
|
|
||||||
|
@registry_mutex()
|
||||||
def publish(self, record, names=None):
|
def publish(self, record, names=None):
|
||||||
if names is None:
|
if names is None:
|
||||||
names = []
|
names = []
|
||||||
@ -428,6 +443,7 @@ class LaconicRegistryClient:
|
|||||||
finally:
|
finally:
|
||||||
logged_cmd(self.log_file, "rm", "-rf", tmpdir)
|
logged_cmd(self.log_file, "rm", "-rf", tmpdir)
|
||||||
|
|
||||||
|
@registry_mutex()
|
||||||
def set_name(self, name, record_id):
|
def set_name(self, name, record_id):
|
||||||
logged_cmd(
|
logged_cmd(
|
||||||
self.log_file,
|
self.log_file,
|
||||||
@ -441,6 +457,7 @@ class LaconicRegistryClient:
|
|||||||
record_id,
|
record_id,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@registry_mutex()
|
||||||
def delete_name(self, name):
|
def delete_name(self, name):
|
||||||
logged_cmd(
|
logged_cmd(
|
||||||
self.log_file,
|
self.log_file,
|
||||||
@ -453,6 +470,7 @@ class LaconicRegistryClient:
|
|||||||
name,
|
name,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@registry_mutex()
|
||||||
def send_tokens(self, address, amount, type="alnt"):
|
def send_tokens(self, address, amount, type="alnt"):
|
||||||
args = [
|
args = [
|
||||||
"laconic",
|
"laconic",
|
||||||
@ -471,6 +489,67 @@ class LaconicRegistryClient:
|
|||||||
|
|
||||||
return AttrDict(json.loads(logged_cmd(self.log_file, *args)))
|
return AttrDict(json.loads(logged_cmd(self.log_file, *args)))
|
||||||
|
|
||||||
|
@registry_mutex()
|
||||||
|
def create_deployment_auction(self, auction):
|
||||||
|
args = [
|
||||||
|
"laconic",
|
||||||
|
"-c",
|
||||||
|
self.config_file,
|
||||||
|
"registry",
|
||||||
|
"auction",
|
||||||
|
"create",
|
||||||
|
"--kind",
|
||||||
|
auction["kind"],
|
||||||
|
"--commits-duration",
|
||||||
|
str(auction["commits_duration"]),
|
||||||
|
"--reveals-duration",
|
||||||
|
str(auction["reveals_duration"]),
|
||||||
|
"--denom",
|
||||||
|
auction["denom"],
|
||||||
|
"--commit-fee",
|
||||||
|
str(auction["commit_fee"]),
|
||||||
|
"--reveal-fee",
|
||||||
|
str(auction["reveal_fee"]),
|
||||||
|
"--max-price",
|
||||||
|
str(auction["max_price"]),
|
||||||
|
"--num-providers",
|
||||||
|
str(auction["num_providers"]),
|
||||||
|
]
|
||||||
|
|
||||||
|
return json.loads(logged_cmd(self.log_file, *args))["auctionId"]
|
||||||
|
|
||||||
|
@registry_mutex()
|
||||||
|
def commit_bid(self, auction_id, amount, type="alnt"):
|
||||||
|
args = [
|
||||||
|
"laconic",
|
||||||
|
"-c",
|
||||||
|
self.config_file,
|
||||||
|
"registry",
|
||||||
|
"auction",
|
||||||
|
"bid",
|
||||||
|
"commit",
|
||||||
|
auction_id,
|
||||||
|
str(amount),
|
||||||
|
type,
|
||||||
|
]
|
||||||
|
|
||||||
|
return json.loads(logged_cmd(self.log_file, *args))["reveal_file"]
|
||||||
|
|
||||||
|
@registry_mutex()
|
||||||
|
def reveal_bid(self, auction_id, reveal_file_path):
|
||||||
|
logged_cmd(
|
||||||
|
self.log_file,
|
||||||
|
"laconic",
|
||||||
|
"-c",
|
||||||
|
self.config_file,
|
||||||
|
"registry",
|
||||||
|
"auction",
|
||||||
|
"bid",
|
||||||
|
"reveal",
|
||||||
|
auction_id,
|
||||||
|
reveal_file_path,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def file_hash(filename):
|
def file_hash(filename):
|
||||||
return hashlib.sha1(open(filename).read().encode()).hexdigest()
|
return hashlib.sha1(open(filename).read().encode()).hexdigest()
|
||||||
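Taken together, the auction helpers added to LaconicRegistryClient support the usual commit/reveal cycle: create the auction, commit a sealed bid, then reveal it once the commit phase closes. A usage sketch; the config file path, durations, and amounts are illustrative, not values from the source:

from stack_orchestrator.deploy.webapp.util import (
    AUCTION_KIND_PROVIDER,
    TOKEN_DENOM,
    LaconicRegistryClient,
)

laconic = LaconicRegistryClient("laconic.yml")  # assumed config file path

auction_id = laconic.create_deployment_auction(
    {
        "kind": AUCTION_KIND_PROVIDER,
        "commits_duration": "600s",
        "reveals_duration": "600s",
        "denom": TOKEN_DENOM,
        "commit_fee": 1000,
        "reveal_fee": 1000,
        "max_price": 100000,
        "num_providers": 2,
    }
)

# Bidder side: commit now, keep the reveal file, reveal after commits close
reveal_file = laconic.commit_bid(auction_id, 50000)
laconic.reveal_bid(auction_id, reveal_file)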
@ -493,24 +572,36 @@ def determine_base_container(clone_dir, app_type="webapp"):
|
|||||||
return base_container
|
return base_container
|
||||||
|
|
||||||
|
|
||||||
def build_container_image(app_record, tag, extra_build_args=None, logger=None):
|
def build_container_image(
|
||||||
|
app_record: Optional[AttrDict],
|
||||||
|
tag: str,
|
||||||
|
extra_build_args: Optional[List[str]] = None,
|
||||||
|
logger: Optional[TimedLogger] = None,
|
||||||
|
) -> None:
|
||||||
|
if app_record is None:
|
||||||
|
raise ValueError("app_record cannot be None")
|
||||||
if extra_build_args is None:
|
if extra_build_args is None:
|
||||||
extra_build_args = []
|
extra_build_args = []
|
||||||
tmpdir = tempfile.mkdtemp()
|
tmpdir = tempfile.mkdtemp()
|
||||||
|
|
||||||
# TODO: determine if this code could be calling into the Python git library like setup-repositories
|
# TODO: determine if this code could be calling into the Python git
|
||||||
|
# library like setup-repositories
|
||||||
|
log_file = logger.file if logger else None
|
||||||
try:
|
try:
|
||||||
record_id = app_record["id"]
|
record_id = app_record["id"]
|
||||||
ref = app_record.attributes.repository_ref
|
ref = app_record.attributes.repository_ref
|
||||||
repo = random.choice(app_record.attributes.repository)
|
repo = random.choice(app_record.attributes.repository)
|
||||||
clone_dir = os.path.join(tmpdir, record_id)
|
clone_dir = os.path.join(tmpdir, record_id)
|
||||||
|
|
||||||
logger.log(f"Cloning repository {repo} to {clone_dir} ...")
|
if logger:
|
||||||
|
logger.log(f"Cloning repository {repo} to {clone_dir} ...")
|
||||||
# Set github credentials if present running a command like:
|
# Set github credentials if present running a command like:
|
||||||
# git config --global url."https://${TOKEN}:@github.com/".insteadOf "https://github.com/"
|
# git config --global url."https://${TOKEN}:@github.com/".insteadOf
|
||||||
|
# "https://github.com/"
|
||||||
github_token = os.environ.get("DEPLOYER_GITHUB_TOKEN")
|
github_token = os.environ.get("DEPLOYER_GITHUB_TOKEN")
|
||||||
if github_token:
|
if github_token:
|
||||||
logger.log("Github token detected, setting it in the git environment")
|
if logger:
|
||||||
|
logger.log("Github token detected, setting it in the git environment")
|
||||||
git_config_args = [
|
git_config_args = [
|
||||||
"git",
|
"git",
|
||||||
"config",
|
"config",
|
||||||
@ -518,9 +609,7 @@ def build_container_image(app_record, tag, extra_build_args=None, logger=None):
|
|||||||
f"url.https://{github_token}:@github.com/.insteadOf",
|
f"url.https://{github_token}:@github.com/.insteadOf",
|
||||||
"https://github.com/",
|
"https://github.com/",
|
||||||
]
|
]
|
||||||
result = subprocess.run(
|
result = subprocess.run(git_config_args, stdout=log_file, stderr=log_file)
|
||||||
git_config_args, stdout=logger.file, stderr=logger.file
|
|
||||||
)
|
|
||||||
result.check_returncode()
|
result.check_returncode()
|
||||||
if ref:
|
if ref:
|
||||||
# TODO: Determing branch or hash, and use depth 1 if we can.
|
# TODO: Determing branch or hash, and use depth 1 if we can.
|
||||||
@ -531,29 +620,32 @@ def build_container_image(app_record, tag, extra_build_args=None, logger=None):
|
|||||||
subprocess.check_call(
|
subprocess.check_call(
|
||||||
["git", "clone", repo, clone_dir],
|
["git", "clone", repo, clone_dir],
|
||||||
env=git_env,
|
env=git_env,
|
||||||
stdout=logger.file,
|
stdout=log_file,
|
||||||
stderr=logger.file,
|
stderr=log_file,
|
||||||
)
|
)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.log(f"git clone failed. Is the repository {repo} private?")
|
if logger:
|
||||||
|
logger.log(f"git clone failed. Is the repository {repo} private?")
|
||||||
raise e
|
raise e
|
||||||
try:
|
try:
|
||||||
subprocess.check_call(
|
subprocess.check_call(
|
||||||
["git", "checkout", ref],
|
["git", "checkout", ref],
|
||||||
cwd=clone_dir,
|
cwd=clone_dir,
|
||||||
env=git_env,
|
env=git_env,
|
||||||
stdout=logger.file,
|
stdout=log_file,
|
||||||
stderr=logger.file,
|
stderr=log_file,
|
||||||
)
|
)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.log(f"git checkout failed. Does ref {ref} exist?")
|
if logger:
|
||||||
|
logger.log(f"git checkout failed. Does ref {ref} exist?")
|
||||||
raise e
|
raise e
|
||||||
else:
|
else:
|
||||||
# TODO: why is this code different vs the branch above (run vs check_call, and no prompt disable)?
|
# TODO: why is this code different vs the branch above (run vs check_call,
|
||||||
|
# and no prompt disable)?
|
||||||
result = subprocess.run(
|
result = subprocess.run(
|
||||||
["git", "clone", "--depth", "1", repo, clone_dir],
|
["git", "clone", "--depth", "1", repo, clone_dir],
|
||||||
stdout=logger.file,
|
stdout=log_file,
|
||||||
stderr=logger.file,
|
stderr=log_file,
|
||||||
)
|
)
|
||||||
result.check_returncode()
|
result.check_returncode()
|
||||||
|
|
||||||
@ -561,7 +653,8 @@ def build_container_image(app_record, tag, extra_build_args=None, logger=None):
|
|||||||
clone_dir, app_record.attributes.app_type
|
clone_dir, app_record.attributes.app_type
|
||||||
)
|
)
|
||||||
|
|
||||||
logger.log("Building webapp ...")
|
if logger:
|
||||||
|
logger.log("Building webapp ...")
|
||||||
build_command = [
|
build_command = [
|
||||||
sys.argv[0],
|
sys.argv[0],
|
||||||
"--verbose",
|
"--verbose",
|
||||||
@ -577,10 +670,10 @@ def build_container_image(app_record, tag, extra_build_args=None, logger=None):
|
|||||||
build_command.append("--extra-build-args")
|
build_command.append("--extra-build-args")
|
||||||
build_command.append(" ".join(extra_build_args))
|
build_command.append(" ".join(extra_build_args))
|
||||||
|
|
||||||
result = subprocess.run(build_command, stdout=logger.file, stderr=logger.file)
|
result = subprocess.run(build_command, stdout=log_file, stderr=log_file)
|
||||||
result.check_returncode()
|
result.check_returncode()
|
||||||
finally:
|
finally:
|
||||||
logged_cmd(logger.file, "rm", "-rf", tmpdir)
|
logged_cmd(log_file, "rm", "-rf", tmpdir)
|
||||||
|
|
||||||
|
|
||||||
def push_container_image(deployment_dir, logger):
|
def push_container_image(deployment_dir, logger):
|
||||||
@ -626,6 +719,7 @@ def publish_deployment(
|
|||||||
dns_record,
|
dns_record,
|
||||||
dns_lrn,
|
dns_lrn,
|
||||||
deployment_dir,
|
deployment_dir,
|
||||||
|
dns_value=None,
|
||||||
app_deployment_request=None,
|
app_deployment_request=None,
|
||||||
webapp_deployer_record=None,
|
webapp_deployer_record=None,
|
||||||
logger=None,
|
logger=None,
|
||||||
@ -658,6 +752,8 @@ def publish_deployment(
|
|||||||
}
|
}
|
||||||
if app_deployment_request:
|
if app_deployment_request:
|
||||||
new_dns_record["record"]["request"] = app_deployment_request.id
|
new_dns_record["record"]["request"] = app_deployment_request.id
|
||||||
|
if dns_value:
|
||||||
|
new_dns_record["record"]["value"] = dns_value
|
||||||
|
|
||||||
if logger:
|
if logger:
|
||||||
logger.log("Publishing DnsRecord.")
|
logger.log("Publishing DnsRecord.")
|
||||||
@ -677,9 +773,16 @@ def publish_deployment(
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if app_deployment_request:
|
if app_deployment_request:
|
||||||
new_deployment_record["record"]["request"] = app_deployment_request.id
|
new_deployment_record["record"]["request"] = app_deployment_request.id
|
||||||
if app_deployment_request.attributes.payment:
|
|
||||||
|
# Set auction or payment id from request
|
||||||
|
if app_deployment_request.attributes.auction:
|
||||||
|
new_deployment_record["record"][
|
||||||
|
"auction"
|
||||||
|
] = app_deployment_request.attributes.auction
|
||||||
|
elif app_deployment_request.attributes.payment:
|
||||||
new_deployment_record["record"][
|
new_deployment_record["record"][
|
||||||
"payment"
|
"payment"
|
||||||
] = app_deployment_request.attributes.payment
|
] = app_deployment_request.attributes.payment
|
||||||
@@ -730,3 +833,133 @@ def skip_by_tag(r, include_tags, exclude_tags):
            return True

    return False


def confirm_payment(
    laconic: LaconicRegistryClient,
    record: AttrDict,
    payment_address: str,
    min_amount: int,
    logger: TimedLogger,
) -> bool:
    req_owner = laconic.get_owner(record)
    if req_owner == payment_address:
        # No need to confirm payment if the sender and recipient are the same account.
        return True

    if not record.attributes.payment:
        logger.log(f"{record.id}: no payment tx info")
        return False

    tx = laconic.get_tx(record.attributes.payment)
    if not tx:
        logger.log(f"{record.id}: cannot locate payment tx")
        return False

    if tx.code != 0:
        logger.log(
            f"{record.id}: payment tx {tx.hash} was not successful - "
            f"code: {tx.code}, log: {tx.log}"
        )
        return False

    if tx.sender != req_owner:
        logger.log(
            f"{record.id}: payment sender {tx.sender} in tx {tx.hash} "
            f"does not match deployment request owner {req_owner}"
        )
        return False

    if tx.recipient != payment_address:
        logger.log(
            f"{record.id}: payment recipient {tx.recipient} in tx {tx.hash} "
            f"does not match {payment_address}"
        )
        return False

    tx_amount = tx.amount or ""
    pay_denom = "".join([i for i in tx_amount if not i.isdigit()])
    if pay_denom != "alnt":
        logger.log(
            f"{record.id}: {pay_denom} in tx {tx.hash} is not an expected "
            "payment denomination"
        )
        return False

    pay_amount = int("".join([i for i in tx_amount if i.isdigit()]) or "0")
    if pay_amount < min_amount:
        logger.log(
            f"{record.id}: payment amount {tx.amount} is less than minimum {min_amount}"
        )
        return False

    # Check if the payment was already used on a deployment
    used = laconic.app_deployments(
        {"deployer": record.attributes.deployer, "payment": tx.hash}, all=True
    )
    if len(used):
        # Fetch the app name from request record
        used_request = laconic.get_record(used[0].attributes.request, require=True)

        # Check that payment was used for deployment of same application
        used_app = used_request.attributes.application if used_request else None
        if record.attributes.application != used_app:
            logger.log(
                f"{record.id}: payment {tx.hash} already used on a different "
                f"application deployment {used}"
            )
            return False

    used = laconic.app_deployment_removals(
        {"deployer": record.attributes.deployer, "payment": tx.hash}, all=True
    )
    if len(used):
        logger.log(
            f"{record.id}: payment {tx.hash} already used on deployment removal {used}"
        )
        return False

    return True


def confirm_auction(
    laconic: LaconicRegistryClient,
    record: AttrDict,
    deployer_lrn: str,
    payment_address: str,
    logger: TimedLogger,
) -> bool:
    auction_id = record.attributes.auction
    auction = laconic.get_auction(auction_id)

    # Fetch auction record for given auction
    auction_records_by_id = laconic.app_deployment_auctions({"auction": auction_id})
    if len(auction_records_by_id) == 0:
        logger.log(f"{record.id}: unable to locate record for auction {auction_id}")
        return False

    # Cross check app against application in the auction record
    requested_app = laconic.get_record(record.attributes.application, require=True)
    auction_app = laconic.get_record(
        auction_records_by_id[0].attributes.application, require=True
    )
    requested_app_id = requested_app.id if requested_app else None
    auction_app_id = auction_app.id if auction_app else None
    if requested_app_id != auction_app_id:
        logger.log(
            f"{record.id}: requested application {record.attributes.application} "
            f"does not match application from auction record "
            f"{auction_records_by_id[0].attributes.application}"
        )
        return False

    if not auction:
        logger.log(f"{record.id}: unable to locate auction {auction_id}")
        return False

    # Check if the deployer payment address is in auction winners list
    if payment_address not in auction.winnerAddresses:
        logger.log(f"{record.id}: deployer payment address not in auction winners.")
        return False

    return True
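confirm_payment above splits a bank amount string such as "25000alnt" into a denomination and an integer value with two comprehensions. The same split in isolation:

from typing import Tuple


def split_amount(amount: str) -> Tuple[str, int]:
    # Non-digit characters form the denomination, digit characters form the value
    denom = "".join(c for c in amount if not c.isdigit())
    value = int("".join(c for c in amount if c.isdigit()) or "0")
    return denom, value


assert split_amount("25000alnt") == ("alnt", 25000)
assert split_amount("alnt") == ("alnt", 0)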
@ -21,34 +21,41 @@ from stack_orchestrator.repos import fetch_stack
|
|||||||
from stack_orchestrator.build import build_containers, fetch_containers
|
from stack_orchestrator.build import build_containers, fetch_containers
|
||||||
from stack_orchestrator.build import build_npms
|
from stack_orchestrator.build import build_npms
|
||||||
from stack_orchestrator.build import build_webapp
|
from stack_orchestrator.build import build_webapp
|
||||||
from stack_orchestrator.deploy.webapp import (run_webapp,
|
from stack_orchestrator.deploy.webapp import (
|
||||||
deploy_webapp,
|
run_webapp,
|
||||||
deploy_webapp_from_registry,
|
deploy_webapp,
|
||||||
undeploy_webapp_from_registry,
|
deploy_webapp_from_registry,
|
||||||
publish_webapp_deployer,
|
undeploy_webapp_from_registry,
|
||||||
request_webapp_deployment)
|
publish_webapp_deployer,
|
||||||
|
publish_deployment_auction,
|
||||||
|
handle_deployment_auction,
|
||||||
|
request_webapp_deployment,
|
||||||
|
request_webapp_undeployment,
|
||||||
|
)
|
||||||
from stack_orchestrator.deploy import deploy
|
from stack_orchestrator.deploy import deploy
|
||||||
from stack_orchestrator import version
|
from stack_orchestrator import version
|
||||||
from stack_orchestrator.deploy import deployment
|
from stack_orchestrator.deploy import deployment
|
||||||
from stack_orchestrator import opts
|
from stack_orchestrator import opts
|
||||||
from stack_orchestrator import update
|
from stack_orchestrator import update
|
||||||
|
|
||||||
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
|
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
|
||||||
|
|
||||||
|
|
||||||
@click.group(context_settings=CONTEXT_SETTINGS)
|
@click.group(context_settings=CONTEXT_SETTINGS)
|
||||||
@click.option('--stack', help="specify a stack to build/deploy")
|
@click.option("--stack", help="specify a stack to build/deploy")
|
||||||
@click.option('--quiet', is_flag=True, default=False)
|
@click.option("--quiet", is_flag=True, default=False)
|
||||||
@click.option('--verbose', is_flag=True, default=False)
|
@click.option("--verbose", is_flag=True, default=False)
|
||||||
@click.option('--dry-run', is_flag=True, default=False)
|
@click.option("--dry-run", is_flag=True, default=False)
|
||||||
@click.option('--local-stack', is_flag=True, default=False)
|
@click.option("--local-stack", is_flag=True, default=False)
|
||||||
@click.option('--debug', is_flag=True, default=False)
|
@click.option("--debug", is_flag=True, default=False)
|
||||||
@click.option('--continue-on-error', is_flag=True, default=False)
|
@click.option("--continue-on-error", is_flag=True, default=False)
|
||||||
# See: https://click.palletsprojects.com/en/8.1.x/complex/#building-a-git-clone
|
# See: https://click.palletsprojects.com/en/8.1.x/complex/#building-a-git-clone
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def cli(ctx, stack, quiet, verbose, dry_run, local_stack, debug, continue_on_error):
|
def cli(ctx, stack, quiet, verbose, dry_run, local_stack, debug, continue_on_error):
|
||||||
"""Laconic Stack Orchestrator"""
|
"""Laconic Stack Orchestrator"""
|
||||||
command_options = CommandOptions(stack, quiet, verbose, dry_run, local_stack, debug, continue_on_error)
|
command_options = CommandOptions(
|
||||||
|
stack, quiet, verbose, dry_run, local_stack, debug, continue_on_error
|
||||||
|
)
|
||||||
opts.opts.o = command_options
|
opts.opts.o = command_options
|
||||||
ctx.obj = command_options
|
ctx.obj = command_options
|
||||||
|
|
||||||
@ -64,7 +71,10 @@ cli.add_command(deploy_webapp.command, "deploy-webapp")
|
|||||||
cli.add_command(deploy_webapp_from_registry.command, "deploy-webapp-from-registry")
|
cli.add_command(deploy_webapp_from_registry.command, "deploy-webapp-from-registry")
|
||||||
cli.add_command(undeploy_webapp_from_registry.command, "undeploy-webapp-from-registry")
|
cli.add_command(undeploy_webapp_from_registry.command, "undeploy-webapp-from-registry")
|
||||||
cli.add_command(publish_webapp_deployer.command, "publish-deployer-to-registry")
|
cli.add_command(publish_webapp_deployer.command, "publish-deployer-to-registry")
|
||||||
|
cli.add_command(publish_deployment_auction.command, "publish-deployment-auction")
|
||||||
|
cli.add_command(handle_deployment_auction.command, "handle-deployment-auction")
|
||||||
cli.add_command(request_webapp_deployment.command, "request-webapp-deployment")
|
cli.add_command(request_webapp_deployment.command, "request-webapp-deployment")
|
||||||
|
cli.add_command(request_webapp_undeployment.command, "request-webapp-undeployment")
|
||||||
cli.add_command(deploy.command, "deploy") # deploy is an alias for deploy-system
|
cli.add_command(deploy.command, "deploy") # deploy is an alias for deploy-system
|
||||||
cli.add_command(deploy.command, "deploy-system")
|
cli.add_command(deploy.command, "deploy-system")
|
||||||
cli.add_command(deployment.command, "deployment")
|
cli.add_command(deployment.command, "deployment")
|
||||||
|
|||||||
@@ -17,4 +17,4 @@ from stack_orchestrator.command_types import CommandOptions


 class opts:
-    o: CommandOptions = None
+    o: CommandOptions = None  # type: ignore[assignment]  # Set at runtime

@@ -29,14 +29,16 @@ from stack_orchestrator.util import error_exit


 @click.command()
-@click.argument('stack-locator')
-@click.option('--git-ssh', is_flag=True, default=False)
-@click.option('--check-only', is_flag=True, default=False)
-@click.option('--pull', is_flag=True, default=False)
+@click.argument("stack-locator")
+@click.option("--git-ssh", is_flag=True, default=False)
+@click.option("--check-only", is_flag=True, default=False)
+@click.option("--pull", is_flag=True, default=False)
 @click.pass_context
 def command(ctx, stack_locator, git_ssh, check_only, pull):
-    '''optionally resolve then git clone a repository containing one or more stack definitions'''
-    dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
+    """Optionally resolve then git clone a repository with stack definitions."""
+    dev_root_path = os.path.expanduser(
+        str(config("CERC_REPO_BASE_DIR", default="~/cerc"))
+    )
     if not opts.o.quiet:
         print(f"Dev Root is: {dev_root_path}")
     try:
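The command above resolves its working directory from the CERC_REPO_BASE_DIR setting (environment variable, .env, or settings.ini) via python-decouple, defaulting to ~/cerc. The lookup on its own:

import os

from decouple import config

dev_root_path = os.path.expanduser(str(config("CERC_REPO_BASE_DIR", default="~/cerc")))
print(f"Dev Root is: {dev_root_path}")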
@ -20,20 +20,26 @@ import os
|
|||||||
import sys
|
import sys
|
||||||
from decouple import config
|
from decouple import config
|
||||||
import git
|
import git
|
||||||
from git.exc import GitCommandError
|
from git.exc import GitCommandError, InvalidGitRepositoryError
|
||||||
|
from typing import Any
|
||||||
from tqdm import tqdm
|
from tqdm import tqdm
|
||||||
import click
|
import click
|
||||||
import importlib.resources
|
import importlib.resources
|
||||||
from stack_orchestrator.opts import opts
|
from stack_orchestrator.opts import opts
|
||||||
from stack_orchestrator.util import get_parsed_stack_config, include_exclude_check, error_exit, warn_exit
|
from stack_orchestrator.util import (
|
||||||
|
get_parsed_stack_config,
|
||||||
|
include_exclude_check,
|
||||||
|
error_exit,
|
||||||
|
warn_exit,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
class GitProgress(git.RemoteProgress):
|
class GitProgress(git.RemoteProgress):
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
super().__init__()
|
super().__init__()
|
||||||
self.pbar = tqdm(unit='B', ascii=True, unit_scale=True)
|
self.pbar = tqdm(unit="B", ascii=True, unit_scale=True)
|
||||||
|
|
||||||
def update(self, op_code, cur_count, max_count=None, message=''):
|
def update(self, op_code, cur_count, max_count=None, message=""):
|
||||||
self.pbar.total = max_count
|
self.pbar.total = max_count
|
||||||
self.pbar.n = cur_count
|
self.pbar.n = cur_count
|
||||||
self.pbar.refresh()
|
self.pbar.refresh()
|
||||||
@@ -43,17 +49,19 @@ def is_git_repo(path):
     try:
         _ = git.Repo(path).git_dir
         return True
-    except git.exc.InvalidGitRepositoryError:
+    except InvalidGitRepositoryError:
         return False


 # TODO: find a place for this in the context of click
 # parser = argparse.ArgumentParser(
-# epilog="Config provided either in .env or settings.ini or env vars: CERC_REPO_BASE_DIR (defaults to ~/cerc)"
+# epilog="Config provided either in .env or settings.ini or env vars: "
+# "CERC_REPO_BASE_DIR (defaults to ~/cerc)"
 # )


 def branch_strip(s):
-    return s.split('@')[0]
+    return s.split("@")[0]


 def host_and_path_for_repo(fully_qualified_repo):
@@ -63,10 +71,14 @@ def host_and_path_for_repo(fully_qualified_repo):
     # Legacy unqualified repo means github
     if len(repo_host_split) == 2:
         return "github.com", "/".join(repo_host_split), repo_branch
+    elif len(repo_host_split) == 3:
+        # First part is the host
+        return repo_host_split[0], "/".join(repo_host_split[1:]), repo_branch
     else:
-        if len(repo_host_split) == 3:
-            # First part is the host
-            return repo_host_split[0], "/".join(repo_host_split[1:]), repo_branch
+        raise ValueError(
+            f"Invalid repository format: {fully_qualified_repo}. "
+            "Expected format: host/org/repo or org/repo"
+        )


 # See: https://stackoverflow.com/questions/18659425/get-git-current-branch-tag-name
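
Reviewer note: to make the new behaviour concrete, here is a minimal standalone sketch of the locator parsing above. The repository names and host are invented, and the "@branch" splitting shown here is an assumption about the surrounding lines that this hunk does not show (the diff only confirms it happens via an "@"-based split elsewhere in the file).

# Standalone sketch -- restates the rules shown in the hunk above for illustration.
def host_and_path_for_repo(fully_qualified_repo: str):
    repo_branch_split = fully_qualified_repo.split("@")
    repo_branch = repo_branch_split[1] if len(repo_branch_split) > 1 else None
    repo_host_split = repo_branch_split[0].split("/")
    # Legacy unqualified repo means github
    if len(repo_host_split) == 2:
        return "github.com", "/".join(repo_host_split), repo_branch
    elif len(repo_host_split) == 3:
        # First part is the host
        return repo_host_split[0], "/".join(repo_host_split[1:]), repo_branch
    else:
        raise ValueError(
            f"Invalid repository format: {fully_qualified_repo}. "
            "Expected format: host/org/repo or org/repo"
        )


# Invented examples:
assert host_and_path_for_repo("some-org/some-repo") == (
    "github.com", "some-org/some-repo", None)
assert host_and_path_for_repo("git.example.com/some-org/some-repo@v1.2") == (
    "git.example.com", "some-org/some-repo", "v1.2")
# A malformed locator such as "just-a-name" now raises ValueError, where the
# old code's else branch fell through and returned None implicitly.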
@@ -74,43 +86,64 @@ def _get_repo_current_branch_or_tag(full_filesystem_repo_path):
     current_repo_branch_or_tag = "***UNDETERMINED***"
     is_branch = False
     try:
-        current_repo_branch_or_tag = git.Repo(full_filesystem_repo_path).active_branch.name
+        current_repo_branch_or_tag = git.Repo(
+            full_filesystem_repo_path
+        ).active_branch.name
         is_branch = True
     except TypeError:
         # This means that the current ref is not a branch, so possibly a tag
         # Let's try to get the tag
         try:
-            current_repo_branch_or_tag = git.Repo(full_filesystem_repo_path).git.describe("--tags", "--exact-match")
-            # Note that git is asymmetric -- the tag you told it to check out may not be the one
-            # you get back here (if there are multiple tags associated with the same commit)
+            current_repo_branch_or_tag = git.Repo(
+                full_filesystem_repo_path
+            ).git.describe("--tags", "--exact-match")
+            # Note that git is asymmetric -- the tag you told it to check out
+            # may not be the one you get back here (if there are multiple tags
+            # associated with the same commit)
         except GitCommandError:
-            # If there is no matching branch or tag checked out, just use the current SHA
-            current_repo_branch_or_tag = git.Repo(full_filesystem_repo_path).commit("HEAD").hexsha
+            # If there is no matching branch or tag checked out, just use the current
+            # SHA
+            current_repo_branch_or_tag = (
+                git.Repo(full_filesystem_repo_path).commit("HEAD").hexsha
+            )
     return current_repo_branch_or_tag, is_branch


 # TODO: fix the messy arg list here
-def process_repo(pull, check_only, git_ssh, dev_root_path, branches_array, fully_qualified_repo):
+def process_repo(
+    pull, check_only, git_ssh, dev_root_path, branches_array, fully_qualified_repo
+):
     if opts.o.verbose:
         print(f"Processing repo: {fully_qualified_repo}")
     repo_host, repo_path, repo_branch = host_and_path_for_repo(fully_qualified_repo)
     git_ssh_prefix = f"git@{repo_host}:"
     git_http_prefix = f"https://{repo_host}/"
-    full_github_repo_path = f"{git_ssh_prefix if git_ssh else git_http_prefix}{repo_path}"
+    full_github_repo_path = (
+        f"{git_ssh_prefix if git_ssh else git_http_prefix}{repo_path}"
+    )
     repoName = repo_path.split("/")[-1]
     full_filesystem_repo_path = os.path.join(dev_root_path, repoName)
     is_present = os.path.isdir(full_filesystem_repo_path)
-    (current_repo_branch_or_tag, is_branch) = _get_repo_current_branch_or_tag(
-        full_filesystem_repo_path
-    ) if is_present else (None, None)
+    (current_repo_branch_or_tag, is_branch) = (
+        _get_repo_current_branch_or_tag(full_filesystem_repo_path)
+        if is_present
+        else (None, None)
+    )
     if not opts.o.quiet:
-        present_text = f"already exists active {'branch' if is_branch else 'ref'}: {current_repo_branch_or_tag}" if is_present \
-            else 'Needs to be fetched'
+        present_text = (
+            f"already exists active {'branch' if is_branch else 'ref'}: "
+            f"{current_repo_branch_or_tag}"
+            if is_present
+            else "Needs to be fetched"
+        )
         print(f"Checking: {full_filesystem_repo_path}: {present_text}")
     # Quick check that it's actually a repo
     if is_present:
         if not is_git_repo(full_filesystem_repo_path):
-            print(f"Error: {full_filesystem_repo_path} does not contain a valid git repository")
+            print(
+                f"Error: {full_filesystem_repo_path} does not contain "
+                "a valid git repository"
+            )
             sys.exit(1)
     else:
         if pull:
@@ -128,11 +161,18 @@ def process_repo(pull, check_only, git_ssh, dev_root_path, branches_array, fully
     if not is_present:
         # Clone
         if opts.o.verbose:
-            print(f'Running git clone for {full_github_repo_path} into {full_filesystem_repo_path}')
+            print(
+                f"Running git clone for {full_github_repo_path} "
+                f"into {full_filesystem_repo_path}"
+            )
         if not opts.o.dry_run:
-            git.Repo.clone_from(full_github_repo_path,
-                                full_filesystem_repo_path,
-                                progress=None if opts.o.quiet else GitProgress())
+            # Cast to Any to work around GitPython's incomplete type stubs
+            progress: Any = None if opts.o.quiet else GitProgress()
+            git.Repo.clone_from(
+                full_github_repo_path,
+                full_filesystem_repo_path,
+                progress=progress,
+            )
         else:
             print("(git clone skipped)")
     # Checkout the requested branch, if one was specified
@@ -150,9 +190,9 @@ def process_repo(pull, check_only, git_ssh, dev_root_path, branches_array, fully

     if branch_to_checkout:
         if current_repo_branch_or_tag is None or (
-            current_repo_branch_or_tag and (
-                current_repo_branch_or_tag != branch_to_checkout)
+            current_repo_branch_or_tag
+            and (current_repo_branch_or_tag != branch_to_checkout)
         ):
             if not opts.o.quiet:
                 print(f"switching to branch {branch_to_checkout} in repo {repo_path}")
             git_repo = git.Repo(full_filesystem_repo_path)
@@ -180,14 +220,14 @@ def parse_branches(branches_string):

 @click.command()
 @click.option("--include", help="only clone these repositories")
-@click.option("--exclude", help="don\'t clone these repositories")
-@click.option('--git-ssh', is_flag=True, default=False)
-@click.option('--check-only', is_flag=True, default=False)
-@click.option('--pull', is_flag=True, default=False)
+@click.option("--exclude", help="don't clone these repositories")
+@click.option("--git-ssh", is_flag=True, default=False)
+@click.option("--check-only", is_flag=True, default=False)
+@click.option("--pull", is_flag=True, default=False)
 @click.option("--branches", help="override branches for repositories")
 @click.pass_context
 def command(ctx, include, exclude, git_ssh, check_only, pull, branches):
-    '''git clone the set of repositories required to build the complete system from source'''
+    """git clone the set of repositories required to build the system."""

     quiet = opts.o.quiet
     verbose = opts.o.verbose
@@ -204,22 +244,30 @@ def command(ctx, include, exclude, git_ssh, check_only, pull, branches):
     local_stack = ctx.obj.local_stack

     if local_stack:
-        dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
-        print(f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}")
+        dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")]
+        print(
+            f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
+            f"{dev_root_path}"
+        )
     else:
-        dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
+        dev_root_path = os.path.expanduser(
+            str(config("CERC_REPO_BASE_DIR", default="~/cerc"))
+        )

     if not quiet:
         print(f"Dev Root is: {dev_root_path}")

     if not os.path.isdir(dev_root_path):
         if not quiet:
-            print('Dev root directory doesn\'t exist, creating')
+            print("Dev root directory doesn't exist, creating")
         os.makedirs(dev_root_path)

     # See: https://stackoverflow.com/a/20885799/1701505
     from stack_orchestrator import data
-    with importlib.resources.open_text(data, "repository-list.txt") as repository_list_file:
+
+    with importlib.resources.open_text(
+        data, "repository-list.txt"
+    ) as repository_list_file:
         all_repos = repository_list_file.read().splitlines()

     repos_in_scope = []
@@ -247,5 +295,5 @@ def command(ctx, include, exclude, git_ssh, check_only, pull, branches):
     for repo in repos:
         try:
             process_repo(pull, check_only, git_ssh, dev_root_path, branches_array, repo)
-        except git.exc.GitCommandError as error:
+        except GitCommandError as error:
             error_exit(f"\n******* git command returned error exit status:\n{error}")
@@ -29,7 +29,7 @@ from stack_orchestrator.util import get_yaml
 def _download_url(url: str, file_path: Path):
     r = requests.get(url, stream=True)
     r.raw.decode_content = True
-    with open(file_path, 'wb') as f:
+    with open(file_path, "wb") as f:
         shutil.copyfileobj(r.raw, f)


@@ -40,12 +40,14 @@ def _error_exit(s: str):

 # Note at present this probably won't work on non-Unix based OSes like Windows
 @click.command()
-@click.option("--check-only", is_flag=True, default=False, help="only check, don't update")
+@click.option(
+    "--check-only", is_flag=True, default=False, help="only check, don't update"
+)
 @click.pass_context
 def command(ctx, check_only):
-    '''update shiv binary from a distribution url'''
+    """update shiv binary from a distribution url"""
     # Get the distribution URL from config
-    config_key = 'distribution-url'
+    config_key = "distribution-url"
     config_file_path = Path(os.path.expanduser("~/.laconic-so/config.yml"))
     if not config_file_path.exists():
         _error_exit(f"Error: Config file: {config_file_path} not found")
@@ -59,7 +61,9 @@ def command(ctx, check_only):
         _error_exit(f"ERROR: distribution url: {distribution_url} is not valid")
     # Figure out the filename for ourselves
     shiv_binary_path = Path(sys.argv[0])
-    timestamp_filename = f"laconic-so-download-{datetime.datetime.now().strftime('%y%m%d-%H%M%S')}"
+    timestamp_filename = (
+        f"laconic-so-download-{datetime.datetime.now().strftime('%y%m%d-%H%M%S')}"
+    )
     temp_download_path = shiv_binary_path.parent.joinpath(timestamp_filename)
     # Download the file to a temp filename
     if ctx.obj.verbose:
@@ -87,4 +91,4 @@ def command(ctx, check_only):
         print(f"Replacing: {shiv_binary_path} with {temp_download_path}")
     os.replace(temp_download_path, shiv_binary_path)
     if not ctx.obj.quiet:
-        print("Run \"laconic-so version\" to see the newly installed version")
+        print('Run "laconic-so version" to see the newly installed version')
@@ -19,7 +19,7 @@ import sys
 import ruamel.yaml
 from pathlib import Path
 from dotenv import dotenv_values
-from typing import Mapping, Set, List
+from typing import Mapping, NoReturn, Optional, Set, List
 from stack_orchestrator.constants import stack_file_name, deployment_file_name


@@ -38,8 +38,10 @@ def get_stack_path(stack):
     if stack_is_external(stack):
         stack_path = Path(stack)
     else:
-        # In order to be compatible with Python 3.8 we need to use this hack to get the path:
-        # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
+        # In order to be compatible with Python 3.8 we need to use this hack
+        # to get the path:
+        # See: https://stackoverflow.com/questions/25389095/
+        # python-get-path-of-root-project-structure
         stack_path = Path(__file__).absolute().parent.joinpath("data", "stacks", stack)
     return stack_path

@@ -47,10 +49,15 @@ def get_stack_path(stack):
 def get_dev_root_path(ctx):
     if ctx and ctx.local_stack:
         # TODO: This code probably doesn't work
-        dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
-        print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
+        dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")]
+        print(
+            f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
+            f"{dev_root_path}"
+        )
     else:
-        dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
+        dev_root_path = os.path.expanduser(
+            str(config("CERC_REPO_BASE_DIR", default="~/cerc"))
+        )
     return dev_root_path


@@ -78,6 +85,22 @@ def get_pod_list(parsed_stack):
     return result


+def get_job_list(parsed_stack):
+    # Return list of jobs from stack config, or empty list if no jobs defined
+    if "jobs" not in parsed_stack:
+        return []
+    jobs = parsed_stack["jobs"]
+    if not jobs:
+        return []
+    if type(jobs[0]) is str:
+        result = jobs
+    else:
+        result = []
+        for job in jobs:
+            result.append(job["name"])
+    return result


 def get_plugin_code_paths(stack) -> List[Path]:
     parsed_stack = get_parsed_stack_config(stack)
     pods = parsed_stack["pods"]
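
Reviewer note: the new get_job_list accepts the same two shapes that the existing pods list already uses, either a plain list of names or a list of mappings with a "name" key. A hedged, condensed restatement with invented stack data follows, purely to illustrate the accepted inputs.

# Condensed restatement of get_job_list() above -- for illustration only.
def get_job_list(parsed_stack):
    jobs = parsed_stack.get("jobs") or []
    if not jobs:
        return []
    return jobs if isinstance(jobs[0], str) else [job["name"] for job in jobs]


# Job and pod names below are invented examples, not taken from a real stack:
assert get_job_list({"jobs": ["db-migrate", "seed-data"]}) == ["db-migrate", "seed-data"]
assert get_job_list({"jobs": [{"name": "db-migrate"}, {"name": "seed-data"}]}) == ["db-migrate", "seed-data"]
assert get_job_list({"pods": ["some-pod"]}) == []  # no "jobs" key, or an empty list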
@@ -86,7 +109,9 @@ def get_plugin_code_paths(stack) -> List[Path]:
         if type(pod) is str:
             result.add(get_stack_path(stack))
         else:
-            pod_root_dir = os.path.join(get_dev_root_path(None), pod["repository"].split("/")[-1], pod["path"])
+            pod_root_dir = os.path.join(
+                get_dev_root_path(None), pod["repository"].split("/")[-1], pod["path"]
+            )
             result.add(Path(os.path.join(pod_root_dir, "stack")))
     return list(result)

@@ -119,25 +144,61 @@ def resolve_compose_file(stack, pod_name: str):
     return compose_base.joinpath(f"docker-compose-{pod_name}.yml")


+# Find a job compose file in compose-jobs directory
+def resolve_job_compose_file(stack, job_name: str):
+    if stack_is_external(stack):
+        # First try looking in the external stack for the job compose file
+        compose_jobs_base = Path(stack).parent.parent.joinpath("compose-jobs")
+        proposed_file = compose_jobs_base.joinpath(f"docker-compose-{job_name}.yml")
+        if proposed_file.exists():
+            return proposed_file
+        # If we don't find it fall through to the internal case
+    # TODO: Add internal compose-jobs directory support if needed
+    # For now, jobs are expected to be in external stacks only
+    compose_jobs_base = Path(stack).parent.parent.joinpath("compose-jobs")
+    return compose_jobs_base.joinpath(f"docker-compose-{job_name}.yml")


 def get_pod_file_path(stack, parsed_stack, pod_name: str):
     pods = parsed_stack["pods"]
+    result = None
     if type(pods[0]) is str:
         result = resolve_compose_file(stack, pod_name)
     else:
         for pod in pods:
             if pod["name"] == pod_name:
-                pod_root_dir = os.path.join(get_dev_root_path(None), pod["repository"].split("/")[-1], pod["path"])
+                pod_root_dir = os.path.join(
+                    get_dev_root_path(None),
+                    pod["repository"].split("/")[-1],
+                    pod["path"],
+                )
                 result = os.path.join(pod_root_dir, "docker-compose.yml")
     return result


+def get_job_file_path(stack, parsed_stack, job_name: str):
+    if "jobs" not in parsed_stack or not parsed_stack["jobs"]:
+        return None
+    jobs = parsed_stack["jobs"]
+    if type(jobs[0]) is str:
+        result = resolve_job_compose_file(stack, job_name)
+    else:
+        # TODO: Support complex job definitions if needed
+        result = resolve_job_compose_file(stack, job_name)
+    return result


 def get_pod_script_paths(parsed_stack, pod_name: str):
     pods = parsed_stack["pods"]
     result = []
     if not type(pods[0]) is str:
         for pod in pods:
             if pod["name"] == pod_name:
-                pod_root_dir = os.path.join(get_dev_root_path(None), pod["repository"].split("/")[-1], pod["path"])
+                pod_root_dir = os.path.join(
+                    get_dev_root_path(None),
+                    pod["repository"].split("/")[-1],
+                    pod["path"],
+                )
                 if "pre_start_command" in pod:
                     result.append(os.path.join(pod_root_dir, pod["pre_start_command"]))
                 if "post_start_command" in pod:
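
Reviewer note: a hedged sketch of the on-disk layout implied by Path(stack).parent.parent.joinpath("compose-jobs") above, assuming the usual <root>/stacks/<stack-name> layout for an external stack. The root path, stack name and job name below are invented for illustration.

from pathlib import Path

# Hypothetical external stack directory and job name:
stack = Path("/home/me/my-external-stack/stacks/my-stack")
job_name = "db-migrate"

# resolve_job_compose_file() looks two levels up from the stack directory,
# i.e. in a compose-jobs/ directory that sits alongside stacks/:
expected = stack.parent.parent / "compose-jobs" / f"docker-compose-{job_name}.yml"
print(expected)
# /home/me/my-external-stack/compose-jobs/docker-compose-db-migrate.yml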
@@ -147,6 +208,7 @@ def get_pod_script_paths(parsed_stack, pod_name: str):

 def pod_has_scripts(parsed_stack, pod_name: str):
     pods = parsed_stack["pods"]
+    result = False
     if type(pods[0]) is str:
         result = False
     else:
@@ -158,7 +220,8 @@ def pod_has_scripts(parsed_stack, pod_name: str):

 def get_internal_compose_file_dir():
     # TODO: refactor to use common code with deploy command
-    # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
+    # See:
+    # https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
     data_dir = Path(__file__).absolute().parent.joinpath("data")
     source_compose_dir = data_dir.joinpath("compose")
     return source_compose_dir
@@ -180,9 +243,7 @@ def get_k8s_dir():
 def get_parsed_deployment_spec(spec_file):
     spec_file_path = Path(spec_file)
     try:
-        with spec_file_path:
-            deploy_spec = get_yaml().load(open(spec_file_path, "r"))
-            return deploy_spec
+        return get_yaml().load(open(spec_file_path, "r"))
     except FileNotFoundError as error:
         # We try here to generate a useful diagnostic error
         print(f"Error: spec file: {spec_file_path} does not exist")
@@ -222,15 +283,15 @@ def global_options2(ctx):
     return ctx.parent.obj


-def error_exit(s):
+def error_exit(s) -> NoReturn:
     print(f"ERROR: {s}")
     sys.exit(1)


-def warn_exit(s):
+def warn_exit(s) -> NoReturn:
     print(f"WARN: {s}")
     sys.exit(0)


-def env_var_map_from_file(file: Path) -> Mapping[str, str]:
+def env_var_map_from_file(file: Path) -> Mapping[str, Optional[str]]:
     return dotenv_values(file)
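
Reviewer note on the typing changes above: annotating error_exit and warn_exit as NoReturn tells a type checker that control never comes back from them, and Mapping[str, Optional[str]] reflects that python-dotenv's dotenv_values can return None for keys with no value. A small, hedged illustration of why that matters (the lookup helper below is hypothetical, not part of the codebase):

from typing import Mapping, NoReturn, Optional
import sys


def error_exit(s) -> NoReturn:
    print(f"ERROR: {s}")
    sys.exit(1)


def lookup(env: Mapping[str, Optional[str]], key: str) -> str:
    value = env.get(key)
    if value is None:
        error_exit(f"{key} is unset")
    # Because error_exit is NoReturn, a checker narrows value to str here
    # instead of flagging a possible None return.
    return value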
@@ -20,10 +20,11 @@ from importlib import resources, metadata
 @click.command()
 @click.pass_context
 def command(ctx):
-    '''print tool version'''
+    """print tool version"""

     # See: https://stackoverflow.com/a/20885799/1701505
     from stack_orchestrator import data

     if resources.is_resource(data, "build_tag.txt"):
         with resources.open_text(data, "build_tag.txt") as version_file:
             # TODO: code better version that skips comment lines
@@ -14,8 +14,13 @@ delete_cluster_exit () {

 # Test basic stack-orchestrator deploy
 echo "Running stack-orchestrator deploy test"
-# Bit of a hack, test the most recent package
-TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
+
+if [ "$1" == "from-path" ]; then
+    TEST_TARGET_SO="laconic-so"
+else
+    TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
+fi

 # Set a non-default repo dir
 export CERC_REPO_BASE_DIR=~/stack-orchestrator-test/repo-base-dir
 echo "Testing this package: $TEST_TARGET_SO"
@@ -40,7 +40,7 @@ sleep 3
 wget --tries 20 --retry-connrefused --waitretry=3 -O test.before -m http://localhost:3000

 docker logs $CONTAINER_ID
-docker remove -f $CONTAINER_ID
+docker rm -f $CONTAINER_ID

 echo "Running app container test"
 CONTAINER_ID=$(docker run -p 3000:80 -e CERC_WEBAPP_DEBUG=$CHECK -e CERC_SCRIPT_DEBUG=$CERC_SCRIPT_DEBUG -d ${app_image_name})
@@ -48,7 +48,7 @@ sleep 3
 wget --tries 20 --retry-connrefused --waitretry=3 -O test.after -m http://localhost:3000

 docker logs $CONTAINER_ID
-docker remove -f $CONTAINER_ID
+docker rm -f $CONTAINER_ID

 echo "###########################################################################"
 echo ""