# Compare commits

1 commit: `main` ... `pm-update-`

| Author | SHA1 | Date |
|--------|------|------|
|        | 2fce823123 |      |
```diff
@@ -1,23 +1,19 @@
-name: K8s Deployment Control Test
+name: Fixturenet-Eth-Plugeth-Arm-Test
 
 on:
-  pull_request:
-    branches:
-      - main
   push:
     branches: '*'
     paths:
       - '!**'
-      - '.gitea/workflows/triggers/test-k8s-deployment-control'
-      - '.gitea/workflows/test-k8s-deployment-control.yml'
-      - 'tests/k8s-deployment-control/run-test.sh'
+      - '.gitea/workflows/triggers/fixturenet-eth-plugeth-arm-test'
   schedule: # Note: coordinate with other tests to not overload runners at the same time of day
-    - cron: '3 30 * * *'
+    - cron: '2 14 * * *'
 
 
 jobs:
   test:
-    name: "Run deployment control suite on kind/k8s"
-    runs-on: ubuntu-22.04
+    name: "Run an Ethereum plugeth fixturenet test"
+    runs-on: ubuntu-latest-arm
     steps:
       - name: "Clone project repository"
         uses: actions/checkout@v3
@@ -36,22 +32,13 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv==1.0.6
+        run: pip install shiv
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
         run: ./scripts/build_shiv_package.sh
-      - name: "Check cgroups version"
-        run: mount | grep cgroup
-      - name: "Install kind"
-        run: ./tests/scripts/install-kind.sh
-      - name: "Install Kubectl"
-        run: ./tests/scripts/install-kubectl.sh
-      - name: "Run k8s deployment control test"
-        run: |
-          source /opt/bash-utils/cgroup-helper.sh
-          join_cgroup
-          ./tests/k8s-deployment-control/run-test.sh
+      - name: "Run fixturenet-eth tests"
+        run: ./tests/fixturenet-eth-plugeth/run-test.sh
       - name: Notify Vulcanize Slack on CI failure
         if: ${{ always() && github.ref_name == 'main' }}
         uses: ravsamhq/notify-slack-action@v2
```
`.gitea/workflows/fixturenet-eth-plugeth-test.yml` — new file (57 lines):

```yaml
name: Fixturenet-Eth-Plugeth-Test

on:
  push:
    branches: '*'
    paths:
      - '!**'
      - '.gitea/workflows/triggers/fixturenet-eth-plugeth-test'
  schedule: # Note: coordinate with other tests to not overload runners at the same time of day
    - cron: '2 14 * * *'


jobs:
  test:
    name: "Run an Ethereum plugeth fixturenet test"
    runs-on: ubuntu-latest
    steps:
      - name: "Clone project repository"
        uses: actions/checkout@v3
      # At present the stock setup-python action fails on Linux/aarch64
      # Conditional steps below workaround this by using deadsnakes for that case only
      - name: "Install Python for ARM on Linux"
        if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
        uses: deadsnakes/action@v3.0.1
        with:
          python-version: '3.8'
      - name: "Install Python cases other than ARM on Linux"
        if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
        uses: actions/setup-python@v4
        with:
          python-version: '3.8'
      - name: "Print Python version"
        run: python3 --version
      - name: "Install shiv"
        run: pip install shiv
      - name: "Generate build version file"
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
        run: ./scripts/build_shiv_package.sh
      - name: "Run fixturenet-eth tests"
        run: ./tests/fixturenet-eth-plugeth/run-test.sh
      - name: Notify Vulcanize Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
      - name: Notify DeepStack Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
```
`.gitea/workflows/fixturenet-eth-test.yml` — new file (55 lines):

```yaml
name: Fixturenet-Eth-Test

on:
  push:
    branches: '*'
    paths:
      - '!**'
      - '.gitea/workflows/triggers/fixturenet-eth-test'


jobs:
  test:
    name: "Run an Ethereum fixturenet test"
    runs-on: ubuntu-latest
    steps:
      - name: "Clone project repository"
        uses: actions/checkout@v3
      # At present the stock setup-python action fails on Linux/aarch64
      # Conditional steps below workaround this by using deadsnakes for that case only
      - name: "Install Python for ARM on Linux"
        if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
        uses: deadsnakes/action@v3.0.1
        with:
          python-version: '3.8'
      - name: "Install Python cases other than ARM on Linux"
        if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
        uses: actions/setup-python@v4
        with:
          python-version: '3.8'
      - name: "Print Python version"
        run: python3 --version
      - name: "Install shiv"
        run: pip install shiv
      - name: "Generate build version file"
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
        run: ./scripts/build_shiv_package.sh
      - name: "Run fixturenet-eth tests"
        run: ./tests/fixturenet-eth/run-test.sh
      - name: Notify Vulcanize Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
      - name: Notify DeepStack Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
```
Further workflow hunks (`shiv` unpinned; `pull_request` branch filters broadened):

```diff
@@ -39,7 +39,7 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv==1.0.6
+        run: pip install shiv
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
@@ -35,7 +35,7 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv==1.0.6
+        run: pip install shiv
       - name: "Build local shiv package"
         id: build
         run: |
@@ -33,7 +33,7 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv==1.0.6
+        run: pip install shiv
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
@@ -33,7 +33,7 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv==1.0.6
+        run: pip install shiv
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
@@ -2,8 +2,7 @@ name: Deploy Test
 
 on:
   pull_request:
-    branches:
-      - main
+    branches: '*'
   push:
     branches:
       - main
@@ -34,7 +33,7 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv==1.0.6
+        run: pip install shiv
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
@@ -33,7 +33,7 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv==1.0.6
+        run: pip install shiv
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
@@ -2,8 +2,7 @@ name: K8s Deploy Test
 
 on:
   pull_request:
-    branches:
-      - main
+    branches: '*'
   push:
     branches: '*'
     paths:
@@ -36,7 +35,7 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv==1.0.6
+        run: pip install shiv
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
@@ -2,8 +2,7 @@ name: Webapp Test
 
 on:
   pull_request:
-    branches:
-      - main
+    branches: '*'
   push:
     branches:
       - main
@@ -33,7 +32,7 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv==1.0.6
+        run: pip install shiv
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
@@ -33,7 +33,7 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv==1.0.6
+        run: pip install shiv
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
```
New file (2 lines):

```diff
@@ -0,0 +1,2 @@
+Change this file to trigger running the fixturenet-eth-plugeth-arm-test CI job
+
```
`.gitea/workflows/triggers/fixturenet-eth-plugeth-test` — new file (3 lines):

```diff
@@ -0,0 +1,3 @@
+Change this file to trigger running the fixturenet-eth-plugeth-test CI job
+trigger
+trigger
```
`.gitea/workflows/triggers/fixturenet-eth-test` — new file (2 lines):

```diff
@@ -0,0 +1,2 @@
+Change this file to trigger running the fixturenet-eth-test CI job
+
```
```diff
@@ -7,4 +7,3 @@ Trigger
 Trigger
 Trigger
 Trigger
-Trigger
```
```diff
@@ -1,3 +1 @@
 Change this file to trigger running the test-container-registry CI job
-Triggered: 2026-01-21
-Triggered: 2026-01-21 19:28:29
```
```diff
@@ -1,2 +1,2 @@
 Change this file to trigger running the test-database CI job
 Trigger test run
```
```diff
@@ -1 +1,2 @@
 Change this file to trigger running the fixturenet-eth-test CI job
+
```
Deleted file (34 lines):

```yaml
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: check-yaml
        args: ['--allow-multiple-documents']
      - id: check-json
      - id: check-merge-conflict
      - id: check-added-large-files

  - repo: https://github.com/psf/black
    rev: 23.12.1
    hooks:
      - id: black
        language_version: python3

  - repo: https://github.com/PyCQA/flake8
    rev: 7.1.1
    hooks:
      - id: flake8
        args: ['--max-line-length=88', '--extend-ignore=E203,W503,E402']

  - repo: https://github.com/RobertCraigie/pyright-python
    rev: v1.1.345
    hooks:
      - id: pyright

  - repo: https://github.com/adrienverge/yamllint
    rev: v1.35.1
    hooks:
      - id: yamllint
        args: [-d, relaxed]
```
Deleted file (151 lines):

# Plan: Make Stack-Orchestrator AI-Friendly

## Goal

Make the stack-orchestrator repository easier for AI tools (Claude Code, Cursor, Copilot) to understand and use for generating stacks, including adding a `create-stack` command.

---

## Part 1: Documentation & Context Files

### 1.1 Add CLAUDE.md

Create a root-level context file for AI assistants.

**File:** `CLAUDE.md`

Contents:
- Project overview (what stack-orchestrator does)
- Stack creation workflow (step-by-step)
- File naming conventions
- Required vs optional fields in stack.yml
- Common patterns and anti-patterns
- Links to example stacks (simple, medium, complex)

### 1.2 Add JSON Schema for stack.yml

Create formal validation schema.

**File:** `schemas/stack-schema.json`

Benefits:
- AI tools can validate generated stacks
- IDEs provide autocomplete
- CI can catch errors early
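The plan does not spell the schema out; as a rough sketch, it could encode the constraints listed in 1.4 below (field names and patterns here are illustrative, not the final schema):

```json
{
  "$schema": "http://json-schema.org/draft-07/schema#",
  "title": "stack.yml",
  "type": "object",
  "required": ["version", "name"],
  "properties": {
    "version": { "enum": ["1.0", "1.1", "1.2"] },
    "name": { "type": "string", "pattern": "^[a-z0-9][a-z0-9-]*[a-z0-9]$" },
    "description": { "type": "string" },
    "repos": { "type": "array", "items": { "type": "string" } },
    "containers": { "type": "array", "items": { "type": "string", "pattern": "^cerc/" } },
    "pods": { "type": "array", "items": { "type": "string" } }
  }
}
```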
### 1.3 Add Template Stack with Comments

Create an annotated template for reference.

**File:** `stack_orchestrator/data/stacks/_template/stack.yml`

```yaml
# Stack definition template - copy this directory to create a new stack
version: "1.2"                             # Required: 1.0, 1.1, or 1.2
name: my-stack                             # Required: lowercase, hyphens only
description: "Human-readable description" # Optional
repos:                                     # Git repositories to clone
  - github.com/org/repo
containers:  # Container images to build (must have matching container-build/)
  - cerc/my-container
pods:        # Deployment units (must have matching docker-compose-{pod}.yml)
  - my-pod
```

### 1.4 Document Validation Rules

Create explicit documentation of constraints currently scattered in code.

**File:** `docs/stack-format.md`

Contents (a validation sketch follows this list):
- Container names must start with `cerc/`
- Pod names must match compose file: `docker-compose-{pod}.yml`
- Repository format: `host/org/repo[@ref]`
- Stack directory name should match `name` field
- Version field options and differences
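These rules are mechanical enough to check in a few lines; a hypothetical validator (not existing code) might look like this:

```python
import re

NAME_RE = re.compile(r"^[a-z0-9][a-z0-9-]*[a-z0-9]$")           # lowercase, hyphens
REPO_RE = re.compile(r"^[\w.-]+/[\w.-]+/[\w.-]+(@[\w./-]+)?$")  # host/org/repo[@ref]


def check_stack(stack: dict) -> list:
    """Return a list of rule violations for a parsed stack.yml."""
    errors = []
    if not NAME_RE.match(stack.get("name", "")):
        errors.append("name must be lowercase alphanumeric with hyphens")
    for container in stack.get("containers", []):
        if not container.startswith("cerc/"):
            errors.append(f"container '{container}' must start with 'cerc/'")
    for repo in stack.get("repos", []):
        if not REPO_RE.match(repo):
            errors.append(f"repo '{repo}' must match host/org/repo[@ref]")
    return errors
```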
---

## Part 2: Add `create-stack` Command

### 2.1 Command Overview

```bash
laconic-so create-stack --repo github.com/org/my-app [--name my-app] [--type webapp]
```

**Behavior** (step 1 is sketched below the list):
1. Parse repo URL to extract app name (if `--name` not provided)
2. Create `stacks/{name}/stack.yml`
3. Create `container-build/cerc-{name}/Dockerfile` and `build.sh`
4. Create `compose/docker-compose-{name}.yml`
5. Update list files (repository-list.txt, container-image-list.txt, pod-list.txt)
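The name derivation in step 1 could be as small as this (a hypothetical helper, not existing code):

```python
def default_stack_name(repo_url: str) -> str:
    """Derive a default stack name: "github.com/org/my-app@v1" -> "my-app"."""
    return repo_url.rstrip("/").split("/")[-1].split("@")[0]
```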
### 2.2 Files to Create

| File | Purpose |
|------|---------|
| `stack_orchestrator/create/__init__.py` | Package init |
| `stack_orchestrator/create/create_stack.py` | Command implementation |

### 2.3 Files to Modify

| File | Change |
|------|--------|
| `stack_orchestrator/main.py` | Add import and `cli.add_command()` |

### 2.4 Command Options

| Option | Required | Description |
|--------|----------|-------------|
| `--repo` | Yes | Git repository URL (e.g., github.com/org/repo) |
| `--name` | No | Stack name (defaults to repo name) |
| `--type` | No | Template type: webapp, service, empty (default: webapp) |
| `--force` | No | Overwrite existing files |

### 2.5 Template Types

| Type | Base Image | Port | Use Case |
|------|------------|------|----------|
| webapp | node:20-bullseye-slim | 3000 | React/Vue/Next.js apps |
| service | python:3.11-slim | 8080 | Python backend services |
| empty | none | none | Custom from scratch |

---

## Part 3: Implementation Summary

### New Files (6)

1. `CLAUDE.md` - AI assistant context
2. `schemas/stack-schema.json` - Validation schema
3. `stack_orchestrator/data/stacks/_template/stack.yml` - Annotated template
4. `docs/stack-format.md` - Stack format documentation
5. `stack_orchestrator/create/__init__.py` - Package init
6. `stack_orchestrator/create/create_stack.py` - Command implementation

### Modified Files (1)

1. `stack_orchestrator/main.py` - Register create-stack command

---

## Verification

```bash
# 1. Command appears in help
laconic-so --help | grep create-stack

# 2. Dry run works
laconic-so --dry-run create-stack --repo github.com/org/test-app

# 3. Creates all expected files
laconic-so create-stack --repo github.com/org/test-app
ls stack_orchestrator/data/stacks/test-app/
ls stack_orchestrator/data/container-build/cerc-test-app/
ls stack_orchestrator/data/compose/docker-compose-test-app.yml

# 4. Build works with generated stack
laconic-so --stack test-app build-containers
```
`CLAUDE.md` — deleted file (50 lines):

# CLAUDE.md

This file provides guidance to Claude Code when working with the stack-orchestrator project.

## Some rules to follow
NEVER speculate about the cause of something
NEVER assume your hypotheses are true without evidence

ALWAYS clearly state when something is a hypothesis
ALWAYS use evidence from the systems you're interacting with to support your claims and hypotheses

## Key Principles

### Development Guidelines
- **Single responsibility** - Each component has one clear purpose
- **Fail fast** - Let errors propagate, don't hide failures
- **DRY/KISS** - Minimize duplication and complexity

## Development Philosophy: Conversational Literate Programming

### Approach
This project follows principles inspired by literate programming, where development happens through explanatory conversation rather than code-first implementation.

### Core Principles
- **Documentation-First**: All changes begin with discussion of intent and reasoning
- **Narrative-Driven**: Complex systems are explained through conversational exploration
- **Justification Required**: Every coding task must have a corresponding TODO.md item explaining the "why"
- **Iterative Understanding**: Architecture and implementation evolve through dialogue

### Working Method
1. **Explore and Understand**: Read existing code to understand current state
2. **Discuss Architecture**: Workshop complex design decisions through conversation
3. **Document Intent**: Update TODO.md with clear justification before coding
4. **Explain Changes**: Each modification includes reasoning and context
5. **Maintain Narrative**: Conversations serve as living documentation of design evolution

### Implementation Guidelines
- Treat conversations as primary documentation
- Explain architectural decisions before implementing
- Use TODO.md as the "literate document" that justifies all work
- Maintain clear narrative threads across sessions
- Workshop complex ideas before coding

This approach treats the human-AI collaboration as a form of **conversational literate programming** where understanding emerges through dialogue before code implementation.

## Insights and Observations

### Design Principles
- **When something times out, that doesn't mean it needs a longer timeout; it means something that was expected never happened, not that we need to wait longer for it.**
- **NEVER change a timeout because you believe something truncated; you don't understand timeouts, so don't edit them unless explicitly told to by the user.**
`LICENSE`:

```diff
@@ -658,4 +658,4 @@
 You should also get your employer (if you work as a programmer) or school,
 if any, to sign a "copyright disclaimer" for the program, if necessary.
 For more information on this, and how to apply and follow the GNU AGPL, see
 <http://www.gnu.org/licenses/>.
```
````diff
@@ -26,7 +26,7 @@ curl -SL https://github.com/docker/compose/releases/download/v2.11.2/docker-comp
 chmod +x ~/.docker/cli-plugins/docker-compose
 ```
 
 Next decide on a directory where you would like to put the stack-orchestrator program. Typically this would be
 a "user" binary directory such as `~/bin` or perhaps `/usr/local/laconic` or possibly just the current working directory.
 
 Now, having selected that directory, download the latest release from [this page](https://git.vdb.to/cerc-io/stack-orchestrator/tags) into it (we're using `~/bin` below for concreteness but edit to suit if you selected a different directory). Also be sure that the destination directory exists and is writable:
@@ -78,3 +78,5 @@ See the [CONTRIBUTING.md](/docs/CONTRIBUTING.md) for developer mode install.
 ## Platform Support
 
 Native aarm64 is _not_ currently supported. x64 emulation on ARM64 macos should work (not yet tested).
+
+
````
Deleted file (413 lines):

# Implementing `laconic-so create-stack` Command

A plan for adding a new CLI command to scaffold stack files automatically.

---

## Overview

Add a `create-stack` command that generates all required files for a new stack:

```bash
laconic-so create-stack --name my-stack --type webapp
```

**Output:**
```
stack_orchestrator/data/
├── stacks/my-stack/stack.yml
├── container-build/cerc-my-stack/
│   ├── Dockerfile
│   └── build.sh
└── compose/docker-compose-my-stack.yml

Updated: repository-list.txt, container-image-list.txt, pod-list.txt
```

---

## CLI Architecture Summary

### Command Registration Pattern

Commands are Click functions registered in `main.py`:

```python
# main.py (line ~70)
from stack_orchestrator.create import create_stack
cli.add_command(create_stack.command, "create-stack")
```

### Global Options Access

```python
from stack_orchestrator.opts import opts

if not opts.o.quiet:
    print("message")
if opts.o.dry_run:
    print("(would create files)")
```

### Key Utilities

| Function | Location | Purpose |
|----------|----------|---------|
| `get_yaml()` | `util.py` | YAML parser (ruamel.yaml) |
| `get_stack_path(stack)` | `util.py` | Resolve stack directory path |
| `error_exit(msg)` | `util.py` | Print error and exit(1) |
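For orientation, a typical caller would combine these helpers roughly like this (a sketch only; it assumes `get_stack_path` returns a `pathlib.Path`, which the table implies but does not state):

```python
from stack_orchestrator.util import error_exit, get_stack_path, get_yaml

stack_dir = get_stack_path("test")              # resolve the stack's directory
stack_file = stack_dir.joinpath("stack.yml")
if not stack_file.exists():
    error_exit(f"no stack.yml in {stack_dir}")  # prints the error and exits(1)
with open(stack_file) as f:
    stack = get_yaml().load(f)                  # ruamel.yaml parser
```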
---

## Files to Create

### 1. Command Module

**`stack_orchestrator/create/__init__.py`**
```python
# Empty file to make this a package
```

**`stack_orchestrator/create/create_stack.py`**
```python
import click
import os
import re
from pathlib import Path
from stack_orchestrator.opts import opts
from stack_orchestrator.util import error_exit, get_yaml

# Template types
STACK_TEMPLATES = {
    "webapp": {
        "description": "Web application with Node.js",
        "base_image": "node:20-bullseye-slim",
        "port": 3000,
    },
    "service": {
        "description": "Backend service",
        "base_image": "python:3.11-slim",
        "port": 8080,
    },
    "empty": {
        "description": "Minimal stack with no defaults",
        "base_image": None,
        "port": None,
    },
}


def get_data_dir() -> Path:
    """Get path to stack_orchestrator/data directory"""
    return Path(__file__).absolute().parent.parent.joinpath("data")


def validate_stack_name(name: str) -> None:
    """Validate stack name follows conventions"""
    # The pattern itself requires at least two characters, so no separate length check is needed
    if not re.match(r'^[a-z0-9][a-z0-9-]*[a-z0-9]$', name):
        error_exit(f"Invalid stack name '{name}'. Use lowercase alphanumeric with hyphens.")
    if name.startswith("cerc-"):
        error_exit("Stack name should not start with 'cerc-' (container names will add this prefix)")


def create_stack_yml(stack_dir: Path, name: str, template: dict, repo_url: str) -> None:
    """Create stack.yml file"""
    config = {
        "version": "1.2",
        "name": name,
        "description": template.get("description", f"Stack: {name}"),
        "repos": [repo_url] if repo_url else [],
        "containers": [f"cerc/{name}"],
        "pods": [name],
    }

    stack_dir.mkdir(parents=True, exist_ok=True)
    with open(stack_dir / "stack.yml", "w") as f:
        get_yaml().dump(config, f)


def create_dockerfile(container_dir: Path, name: str, template: dict) -> None:
    """Create Dockerfile"""
    base_image = template.get("base_image", "node:20-bullseye-slim")
    port = template.get("port", 3000)

    dockerfile_content = f'''# Build stage
FROM {base_image} AS builder

WORKDIR /app
COPY package*.json ./
RUN npm ci
COPY . .
RUN npm run build

# Production stage
FROM {base_image}

WORKDIR /app
COPY package*.json ./
RUN npm ci --only=production
COPY --from=builder /app/dist ./dist

EXPOSE {port}
CMD ["npm", "run", "start"]
'''

    container_dir.mkdir(parents=True, exist_ok=True)
    with open(container_dir / "Dockerfile", "w") as f:
        f.write(dockerfile_content)


def create_build_script(container_dir: Path, name: str) -> None:
    """Create build.sh script"""
    build_script = f'''#!/usr/bin/env bash
# Build cerc/{name}

source ${{CERC_CONTAINER_BASE_DIR}}/build-base.sh

SCRIPT_DIR=$( cd -- "$( dirname -- "${{BASH_SOURCE[0]}}" )" &> /dev/null && pwd )

docker build -t cerc/{name}:local \\
    -f ${{SCRIPT_DIR}}/Dockerfile \\
    ${{build_command_args}} \\
    ${{CERC_REPO_BASE_DIR}}/{name}
'''

    build_path = container_dir / "build.sh"
    with open(build_path, "w") as f:
        f.write(build_script)

    # Make executable
    os.chmod(build_path, 0o755)


def create_compose_file(compose_dir: Path, name: str, template: dict) -> None:
    """Create docker-compose file"""
    port = template.get("port", 3000)

    compose_content = {
        "version": "3.8",
        "services": {
            name: {
                "image": f"cerc/{name}:local",
                "restart": "unless-stopped",
                "ports": [f"${{HOST_PORT:-{port}}}:{port}"],
                "environment": {
                    "NODE_ENV": "${NODE_ENV:-production}",
                },
            }
        }
    }

    with open(compose_dir / f"docker-compose-{name}.yml", "w") as f:
        get_yaml().dump(compose_content, f)


def update_list_file(data_dir: Path, filename: str, entry: str) -> None:
    """Add entry to a list file if not already present"""
    list_path = data_dir / filename

    # Read existing entries
    existing = set()
    if list_path.exists():
        with open(list_path, "r") as f:
            existing = set(line.strip() for line in f if line.strip())

    # Add new entry
    if entry not in existing:
        with open(list_path, "a") as f:
            f.write(f"{entry}\n")


@click.command()
@click.option("--name", required=True, help="Name of the new stack (lowercase, hyphens)")
@click.option("--type", "stack_type", default="webapp",
              type=click.Choice(list(STACK_TEMPLATES.keys())),
              help="Stack template type")
@click.option("--repo", help="Git repository URL (e.g., github.com/org/repo)")
@click.option("--force", is_flag=True, help="Overwrite existing files")
@click.pass_context
def command(ctx, name: str, stack_type: str, repo: str, force: bool):
    """Create a new stack with all required files.

    Examples:

        laconic-so create-stack --name my-app --type webapp

        laconic-so create-stack --name my-service --type service --repo github.com/org/repo
    """
    # Validate
    validate_stack_name(name)

    template = STACK_TEMPLATES[stack_type]
    data_dir = get_data_dir()

    # Define paths
    stack_dir = data_dir / "stacks" / name
    container_dir = data_dir / "container-build" / f"cerc-{name}"
    compose_dir = data_dir / "compose"

    # Check for existing files
    if not force:
        if stack_dir.exists():
            error_exit(f"Stack already exists: {stack_dir}\nUse --force to overwrite")
        if container_dir.exists():
            error_exit(f"Container build dir exists: {container_dir}\nUse --force to overwrite")

    # Dry run check
    if opts.o.dry_run:
        print(f"Would create stack '{name}' with template '{stack_type}':")
        print(f"  - {stack_dir}/stack.yml")
        print(f"  - {container_dir}/Dockerfile")
        print(f"  - {container_dir}/build.sh")
        print(f"  - {compose_dir}/docker-compose-{name}.yml")
        print("  - Update repository-list.txt")
        print("  - Update container-image-list.txt")
        print("  - Update pod-list.txt")
        return

    # Create files
    if not opts.o.quiet:
        print(f"Creating stack '{name}' with template '{stack_type}'...")

    create_stack_yml(stack_dir, name, template, repo)
    if opts.o.verbose:
        print(f"  Created {stack_dir}/stack.yml")

    create_dockerfile(container_dir, name, template)
    if opts.o.verbose:
        print(f"  Created {container_dir}/Dockerfile")

    create_build_script(container_dir, name)
    if opts.o.verbose:
        print(f"  Created {container_dir}/build.sh")

    create_compose_file(compose_dir, name, template)
    if opts.o.verbose:
        print(f"  Created {compose_dir}/docker-compose-{name}.yml")

    # Update list files
    if repo:
        update_list_file(data_dir, "repository-list.txt", repo)
        if opts.o.verbose:
            print(f"  Added {repo} to repository-list.txt")

    update_list_file(data_dir, "container-image-list.txt", f"cerc/{name}")
    if opts.o.verbose:
        print(f"  Added cerc/{name} to container-image-list.txt")

    update_list_file(data_dir, "pod-list.txt", name)
    if opts.o.verbose:
        print(f"  Added {name} to pod-list.txt")

    # Summary
    if not opts.o.quiet:
        print(f"\nStack '{name}' created successfully!")
        print("\nNext steps:")
        print(f"  1. Edit {stack_dir}/stack.yml")
        print(f"  2. Customize {container_dir}/Dockerfile")
        print(f"  3. Run: laconic-so --stack {name} build-containers")
        print(f"  4. Run: laconic-so --stack {name} deploy-system up")
```

### 2. Register Command in main.py

**Edit `stack_orchestrator/main.py`**

Add import:
```python
from stack_orchestrator.create import create_stack
```

Add command registration (after line ~78):
```python
cli.add_command(create_stack.command, "create-stack")
```

---

## Implementation Steps

### Step 1: Create module structure
```bash
mkdir -p stack_orchestrator/create
touch stack_orchestrator/create/__init__.py
```

### Step 2: Create the command file
Create `stack_orchestrator/create/create_stack.py` with the code above.

### Step 3: Register in main.py
Add the import and `cli.add_command()` line.

### Step 4: Test the command
```bash
# Show help
laconic-so create-stack --help

# Dry run
laconic-so --dry-run create-stack --name test-app --type webapp

# Create a stack
laconic-so create-stack --name test-app --type webapp --repo github.com/org/test-app

# Verify
ls -la stack_orchestrator/data/stacks/test-app/
cat stack_orchestrator/data/stacks/test-app/stack.yml
```

---

## Template Types

| Type | Base Image | Port | Use Case |
|------|------------|------|----------|
| `webapp` | node:20-bullseye-slim | 3000 | React/Vue/Next.js apps |
| `service` | python:3.11-slim | 8080 | Python backend services |
| `empty` | none | none | Custom from scratch |

---

## Future Enhancements

1. **Interactive mode** - Prompt for values if not provided
2. **More templates** - Go, Rust, database stacks
3. **Template from existing** - `--from-stack existing-stack`
4. **External stack support** - Create in custom directory
5. **Validation command** - `laconic-so validate-stack --name my-stack`

---

## Files Modified

| File | Change |
|------|--------|
| `stack_orchestrator/create/__init__.py` | New (empty) |
| `stack_orchestrator/create/create_stack.py` | New (command implementation) |
| `stack_orchestrator/main.py` | Add import and `cli.add_command()` |

---

## Verification

```bash
# 1. Command appears in help
laconic-so --help | grep create-stack

# 2. Dry run works
laconic-so --dry-run create-stack --name verify-test --type webapp

# 3. Full creation works
laconic-so create-stack --name verify-test --type webapp
ls stack_orchestrator/data/stacks/verify-test/
ls stack_orchestrator/data/container-build/cerc-verify-test/
ls stack_orchestrator/data/compose/docker-compose-verify-test.yml

# 4. Build works
laconic-so --stack verify-test build-containers

# 5. Cleanup
rm -rf stack_orchestrator/data/stacks/verify-test
rm -rf stack_orchestrator/data/container-build/cerc-verify-test
rm stack_orchestrator/data/compose/docker-compose-verify-test.yml
```
`TODO.md` — deleted file (16 lines):

# TODO

## Features Needed

### Update Stack Command
We need an "update stack" command in stack orchestrator and cleaner documentation regarding how to do continuous deployment with and without payments.

**Context**: Currently, `deploy init` generates a spec file and `deploy create` creates a deployment directory. The `deployment update` command (added by Thomas Lackey) only syncs env vars and restarts - it doesn't regenerate configurations. There's a gap in the workflow for updating stack configurations after initial deployment.

## Architecture Refactoring

### Separate Deployer from Stack Orchestrator CLI
The deployer logic should be decoupled from the CLI tool to allow independent development and reuse.

### Separate Stacks from Stack Orchestrator Repo
Stacks should live in their own repositories, not bundled with the orchestrator tool. This allows stacks to evolve independently and be maintained by different teams.
Deleted file (550 lines):

# Docker Compose Deployment Guide

## Introduction

### What is a Deployer?

In stack-orchestrator, a **deployer** provides a uniform interface for orchestrating containerized applications. This guide focuses on Docker Compose deployments, which is the default and recommended deployment mode.

While stack-orchestrator also supports Kubernetes (`k8s`) and Kind (`k8s-kind`) deployments, those are out of scope for this guide. See the [Kubernetes Enhancements](./k8s-deployment-enhancements.md) documentation for advanced deployment options.

## Prerequisites

To deploy stacks using Docker Compose, you need:

- Docker Engine (20.10+)
- Docker Compose plugin (v2.0+)
- Python 3.8+
- stack-orchestrator installed (`laconic-so`)

**That's it!** No additional infrastructure is required. If you have Docker installed, you're ready to deploy.

## Deployment Workflow

The typical deployment workflow consists of four main steps:

1. **Setup repositories and build containers** (first time only)
2. **Initialize deployment specification**
3. **Create deployment directory**
4. **Start and manage services**

## Quick Start Example

Here's a complete example using the built-in `test` stack:

```bash
# Step 1: Setup (first time only)
laconic-so --stack test setup-repositories
laconic-so --stack test build-containers

# Step 2: Initialize deployment spec
laconic-so --stack test deploy init --output test-spec.yml

# Step 3: Create deployment directory
laconic-so --stack test deploy create \
    --spec-file test-spec.yml \
    --deployment-dir test-deployment

# Step 4: Start services
laconic-so deployment --dir test-deployment start

# View running services
laconic-so deployment --dir test-deployment ps

# View logs
laconic-so deployment --dir test-deployment logs

# Stop services (preserves data)
laconic-so deployment --dir test-deployment stop
```

## Deployment Workflows

Stack-orchestrator supports two deployment workflows:

### 1. Deployment Directory Workflow (Recommended)

This workflow creates a persistent deployment directory that contains all configuration and data.

**When to use:**
- Production deployments
- When you need to preserve configuration
- When you want to manage multiple deployments
- When you need persistent volume data

**Example:**

```bash
# Initialize deployment spec
laconic-so --stack fixturenet-eth deploy init --output eth-spec.yml

# Optionally edit eth-spec.yml to customize configuration

# Create deployment directory
laconic-so --stack fixturenet-eth deploy create \
    --spec-file eth-spec.yml \
    --deployment-dir my-eth-deployment

# Start the deployment
laconic-so deployment --dir my-eth-deployment start

# Manage the deployment
laconic-so deployment --dir my-eth-deployment ps
laconic-so deployment --dir my-eth-deployment logs
laconic-so deployment --dir my-eth-deployment stop
```

### 2. Quick Deploy Workflow

This workflow deploys directly without creating a persistent deployment directory.

**When to use:**
- Quick testing
- Temporary deployments
- Simple stacks that don't require customization

**Example:**

```bash
# Start the stack directly
laconic-so --stack test deploy up

# Find the mapped host port for the test service (container port 80)
laconic-so --stack test deploy port test 80

# View logs
laconic-so --stack test deploy logs

# Stop (preserves volumes)
laconic-so --stack test deploy down

# Stop and remove volumes
laconic-so --stack test deploy down --delete-volumes
```

## Real-World Example: Ethereum Fixturenet

Deploy a local Ethereum testnet with Geth and Lighthouse:

```bash
# Setup (first time only)
laconic-so --stack fixturenet-eth setup-repositories
laconic-so --stack fixturenet-eth build-containers

# Initialize with default configuration
laconic-so --stack fixturenet-eth deploy init --output eth-spec.yml

# Create deployment
laconic-so --stack fixturenet-eth deploy create \
    --spec-file eth-spec.yml \
    --deployment-dir fixturenet-eth-deployment

# Start the network
laconic-so deployment --dir fixturenet-eth-deployment start

# Check status
laconic-so deployment --dir fixturenet-eth-deployment ps

# Access logs from specific service
laconic-so deployment --dir fixturenet-eth-deployment logs fixturenet-eth-geth-1

# Stop the network (preserves blockchain data)
laconic-so deployment --dir fixturenet-eth-deployment stop

# Start again - blockchain data is preserved
laconic-so deployment --dir fixturenet-eth-deployment start

# Clean up everything including data
laconic-so deployment --dir fixturenet-eth-deployment stop --delete-volumes
```

## Configuration

### Passing Configuration Parameters

Configuration can be passed in three ways:

**1. At init time via `--config` flag:**

```bash
laconic-so --stack test deploy init --output spec.yml \
    --config PARAM1=value1,PARAM2=value2
```

**2. Edit the spec file after init:**

```bash
# Initialize
laconic-so --stack test deploy init --output spec.yml

# Edit spec.yml
vim spec.yml
```

Example spec.yml:
```yaml
stack: test
config:
  PARAM1: value1
  PARAM2: value2
```

**3. Docker Compose defaults:**

Environment variables defined in the stack's `docker-compose-*.yml` files are used as defaults. Configuration from the spec file overrides these defaults.
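For example, a default declared in a compose file is overridden by the spec at deploy time (service and variable names here are illustrative):

```yaml
# compose/docker-compose-test.yml — the stack's default
services:
  test:
    environment:
      PARAM1: ${PARAM1:-default-value}
```

```yaml
# spec.yml — overrides the default for this deployment
stack: test
config:
  PARAM1: value-from-spec
```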
### Port Mapping

By default, services are accessible on randomly assigned host ports. To find the mapped port:

```bash
# Find the host port for container port 80 on service 'webapp'
laconic-so deployment --dir my-deployment port webapp 80

# Output example: 0.0.0.0:32768
```

To configure fixed ports, edit the spec file before creating the deployment:

```yaml
network:
  ports:
    webapp:
      - '8080:80'  # Maps host port 8080 to container port 80
    api:
      - '3000:3000'
```

Then create the deployment:

```bash
laconic-so --stack my-stack deploy create \
    --spec-file spec.yml \
    --deployment-dir my-deployment
```

### Volume Persistence

Volumes are preserved between stop/start cycles by default:

```bash
# Stop but keep data
laconic-so deployment --dir my-deployment stop

# Start again - data is still there
laconic-so deployment --dir my-deployment start
```

To completely remove all data:

```bash
# Stop and delete all volumes
laconic-so deployment --dir my-deployment stop --delete-volumes
```

Volume data is stored in `<deployment-dir>/data/`.

## Common Operations

### Viewing Logs

```bash
# All services, continuous follow
laconic-so deployment --dir my-deployment logs --follow

# Last 100 lines from all services
laconic-so deployment --dir my-deployment logs --tail 100

# Specific service only
laconic-so deployment --dir my-deployment logs webapp

# Combine options
laconic-so deployment --dir my-deployment logs --tail 50 --follow webapp
```

### Executing Commands in Containers

```bash
# Execute a command in a running service
laconic-so deployment --dir my-deployment exec webapp ls -la

# Interactive shell
laconic-so deployment --dir my-deployment exec webapp /bin/bash

# Run command with specific environment variables
laconic-so deployment --dir my-deployment exec webapp env VAR=value command
```

### Checking Service Status

```bash
# List all running services
laconic-so deployment --dir my-deployment ps

# Check using Docker directly
docker ps
```

### Updating a Running Deployment

If you need to change configuration after deployment:

```bash
# 1. Edit the spec file
vim my-deployment/spec.yml

# 2. Regenerate configuration
laconic-so deployment --dir my-deployment update

# 3. Restart services to apply changes
laconic-so deployment --dir my-deployment stop
laconic-so deployment --dir my-deployment start
```

## Multi-Service Deployments

Many stacks deploy multiple services that work together:

```bash
# Deploy a stack with multiple services
laconic-so --stack laconicd-with-console deploy init --output spec.yml
laconic-so --stack laconicd-with-console deploy create \
    --spec-file spec.yml \
    --deployment-dir laconicd-deployment

laconic-so deployment --dir laconicd-deployment start

# View all services
laconic-so deployment --dir laconicd-deployment ps

# View logs from specific services
laconic-so deployment --dir laconicd-deployment logs laconicd
laconic-so deployment --dir laconicd-deployment logs console
```

## ConfigMaps

ConfigMaps allow you to mount configuration files into containers:

```bash
# 1. Create the config directory in your deployment
mkdir -p my-deployment/data/my-config
echo "database_url=postgres://localhost" > my-deployment/data/my-config/app.conf

# 2. Reference in spec file
vim my-deployment/spec.yml
```

Add to spec.yml:
```yaml
configmaps:
  my-config: ./data/my-config
```

```bash
# 3. Restart to apply
laconic-so deployment --dir my-deployment stop
laconic-so deployment --dir my-deployment start
```

The files will be mounted in the container at `/config/` (or as specified by the stack).
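In the generated compose file this amounts to a bind mount, roughly like the following (an assumption about the generated output; the exact container path is stack-specific):

```yaml
services:
  webapp:
    volumes:
      - ../data/my-config:/config
```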
## Deployment Directory Structure
|
|
||||||
|
|
||||||
A typical deployment directory contains:
|
|
||||||
|
|
||||||
```
|
|
||||||
my-deployment/
|
|
||||||
├── compose/
|
|
||||||
│ └── docker-compose-*.yml # Generated compose files
|
|
||||||
├── config.env # Environment variables
|
|
||||||
├── deployment.yml # Deployment metadata
|
|
||||||
├── spec.yml # Deployment specification
|
|
||||||
└── data/ # Volume mounts and configs
|
|
||||||
├── service-data/ # Persistent service data
|
|
||||||
└── config-maps/ # ConfigMap files
|
|
||||||
```
|
|
||||||
|
|
||||||
## Troubleshooting
|
|
||||||
|
|
||||||
### Common Issues
|
|
||||||
|
|
||||||
**Problem: "Cannot connect to Docker daemon"**
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Ensure Docker is running
|
|
||||||
docker ps
|
|
||||||
|
|
||||||
# Start Docker if needed (macOS)
|
|
||||||
open -a Docker
|
|
||||||
|
|
||||||
# Start Docker (Linux)
|
|
||||||
sudo systemctl start docker
|
|
||||||
```
|
|
||||||
|
|
||||||
**Problem: "Port already in use"**
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Either stop the conflicting service or use different ports
|
|
||||||
# Edit spec.yml before creating deployment:
|
|
||||||
|
|
||||||
network:
|
|
||||||
ports:
|
|
||||||
webapp:
|
|
||||||
- '8081:80' # Use 8081 instead of 8080
|
|
||||||
```
|
|
||||||
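
If you need to find what is already bound to the port, standard tooling works (port 8080 here matches the example above):

```bash
# Linux: show the process listening on port 8080
sudo ss -ltnp | grep :8080

# macOS equivalent
lsof -iTCP:8080 -sTCP:LISTEN
```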

**Problem: "Image not found"**

```bash
# Build containers first
laconic-so --stack your-stack build-containers
```

**Problem: Volumes not persisting**

```bash
# Check if you used --delete-volumes when stopping
# Volume data is in: <deployment-dir>/data/

# Don't use --delete-volumes if you want to keep data:
laconic-so deployment --dir my-deployment stop

# Only use --delete-volumes when you want to reset completely:
laconic-so deployment --dir my-deployment stop --delete-volumes
```

**Problem: Services not starting**

```bash
# Check logs for errors
laconic-so deployment --dir my-deployment logs

# Check Docker container status
docker ps -a

# Try stopping and starting again
laconic-so deployment --dir my-deployment stop
laconic-so deployment --dir my-deployment start
```

### Inspecting Deployment State

```bash
# Check deployment directory structure
ls -la my-deployment/

# Check running containers
docker ps

# Check container details
docker inspect <container-name>

# Check networks
docker network ls

# Check volumes
docker volume ls
```

## CLI Commands Reference

### Stack Operations

```bash
# Clone required repositories
laconic-so --stack <name> setup-repositories

# Build container images
laconic-so --stack <name> build-containers
```

### Deployment Initialization

```bash
# Initialize deployment spec with defaults
laconic-so --stack <name> deploy init --output <spec-file>

# Initialize with configuration
laconic-so --stack <name> deploy init --output <spec-file> \
  --config PARAM1=value1,PARAM2=value2
```

### Deployment Creation

```bash
# Create deployment directory from spec
laconic-so --stack <name> deploy create \
  --spec-file <spec-file> \
  --deployment-dir <dir>
```

### Deployment Management

```bash
# Start all services
laconic-so deployment --dir <dir> start

# Stop services (preserves volumes)
laconic-so deployment --dir <dir> stop

# Stop and remove volumes
laconic-so deployment --dir <dir> stop --delete-volumes

# List running services
laconic-so deployment --dir <dir> ps

# View logs
laconic-so deployment --dir <dir> logs [--tail N] [--follow] [service]

# Show mapped port
laconic-so deployment --dir <dir> port <service> <private-port>

# Execute command in service
laconic-so deployment --dir <dir> exec <service> <command>

# Update configuration
laconic-so deployment --dir <dir> update
```

### Quick Deploy Commands

```bash
# Start stack directly
laconic-so --stack <name> deploy up

# Stop stack
laconic-so --stack <name> deploy down [--delete-volumes]

# View logs
laconic-so --stack <name> deploy logs

# Show port mapping
laconic-so --stack <name> deploy port <service> <port>
```

## Related Documentation

- [CLI Reference](./cli.md) - Complete CLI command documentation
- [Adding a New Stack](./adding-a-new-stack.md) - Creating custom stacks
- [Specification](./spec.md) - Internal structure and design
- [Kubernetes Enhancements](./k8s-deployment-enhancements.md) - Advanced K8s deployment options
- [Web App Deployment](./webapp.md) - Deploying web applications

## Examples

For more examples, see the test scripts:

- `scripts/quick-deploy-test.sh` - Quick deployment example
- `tests/deploy/run-deploy-test.sh` - Comprehensive test showing all features

## Summary

- Docker Compose is the default and recommended deployment mode
- Two workflows: deployment directory (recommended) or quick deploy
- The standard workflow is: setup → build → init → create → start
- Configuration is flexible with multiple override layers
- Volume persistence is automatic unless explicitly deleted
- All deployment state is contained in the deployment directory
- For Kubernetes deployments, see separate K8s documentation

You're now ready to deploy stacks using stack-orchestrator with Docker Compose!
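Putting it together, a minimal end-to-end run of that standard workflow looks like this, with `my-stack` as a placeholder stack name:

```bash
# 1. Clone repositories and build images
laconic-so --stack my-stack setup-repositories
laconic-so --stack my-stack build-containers

# 2. Initialize and create the deployment
laconic-so --stack my-stack deploy init --output spec.yml
laconic-so --stack my-stack deploy create --spec-file spec.yml --deployment-dir my-deployment

# 3. Start and inspect it
laconic-so deployment --dir my-deployment start
laconic-so deployment --dir my-deployment ps
```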
@ -1,9 +1,9 @@

# Fetching pre-built container images

When Stack Orchestrator deploys a stack containing a suite of one or more containers, it expects images for those containers to be on the local machine with a tag of the form `<image-name>:local`. Images for these containers can be built from source (and optionally from base container images in public registries) with the `build-containers` subcommand.

However, building a large number of containers from source may consume considerable time and machine resources. This is where the `fetch-containers` subcommand steps in. It is designed to work exactly like `build-containers`, except that pre-built images are fetched from an image registry and then re-tagged for deployment. It can be used in place of `build-containers` for any stack, provided the necessary containers, built for the local machine architecture (e.g. arm64 or x86-64), have already been published in an image registry.

## Usage

To use `fetch-containers`, provide an image registry path, a username and token/password with read access to the registry, and optionally specify `--force-local-overwrite`. If this argument is not specified and there is already a locally built or previously fetched image for a stack container on the machine, that image will not be overwritten and a warning is issued.

```
$ laconic-so --stack mobymask-v3-demo fetch-containers --image-registry git.vdb.to/cerc-io --registry-username <registry-user> --registry-token <registry-token> --force-local-overwrite
```
@ -7,7 +7,7 @@ Deploy a local Gitea server, publish NPM packages to it, then use those packages

```bash
laconic-so --stack build-support build-containers
laconic-so --stack package-registry setup-repositories
laconic-so --stack package-registry build-containers
laconic-so --stack package-registry deploy up
```
@ -1,113 +0,0 @@

# Helm Chart Generation

Generate Kubernetes Helm charts from stack compose files using Kompose.

## Prerequisites

Install Kompose:

```bash
# Linux
curl -L https://github.com/kubernetes/kompose/releases/download/v1.34.0/kompose-linux-amd64 -o kompose
chmod +x kompose
sudo mv kompose /usr/local/bin/

# macOS
brew install kompose

# Verify
kompose version
```

## Usage

### 1. Create spec file

```bash
laconic-so --stack <stack-name> deploy --deploy-to k8s init \
  --kube-config ~/.kube/config \
  --output spec.yml
```

### 2. Generate Helm chart

```bash
laconic-so --stack <stack-name> deploy create \
  --spec-file spec.yml \
  --deployment-dir my-deployment \
  --helm-chart
```

### 3. Deploy to Kubernetes

```bash
helm install my-release my-deployment/chart
kubectl get pods -n zenith
```

## Output Structure

```
my-deployment/
├── spec.yml        # Reference
├── stack.yml       # Reference
└── chart/          # Helm chart
    ├── Chart.yaml
    ├── README.md
    └── templates/
        └── *.yaml
```

## Example

```bash
# Generate chart for stage1-zenithd
laconic-so --stack stage1-zenithd deploy --deploy-to k8s init \
  --kube-config ~/.kube/config \
  --output stage1-spec.yml

laconic-so --stack stage1-zenithd deploy create \
  --spec-file stage1-spec.yml \
  --deployment-dir stage1-deployment \
  --helm-chart

# Deploy
helm install stage1-zenithd stage1-deployment/chart
```

## Production Deployment (TODO)

### Local Development

```bash
# Access services using port-forward
kubectl port-forward service/zenithd 26657:26657
kubectl port-forward service/nginx-api-proxy 1317:80
kubectl port-forward service/cosmos-explorer 4173:4173
```

### Production Access Options

- Option 1: Ingress + cert-manager (Recommended)
  - Install ingress-nginx + cert-manager
  - Point DNS to cluster LoadBalancer IP
  - Auto-provisions Let's Encrypt TLS certs
  - Access: `https://api.zenith.example.com`
- Option 2: Cloud LoadBalancer
  - Use cloud provider's LoadBalancer service type
  - Point DNS to assigned external IP
  - Manual TLS cert management
- Option 3: Bare Metal (MetalLB + Ingress)
  - MetalLB provides LoadBalancer IPs from local network
  - Same Ingress setup as cloud
- Option 4: NodePort + External Proxy
  - Expose services on 30000-32767 range
  - External nginx/Caddy proxies 80/443 → NodePort
  - Manual cert management

### Changes Needed

- Add Ingress template to charts
- Add TLS configuration to values.yaml
- Document cert-manager setup
- Add production deployment guide
@ -1,26 +0,0 @@

# K8s Deployment Enhancements

## Controlling pod placement

The placement of pods created as part of a stack deployment can be controlled either to avoid certain nodes or to require certain nodes.

### Pod/Node Affinity

Node affinity rules applied to pods target node labels. The effect is that a pod can only be placed on a node having the specified label value. Note that other pods that do not have any node affinity rules can also be placed on those same nodes. Thus node affinity for a pod controls where that pod can be placed, but does not control where other pods are placed.

Node affinity for stack pods is specified in the deployment's `spec.yml` file as follows:

```
node-affinities:
  - label: nodetype
    value: typeb
```

This example denotes that the stack's pods should only be placed on nodes that have the label `nodetype` with value `typeb`.
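
For reference, the matching node label can be applied with plain kubectl (label key and value taken from the example above):

```bash
# Label a node so that pods with the matching affinity rule can be scheduled on it
kubectl label nodes <node-name> nodetype=typeb

# Confirm the label
kubectl get nodes --show-labels
```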

### Node Taint Toleration

K8s nodes can be given one or more "taints". These are special fields (distinct from labels) with a name (key) and optional value.

When placing pods, the k8s scheduler will only assign a pod to a tainted node if the pod possesses a corresponding "toleration". This is metadata associated with the pod specifying that the pod "tolerates" a given taint. Taint toleration therefore provides a mechanism by which only certain pods can be placed on specific nodes, and is complementary to node affinity.

Taint toleration for stack pods is specified in the deployment's `spec.yml` file as follows:

```
node-tolerations:
  - key: nodetype
    value: typeb
```

This example denotes that the stack's pods will tolerate a taint: `nodetype=typeb`
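
The corresponding taint is applied with plain kubectl; the `NoSchedule` effect below is an assumption, so substitute whichever effect your cluster policy calls for:

```bash
# Taint a node; only pods tolerating nodetype=typeb will be scheduled on it
kubectl taint nodes <node-name> nodetype=typeb:NoSchedule

# Remove the taint later if needed (trailing "-" deletes it)
kubectl taint nodes <node-name> nodetype=typeb:NoSchedule-
```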
@ -26,3 +26,4 @@ $ ./scripts/tag_new_release.sh 1 0 17

$ ./scripts/build_shiv_package.sh
$ ./scripts/publish_shiv_package_github.sh 1 0 17
```
@ -4,9 +4,9 @@ Note: this page is out of date (but still useful) - it will no longer be useful

## Implementation

The orchestrator's operation is driven by the files shown below.

- `repository-list.txt` contains the list of git repositories;
- `container-image-list.txt` contains the list of container image names;
- `pod-list.txt` specifies the set of compose components (corresponding to individual docker-compose-xxx.yml files, which may in turn specify more than one container);
- `container-build/` contains the files required to build each container image.
@ -7,7 +7,7 @@ compilation and static page generation are separated in the `build-webapp` and `

This offers much more flexibility than standard Next.js build methods, since any environment variables accessed via `process.env`, whether for pages or for the API, will have values drawn from their runtime deployment environment, not their build environment.

## Building
@ -1,128 +0,0 @@

# Deploying to the Laconic Network

## Overview

The Laconic network uses a **registry-based deployment model** where everything is published as blockchain records.

## Key Documentation in stack-orchestrator

- `docs/laconicd-with-console.md` - Setting up a laconicd network
- `docs/webapp.md` - Webapp building/running
- `stack_orchestrator/deploy/webapp/` - Implementation (14 modules)

## Core Concepts

### LRN (Laconic Resource Name)

Format: `lrn://laconic/[namespace]/[name]`

Examples:
- `lrn://laconic/deployers/my-deployer-name`
- `lrn://laconic/dns/example.com`
- `lrn://laconic/deployments/example.com`

### Registry Record Types

| Record Type | Purpose |
|-------------|---------|
| `ApplicationRecord` | Published app metadata |
| `WebappDeployer` | Deployment service offering |
| `ApplicationDeploymentRequest` | User's request to deploy |
| `ApplicationDeploymentAuction` | Optional bidding for deployers |
| `ApplicationDeploymentRecord` | Completed deployment result |

## Deployment Workflows

### 1. Direct Deployment

```
User publishes ApplicationDeploymentRequest
  → targets specific WebappDeployer (by LRN)
  → includes payment TX hash
  → Deployer picks up request, builds, deploys, publishes result
```

### 2. Auction-Based Deployment

```
User publishes ApplicationDeploymentAuction
  → Deployers bid (commit/reveal phases)
  → Winner selected
  → User publishes request targeting winner
```

## Key CLI Commands

### Publish a Deployer Service

```bash
laconic-so publish-webapp-deployer --laconic-config config.yml \
  --api-url https://deployer-api.example.com \
  --name my-deployer \
  --payment-address laconic1... \
  --minimum-payment 1000alnt
```

### Request Deployment (User Side)

```bash
laconic-so request-webapp-deployment --laconic-config config.yml \
  --app lrn://laconic/apps/my-app \
  --deployer lrn://laconic/deployers/xyz \
  --make-payment auto
```

### Run Deployer Service (Deployer Side)

```bash
laconic-so deploy-webapp-from-registry --laconic-config config.yml --discover
```

## Laconic Config File

All tools require a laconic config file (`laconic.toml`):

```toml
[cosmos]
address_prefix = "laconic"
chain_id = "laconic_9000-1"
endpoint = "http://localhost:26657"
key = "<account-name>"
password = "<account-password>"
```

## Setting Up a Local Laconicd Network

```bash
# Clone and build
laconic-so --stack fixturenet-laconic-loaded setup-repositories
laconic-so --stack fixturenet-laconic-loaded build-containers
laconic-so --stack fixturenet-laconic-loaded deploy create
laconic-so deployment --dir laconic-loaded-deployment start

# Check status
laconic-so deployment --dir laconic-loaded-deployment exec cli "laconic registry status"
```

## Key Implementation Files

| File | Purpose |
|------|---------|
| `publish_webapp_deployer.py` | Register deployment service on network |
| `publish_deployment_auction.py` | Create auction for deployers to bid on |
| `handle_deployment_auction.py` | Monitor and bid on auctions (deployer-side) |
| `request_webapp_deployment.py` | Create deployment request (user-side) |
| `deploy_webapp_from_registry.py` | Process requests and deploy (deployer-side) |
| `request_webapp_undeployment.py` | Request app removal |
| `undeploy_webapp_from_registry.py` | Process removal requests |
| `util.py` | LaconicRegistryClient - all registry interactions |

## Payment System

- **Token Denom**: `alnt` (Laconic network tokens)
- **Payment Options**:
  - `--make-payment`: Create a new payment with an amount (or "auto" for the deployer's minimum)
  - `--use-payment`: Reference an existing payment TX
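
As a sketch of how the two options are used with `request-webapp-deployment` (the LRNs and the TX hash below are placeholders):

```bash
# Create a new payment automatically at the deployer's advertised minimum
laconic-so request-webapp-deployment --laconic-config config.yml \
  --app lrn://laconic/apps/my-app \
  --deployer lrn://laconic/deployers/xyz \
  --make-payment auto

# Or reference a payment transaction that was already made
laconic-so request-webapp-deployment --laconic-config config.yml \
  --app lrn://laconic/apps/my-app \
  --deployer lrn://laconic/deployers/xyz \
  --use-payment <payment-tx-hash>
```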

## What's NOT Well-Documented

1. No end-to-end tutorial for the full deployment workflow
2. Stack publishing (vs webapp) process unclear
3. LRN naming conventions not formally specified
4. Payment economics and token mechanics
pyproject.toml
@ -1,110 +0,0 @@

[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "laconic-stack-orchestrator"
version = "1.1.0"
description = "Orchestrates deployment of the Laconic stack"
readme = "README.md"
license = {text = "GNU Affero General Public License"}
authors = [
    {name = "Cerc", email = "info@cerc.io"}
]
requires-python = ">=3.8"
classifiers = [
    "Programming Language :: Python :: 3.8",
    "Operating System :: OS Independent",
]
dependencies = [
    "python-decouple>=3.8",
    "python-dotenv==1.0.0",
    "GitPython>=3.1.32",
    "tqdm>=4.65.0",
    "python-on-whales>=0.64.0",
    "click>=8.1.6",
    "PyYAML>=6.0.1",
    "ruamel.yaml>=0.17.32",
    "pydantic==1.10.9",
    "tomli==2.0.1",
    "validators==0.22.0",
    "kubernetes>=28.1.0",
    "humanfriendly>=10.0",
    "python-gnupg>=0.5.2",
    "requests>=2.3.2",
]

[project.optional-dependencies]
dev = [
    "pytest>=7.0.0",
    "pytest-cov>=4.0.0",
    "black>=22.0.0",
    "flake8>=5.0.0",
    "pyright>=1.1.0",
    "yamllint>=1.28.0",
    "pre-commit>=3.0.0",
]

[project.scripts]
laconic-so = "stack_orchestrator.main:cli"

[project.urls]
Homepage = "https://git.vdb.to/cerc-io/stack-orchestrator"

[tool.setuptools.packages.find]
where = ["."]

[tool.setuptools.package-data]
"*" = ["data/**"]

[tool.black]
line-length = 88
target-version = ['py38']

[tool.flake8]
max-line-length = 88
extend-ignore = ["E203", "W503", "E402"]

[tool.pyright]
pythonVersion = "3.9"
typeCheckingMode = "basic"
reportMissingImports = "none"
reportMissingModuleSource = "none"
reportUnusedImport = "error"
include = ["stack_orchestrator/**/*.py", "tests/**/*.py"]
exclude = ["**/build/**", "**/__pycache__/**"]

[tool.mypy]
python_version = "3.8"
warn_return_any = true
warn_unused_configs = true
disallow_untyped_defs = true

[tool.pytest.ini_options]
testpaths = ["tests"]
python_files = ["test_*.py"]
python_classes = ["Test*"]
python_functions = ["test_*"]
markers = [
    "slow: marks tests as slow (deselect with '-m \"not slow\"')",
    "e2e: marks tests as end-to-end (requires real infrastructure)",
]
addopts = [
    "--cov",
    "--cov-report=term-missing",
    "--cov-report=html",
    "--strict-markers",
]
asyncio_default_fixture_loop_scope = "function"

[tool.coverage.run]
source = ["stack_orchestrator"]
disable_warnings = ["couldnt-parse"]

[tool.coverage.report]
exclude_lines = [
    "pragma: no cover",
    "def __repr__",
    "raise AssertionError",
    "raise NotImplementedError",
]
@ -1,9 +0,0 @@

{
  "pythonVersion": "3.9",
  "typeCheckingMode": "basic",
  "reportMissingImports": "none",
  "reportMissingModuleSource": "none",
  "reportUnusedImport": "error",
  "include": ["stack_orchestrator/**/*.py", "tests/**/*.py"],
  "exclude": ["**/build/**", "**/__pycache__/**"]
}
@ -11,5 +11,3 @@ tomli==2.0.1

 validators==0.22.0
 kubernetes>=28.1.0
 humanfriendly>=10.0
-python-gnupg>=0.5.2
-requests>=2.3.2
@ -4,7 +4,7 @@

# https://github.com/cerc-io/github-release-api
# User must define: CERC_GH_RELEASE_SCRIPTS_DIR
# pointing to the location of that cloned repository
# e.g.
# cd ~/projects
# git clone https://github.com/cerc-io/github-release-api
# cd ./stack-orchestrator
@ -94,7 +94,7 @@ sudo apt -y install jq

# laconic-so depends on git
sudo apt -y install git
# curl used below
sudo apt -y install curl
# docker repo add depends on gnupg and updated ca-certificates
sudo apt -y install ca-certificates gnupg
@ -3,7 +3,7 @@

# Uses this script package to tag a new release:
# User must define: CERC_GH_RELEASE_SCRIPTS_DIR
# pointing to the location of that cloned repository
# e.g.
# cd ~/projects
# git clone https://github.com/cerc-io/github-release-api
# cd ./stack-orchestrator
setup.py
@ -1,7 +1,5 @@

-# See
-# https://medium.com/nerd-for-tech/how-to-build-and-distribute-a-cli-tool-with-python-537ae41d9d78
+# See https://medium.com/nerd-for-tech/how-to-build-and-distribute-a-cli-tool-with-python-537ae41d9d78
 from setuptools import setup, find_packages

 with open("README.md", "r", encoding="utf-8") as fh:
     long_description = fh.read()
 with open("requirements.txt", "r", encoding="utf-8") as fh:

@ -9,26 +7,26 @@ with open("requirements.txt", "r", encoding="utf-8") as fh:

 with open("stack_orchestrator/data/version.txt", "r", encoding="utf-8") as fh:
     version = fh.readlines()[-1].strip(" \n")
 setup(
-    name="laconic-stack-orchestrator",
+    name='laconic-stack-orchestrator',
     version=version,
-    author="Cerc",
+    author='Cerc',
-    author_email="info@cerc.io",
+    author_email='info@cerc.io',
-    license="GNU Affero General Public License",
+    license='GNU Affero General Public License',
-    description="Orchestrates deployment of the Laconic stack",
+    description='Orchestrates deployment of the Laconic stack',
     long_description=long_description,
     long_description_content_type="text/markdown",
-    url="https://git.vdb.to/cerc-io/stack-orchestrator",
+    url='https://git.vdb.to/cerc-io/stack-orchestrator',
-    py_modules=["stack_orchestrator"],
+    py_modules=['stack_orchestrator'],
     packages=find_packages(),
     install_requires=[requirements],
-    python_requires=">=3.7",
+    python_requires='>=3.7',
     include_package_data=True,
-    package_data={"": ["data/**"]},
+    package_data={'': ['data/**']},
     classifiers=[
         "Programming Language :: Python :: 3.8",
         "Operating System :: OS Independent",
     ],
     entry_points={
-        "console_scripts": ["laconic-so=stack_orchestrator.main:cli"],
+        'console_scripts': ['laconic-so=stack_orchestrator.main:cli'],
-    },
+    }
 )
@ -23,10 +23,11 @@ def get_stack(config, stack):

     if stack == "package-registry":
         return package_registry_stack(config, stack)
     else:
-        return default_stack(config, stack)
+        return base_stack(config, stack)


 class base_stack(ABC):

     def __init__(self, config, stack):
         self.config = config
         self.stack = stack

@ -40,27 +41,15 @@ class base_stack(ABC):

         pass


-class default_stack(base_stack):
-    """Default stack implementation for stacks without specific handling."""
-
-    def ensure_available(self):
-        return True
-
-    def get_url(self):
-        return None
-
-
 class package_registry_stack(base_stack):

     def ensure_available(self):
         self.url = "<no registry url set>"
         # Check if we were given an external registry URL
         url_from_environment = os.environ.get("CERC_NPM_REGISTRY_URL")
         if url_from_environment:
             if self.config.verbose:
-                print(
-                    f"Using package registry url from CERC_NPM_REGISTRY_URL: "
-                    f"{url_from_environment}"
-                )
+                print(f"Using package registry url from CERC_NPM_REGISTRY_URL: {url_from_environment}")
             self.url = url_from_environment
         else:
             # Otherwise we expect to use the local package-registry stack

@ -73,16 +62,10 @@ class package_registry_stack(base_stack):

                 # TODO: get url from deploy-stack
                 self.url = "http://gitea.local:3000/api/packages/cerc-io/npm/"
             else:
-                # If not, print a message about how to start it and return fail to the
-                # caller
-                print(
-                    "ERROR: The package-registry stack is not running, "
-                    "and no external registry specified with CERC_NPM_REGISTRY_URL"
-                )
-                print(
-                    "ERROR: Start the local package registry with: "
-                    "laconic-so --stack package-registry deploy-system up"
-                )
+                # If not, print a message about how to start it and return fail to the caller
+                print("ERROR: The package-registry stack is not running, and no external registry "
+                      "specified with CERC_NPM_REGISTRY_URL")
+                print("ERROR: Start the local package registry with: laconic-so --stack package-registry deploy-system up")
                 return False
         return True

@ -93,9 +76,7 @@ class package_registry_stack(base_stack):

 def get_npm_registry_url():
     # If an auth token is not defined, we assume the default should be the cerc registry
     # If an auth token is defined, we assume the local gitea should be used.
-    default_npm_registry_url = (
-        "http://gitea.local:3000/api/packages/cerc-io/npm/"
-        if config("CERC_NPM_AUTH_TOKEN", default=None)
-        else "https://git.vdb.to/api/packages/cerc-io/npm/"
-    )
+    default_npm_registry_url = "http://gitea.local:3000/api/packages/cerc-io/npm/" if config(
+        "CERC_NPM_AUTH_TOKEN", default=None
+    ) else "https://git.vdb.to/api/packages/cerc-io/npm/"
     return config("CERC_NPM_REGISTRY_URL", default=default_npm_registry_url)
@ -18,8 +18,7 @@

 # env vars:
 # CERC_REPO_BASE_DIR defaults to ~/cerc

-# TODO: display the available list of containers;
-# allow re-build of either all or specific containers
+# TODO: display the available list of containers; allow re-build of either all or specific containers

 import os
 import sys

@ -35,17 +34,14 @@ from stack_orchestrator.build.publish import publish_image

 from stack_orchestrator.build.build_util import get_containers_in_scope

 # TODO: find a place for this
-# epilog="Config provided either in .env or settings.ini or env vars:
-# CERC_REPO_BASE_DIR (defaults to ~/cerc)"
+# epilog="Config provided either in .env or settings.ini or env vars: CERC_REPO_BASE_DIR (defaults to ~/cerc)"


-def make_container_build_env(
-    dev_root_path: str,
-    container_build_dir: str,
-    debug: bool,
-    force_rebuild: bool,
-    extra_build_args: str,
-):
+def make_container_build_env(dev_root_path: str,
+                             container_build_dir: str,
+                             debug: bool,
+                             force_rebuild: bool,
+                             extra_build_args: str):
     container_build_env = {
         "CERC_NPM_REGISTRY_URL": get_npm_registry_url(),
         "CERC_GO_AUTH_TOKEN": config("CERC_GO_AUTH_TOKEN", default=""),

@ -54,15 +50,11 @@

         "CERC_CONTAINER_BASE_DIR": container_build_dir,
         "CERC_HOST_UID": f"{os.getuid()}",
         "CERC_HOST_GID": f"{os.getgid()}",
-        "DOCKER_BUILDKIT": config("DOCKER_BUILDKIT", default="0"),
+        "DOCKER_BUILDKIT": config("DOCKER_BUILDKIT", default="0")
     }
     container_build_env.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
     container_build_env.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
-    container_build_env.update(
-        {"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args}
-        if extra_build_args
-        else {}
-    )
+    container_build_env.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {})
     docker_host_env = os.getenv("DOCKER_HOST")
     if docker_host_env:
         container_build_env.update({"DOCKER_HOST": docker_host_env})

@ -75,18 +67,12 @@ def process_container(build_context: BuildContext) -> bool:

     print(f"Building: {build_context.container}")

     default_container_tag = f"{build_context.container}:local"
-    build_context.container_build_env.update(
-        {"CERC_DEFAULT_CONTAINER_IMAGE_TAG": default_container_tag}
-    )
+    build_context.container_build_env.update({"CERC_DEFAULT_CONTAINER_IMAGE_TAG": default_container_tag})

     # Check if this is in an external stack
     if stack_is_external(build_context.stack):
-        container_parent_dir = Path(build_context.stack).parent.parent.joinpath(
-            "container-build"
-        )
-        temp_build_dir = container_parent_dir.joinpath(
-            build_context.container.replace("/", "-")
-        )
+        container_parent_dir = Path(build_context.stack).parent.parent.joinpath("container-build")
+        temp_build_dir = container_parent_dir.joinpath(build_context.container.replace("/", "-"))
         temp_build_script_filename = temp_build_dir.joinpath("build.sh")
         # Now check if the container exists in the external stack.
         if not temp_build_script_filename.exists():

@ -104,34 +90,21 @@ def process_container(build_context: BuildContext) -> bool:

         build_command = build_script_filename.as_posix()
     else:
         if opts.o.verbose:
-            print(
-                f"No script file found: {build_script_filename}, "
-                "using default build script"
-            )
-        repo_dir = build_context.container.split("/")[1]
-        # TODO: make this less of a hack -- should be specified in
-        # some metadata somewhere. Check if we have a repo for this
-        # container. If not, set the context dir to container-build subdir
+            print(f"No script file found: {build_script_filename}, using default build script")
+        repo_dir = build_context.container.split('/')[1]
+        # TODO: make this less of a hack -- should be specified in some metadata somewhere
+        # Check if we have a repo for this container. If not, set the context dir to the container-build subdir
         repo_full_path = os.path.join(build_context.dev_root_path, repo_dir)
-        repo_dir_or_build_dir = (
-            repo_full_path if os.path.exists(repo_full_path) else build_dir
-        )
-        build_command = (
-            os.path.join(build_context.container_build_dir, "default-build.sh")
-            + f" {default_container_tag} {repo_dir_or_build_dir}"
-        )
+        repo_dir_or_build_dir = repo_full_path if os.path.exists(repo_full_path) else build_dir
+        build_command = os.path.join(build_context.container_build_dir,
+                                     "default-build.sh") + f" {default_container_tag} {repo_dir_or_build_dir}"
     if not opts.o.dry_run:
         # No PATH at all causes failures with podman.
         if "PATH" not in build_context.container_build_env:
             build_context.container_build_env["PATH"] = os.environ["PATH"]
         if opts.o.verbose:
-            print(
-                f"Executing: {build_command} with environment: "
-                f"{build_context.container_build_env}"
-            )
-        build_result = subprocess.run(
-            build_command, shell=True, env=build_context.container_build_env
-        )
+            print(f"Executing: {build_command} with environment: {build_context.container_build_env}")
+        build_result = subprocess.run(build_command, shell=True, env=build_context.container_build_env)
         if opts.o.verbose:
             print(f"Return code is: {build_result.returncode}")
         if build_result.returncode != 0:

@ -144,61 +117,33 @@ def process_container(build_context: BuildContext) -> bool:


 @click.command()
-@click.option("--include", help="only build these containers")
-@click.option("--exclude", help="don't build these containers")
-@click.option(
-    "--force-rebuild",
-    is_flag=True,
-    default=False,
-    help="Override dependency checking -- always rebuild",
-)
+@click.option('--include', help="only build these containers")
+@click.option('--exclude', help="don\'t build these containers")
+@click.option("--force-rebuild", is_flag=True, default=False, help="Override dependency checking -- always rebuild")
 @click.option("--extra-build-args", help="Supply extra arguments to build")
-@click.option(
-    "--publish-images",
-    is_flag=True,
-    default=False,
-    help="Publish the built images in the specified image registry",
-)
-@click.option(
-    "--image-registry", help="Specify the image registry for --publish-images"
-)
+@click.option("--publish-images", is_flag=True, default=False, help="Publish the built images in the specified image registry")
+@click.option("--image-registry", help="Specify the image registry for --publish-images")
 @click.pass_context
-def command(
-    ctx,
-    include,
-    exclude,
-    force_rebuild,
-    extra_build_args,
-    publish_images,
-    image_registry,
-):
-    """build the set of containers required for a complete stack"""
+def command(ctx, include, exclude, force_rebuild, extra_build_args, publish_images, image_registry):
+    '''build the set of containers required for a complete stack'''

     local_stack = ctx.obj.local_stack
     stack = ctx.obj.stack

-    # See: https://stackoverflow.com/questions/25389095/
-    # python-get-path-of-root-project-structure
-    container_build_dir = (
-        Path(__file__).absolute().parent.parent.joinpath("data", "container-build")
-    )
+    # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
+    container_build_dir = Path(__file__).absolute().parent.parent.joinpath("data", "container-build")

     if local_stack:
-        dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")]
-        print(
-            f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
-            f"{dev_root_path}"
-        )
+        dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
+        print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
     else:
-        dev_root_path = os.path.expanduser(
-            config("CERC_REPO_BASE_DIR", default="~/cerc")
-        )
+        dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))

     if not opts.o.quiet:
-        print(f"Dev Root is: {dev_root_path}")
+        print(f'Dev Root is: {dev_root_path}')

     if not os.path.isdir(dev_root_path):
-        print("Dev root directory doesn't exist, creating")
+        print('Dev root directory doesn\'t exist, creating')

     if publish_images:
         if not image_registry:

@ -206,22 +151,21 @@ def command(


     containers_in_scope = get_containers_in_scope(stack)

-    container_build_env = make_container_build_env(
-        dev_root_path,
-        container_build_dir,
-        opts.o.debug,
-        force_rebuild,
-        extra_build_args,
-    )
+    container_build_env = make_container_build_env(dev_root_path,
+                                                   container_build_dir,
+                                                   opts.o.debug,
+                                                   force_rebuild,
+                                                   extra_build_args)

     for container in containers_in_scope:
         if include_exclude_check(container, include, exclude):

             build_context = BuildContext(
                 stack,
                 container,
                 container_build_dir,
                 container_build_env,
-                dev_root_path,
+                dev_root_path
             )
             result = process_container(build_context)
             if result:

@ -230,16 +174,10 @@ def command(

             else:
                 print(f"Error running build for {build_context.container}")
                 if not opts.o.continue_on_error:
-                    error_exit(
-                        "container build failed and --continue-on-error "
-                        "not set, exiting"
-                    )
+                    error_exit("container build failed and --continue-on-error not set, exiting")
                     sys.exit(1)
                 else:
-                    print(
-                        "****** Container Build Error, continuing because "
-                        "--continue-on-error is set"
-                    )
+                    print("****** Container Build Error, continuing because --continue-on-error is set")
         else:
             if opts.o.verbose:
                 print(f"Excluding: {container}")
@ -32,18 +32,14 @@ builder_js_image_name = "cerc/builder-js:local"
|
|||||||
|
|
||||||
|
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.option("--include", help="only build these packages")
|
@click.option('--include', help="only build these packages")
|
||||||
@click.option("--exclude", help="don't build these packages")
|
@click.option('--exclude', help="don\'t build these packages")
|
||||||
@click.option(
|
@click.option("--force-rebuild", is_flag=True, default=False,
|
||||||
"--force-rebuild",
|
help="Override existing target package version check -- force rebuild")
|
||||||
is_flag=True,
|
|
||||||
default=False,
|
|
||||||
help="Override existing target package version check -- force rebuild",
|
|
||||||
)
|
|
||||||
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(ctx, include, exclude, force_rebuild, extra_build_args):
|
def command(ctx, include, exclude, force_rebuild, extra_build_args):
|
||||||
"""build the set of npm packages required for a complete stack"""
|
'''build the set of npm packages required for a complete stack'''
|
||||||
|
|
||||||
quiet = ctx.obj.quiet
|
quiet = ctx.obj.quiet
|
||||||
verbose = ctx.obj.verbose
|
verbose = ctx.obj.verbose
|
||||||
@ -69,54 +65,45 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
|
|||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
if local_stack:
|
if local_stack:
|
||||||
dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")]
|
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
|
||||||
print(
|
print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
|
||||||
f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
|
|
||||||
f"{dev_root_path}"
|
|
||||||
)
|
|
||||||
else:
|
else:
|
||||||
dev_root_path = os.path.expanduser(
|
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
|
||||||
config("CERC_REPO_BASE_DIR", default="~/cerc")
|
|
||||||
)
|
|
||||||
|
|
||||||
build_root_path = os.path.join(dev_root_path, "build-trees")
|
build_root_path = os.path.join(dev_root_path, "build-trees")
|
||||||
|
|
||||||
if verbose:
|
if verbose:
|
||||||
print(f"Dev Root is: {dev_root_path}")
|
print(f'Dev Root is: {dev_root_path}')
|
||||||
|
|
||||||
if not os.path.isdir(dev_root_path):
|
if not os.path.isdir(dev_root_path):
|
||||||
print("Dev root directory doesn't exist, creating")
|
print('Dev root directory doesn\'t exist, creating')
|
||||||
os.makedirs(dev_root_path)
|
os.makedirs(dev_root_path)
|
||||||
if not os.path.isdir(dev_root_path):
|
if not os.path.isdir(dev_root_path):
|
||||||
print("Build root directory doesn't exist, creating")
|
print('Build root directory doesn\'t exist, creating')
|
||||||
os.makedirs(build_root_path)
|
os.makedirs(build_root_path)
|
||||||
|
|
||||||
# See: https://stackoverflow.com/a/20885799/1701505
|
# See: https://stackoverflow.com/a/20885799/1701505
|
||||||
from stack_orchestrator import data
|
from stack_orchestrator import data
|
||||||
|
with importlib.resources.open_text(data, "npm-package-list.txt") as package_list_file:
|
||||||
with importlib.resources.open_text(
|
|
||||||
data, "npm-package-list.txt"
|
|
||||||
) as package_list_file:
|
|
||||||
all_packages = package_list_file.read().splitlines()
|
all_packages = package_list_file.read().splitlines()
|
||||||
|
|
||||||
packages_in_scope = []
|
packages_in_scope = []
|
||||||
if stack:
|
if stack:
|
||||||
stack_config = get_parsed_stack_config(stack)
|
stack_config = get_parsed_stack_config(stack)
|
||||||
# TODO: syntax check the input here
|
# TODO: syntax check the input here
|
||||||
packages_in_scope = stack_config["npms"]
|
packages_in_scope = stack_config['npms']
|
||||||
else:
|
else:
|
||||||
packages_in_scope = all_packages
|
packages_in_scope = all_packages
|
||||||
|
|
||||||
if verbose:
|
if verbose:
|
||||||
print(f"Packages: {packages_in_scope}")
|
print(f'Packages: {packages_in_scope}')
|
||||||
|
|
||||||
def build_package(package):
|
def build_package(package):
|
||||||
if not quiet:
|
if not quiet:
|
||||||
print(f"Building npm package: {package}")
|
print(f"Building npm package: {package}")
|
||||||
repo_dir = package
|
repo_dir = package
|
||||||
repo_full_path = os.path.join(dev_root_path, repo_dir)
|
repo_full_path = os.path.join(dev_root_path, repo_dir)
|
||||||
# Copy the repo and build that to avoid propagating
|
# Copy the repo and build that to avoid propagating JS tooling file changes back into the cloned repo
|
||||||
# JS tooling file changes back into the cloned repo
|
|
||||||
repo_copy_path = os.path.join(build_root_path, repo_dir)
|
repo_copy_path = os.path.join(build_root_path, repo_dir)
|
||||||
# First delete any old build tree
|
# First delete any old build tree
|
||||||
if os.path.isdir(repo_copy_path):
|
if os.path.isdir(repo_copy_path):
|
||||||
@@ -129,63 +116,41 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
         print(f"Copying build tree from: {repo_full_path} to: {repo_copy_path}")
         if not dry_run:
             copytree(repo_full_path, repo_copy_path)
-            build_command = [
-                "sh",
-                "-c",
-                "cd /workspace && "
-                f"build-npm-package-local-dependencies.sh {npm_registry_url}",
-            ]
+            build_command = ["sh", "-c", f"cd /workspace && build-npm-package-local-dependencies.sh {npm_registry_url}"]
             if not dry_run:
                 if verbose:
                     print(f"Executing: {build_command}")
                 # Originally we used the PEP 584 merge operator:
-                # envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token} |
-                # ({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
-                # but that isn't available in Python 3.8 (default in Ubuntu 20)
-                # so for now we use dict.update:
-                envs = {
-                    "CERC_NPM_AUTH_TOKEN": npm_registry_url_token,
-                    # Convention used by our web app packages
-                    "LACONIC_HOSTED_CONFIG_FILE": "config-hosted.yml",
-                }
+                # envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token} | ({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
+                # but that isn't available in Python 3.8 (default in Ubuntu 20) so for now we use dict.update:
+                envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token,
+                        "LACONIC_HOSTED_CONFIG_FILE": "config-hosted.yml"  # Convention used by our web app packages
+                        }
                 envs.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
                 envs.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
-                envs.update(
-                    {"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args}
-                    if extra_build_args
-                    else {}
-                )
+                envs.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {})
                 try:
-                    docker.run(
-                        builder_js_image_name,
-                        remove=True,
-                        interactive=True,
-                        tty=True,
-                        user=f"{os.getuid()}:{os.getgid()}",
-                        envs=envs,
-                        # TODO: detect this host name in npm_registry_url
-                        # rather than hard-wiring it
-                        add_hosts=[("gitea.local", "host-gateway")],
-                        volumes=[(repo_copy_path, "/workspace")],
-                        command=build_command,
-                    )
-                    # Note that although the docs say that build_result should
-                    # contain the command output as a string, in reality it is
-                    # always the empty string. Since we detect errors via catching
-                    # exceptions below, we can safely ignore it here.
+                    docker.run(builder_js_image_name,
+                               remove=True,
+                               interactive=True,
+                               tty=True,
+                               user=f"{os.getuid()}:{os.getgid()}",
+                               envs=envs,
+                               # TODO: detect this host name in npm_registry_url rather than hard-wiring it
+                               add_hosts=[("gitea.local", "host-gateway")],
+                               volumes=[(repo_copy_path, "/workspace")],
+                               command=build_command
+                               )
+                    # Note that although the docs say that build_result should contain
+                    # the command output as a string, in reality it is always the empty string.
+                    # Since we detect errors via catching exceptions below, we can safely ignore it here.
                 except DockerException as e:
                     print(f"Error executing build for {package} in container:\n {e}")
                     if not continue_on_error:
-                        print(
-                            "FATAL Error: build failed and --continue-on-error "
-                            "not set, exiting"
-                        )
+                        print("FATAL Error: build failed and --continue-on-error not set, exiting")
                         sys.exit(1)
                     else:
-                        print(
-                            "****** Build Error, continuing because "
-                            "--continue-on-error is set"
-                        )
+                        print("****** Build Error, continuing because --continue-on-error is set")
         else:
             print("Skipped")
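A side note on the comment in this hunk: the PEP 584 `|` operator and the `dict.update` fallback it mentions produce the same result for this use. A minimal sketch (the token value is a placeholder):

```python
debug = True

# PEP 584 (Python 3.9+): | merges two dicts into a new dict.
envs_merged = {"CERC_NPM_AUTH_TOKEN": "placeholder-token"} | (
    {"CERC_SCRIPT_DEBUG": "true"} if debug else {}
)

# Python 3.8-compatible form used by the code above: mutate in place.
envs = {"CERC_NPM_AUTH_TOKEN": "placeholder-token"}
envs.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})

assert envs == envs_merged
```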
@@ -203,12 +168,6 @@ def _ensure_prerequisites():
     # Tell the user how to build it if not
     images = docker.image.list(builder_js_image_name)
     if len(images) == 0:
-        print(
-            f"FATAL: builder image: {builder_js_image_name} is required "
-            "but was not found"
-        )
-        print(
-            "Please run this command to create it: "
-            "laconic-so --stack build-support build-containers"
-        )
+        print(f"FATAL: builder image: {builder_js_image_name} is required but was not found")
+        print("Please run this command to create it: laconic-so --stack build-support build-containers")
         sys.exit(1)
@@ -24,5 +24,6 @@ class BuildContext:
     stack: str
     container: str
    container_build_dir: Path
-    container_build_env: Mapping[str, str]
+    container_build_env: Mapping[str,str]
     dev_root_path: str
@@ -20,23 +20,21 @@ from stack_orchestrator.util import get_parsed_stack_config, warn_exit


 def get_containers_in_scope(stack: str):

     containers_in_scope = []
     if stack:
         stack_config = get_parsed_stack_config(stack)
         if "containers" not in stack_config or stack_config["containers"] is None:
             warn_exit(f"stack {stack} does not define any containers")
-        containers_in_scope = stack_config["containers"]
+        containers_in_scope = stack_config['containers']
     else:
         # See: https://stackoverflow.com/a/20885799/1701505
         from stack_orchestrator import data
-        with importlib.resources.open_text(
-            data, "container-image-list.txt"
-        ) as container_list_file:
+        with importlib.resources.open_text(data, "container-image-list.txt") as container_list_file:
             containers_in_scope = container_list_file.read().splitlines()

     if opts.o.verbose:
-        print(f"Containers: {containers_in_scope}")
+        print(f'Containers: {containers_in_scope}')
         if stack:
             print(f"Stack: {stack}")
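As background for the hunk above, `importlib.resources.open_text` reads a data file bundled inside an installed package. A self-contained sketch of the same pattern, using the module and file name that appear in the code:

```python
import importlib.resources

from stack_orchestrator import data

# Opens container-image-list.txt from inside the installed package,
# wherever the package actually lives (site-packages, a shiv zipapp, etc.).
with importlib.resources.open_text(data, "container-image-list.txt") as f:
    containers_in_scope = f.read().splitlines()
```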
@@ -18,8 +18,7 @@
 # env vars:
 # CERC_REPO_BASE_DIR defaults to ~/cerc

-# TODO: display the available list of containers;
-# allow re-build of either all or specific containers
+# TODO: display the available list of containers; allow re-build of either all or specific containers

 import os
 import sys
@@ -33,55 +32,40 @@ from stack_orchestrator.build.build_types import BuildContext


 @click.command()
-@click.option("--base-container")
-@click.option(
-    "--source-repo", help="directory containing the webapp to build", required=True
-)
-@click.option(
-    "--force-rebuild",
-    is_flag=True,
-    default=False,
-    help="Override dependency checking -- always rebuild",
-)
+@click.option('--base-container')
+@click.option('--source-repo', help="directory containing the webapp to build", required=True)
+@click.option("--force-rebuild", is_flag=True, default=False, help="Override dependency checking -- always rebuild")
 @click.option("--extra-build-args", help="Supply extra arguments to build")
 @click.option("--tag", help="Container tag (default: cerc/<app_name>:local)")
 @click.pass_context
 def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, tag):
-    """build the specified webapp container"""
+    '''build the specified webapp container'''
     logger = TimedLogger()

+    quiet = ctx.obj.quiet
     debug = ctx.obj.debug
     verbose = ctx.obj.verbose
     local_stack = ctx.obj.local_stack
     stack = ctx.obj.stack

-    # See: https://stackoverflow.com/questions/25389095/
-    # python-get-path-of-root-project-structure
-    container_build_dir = (
-        Path(__file__).absolute().parent.parent.joinpath("data", "container-build")
-    )
+    # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
+    container_build_dir = Path(__file__).absolute().parent.parent.joinpath("data", "container-build")

     if local_stack:
-        dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")]
-        logger.log(
-            f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
-            f"{dev_root_path}"
-        )
+        dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
+        logger.log(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
     else:
-        dev_root_path = os.path.expanduser(
-            config("CERC_REPO_BASE_DIR", default="~/cerc")
-        )
+        dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))

     if verbose:
-        logger.log(f"Dev Root is: {dev_root_path}")
+        logger.log(f'Dev Root is: {dev_root_path}')

     if not base_container:
         base_container = determine_base_container(source_repo)

     # First build the base container.
-    container_build_env = build_containers.make_container_build_env(
-        dev_root_path, container_build_dir, debug, force_rebuild, extra_build_args
-    )
+    container_build_env = build_containers.make_container_build_env(dev_root_path, container_build_dir, debug,
+                                                                    force_rebuild, extra_build_args)

     if verbose:
         logger.log(f"Building base container: {base_container}")
@@ -101,13 +85,12 @@ def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, t
     if verbose:
         logger.log(f"Base container {base_container} build finished.")

-    # Now build the target webapp. We use the same build script,
-    # but with a different Dockerfile and work dir.
+    # Now build the target webapp. We use the same build script, but with a different Dockerfile and work dir.
     container_build_env["CERC_WEBAPP_BUILD_RUNNING"] = "true"
     container_build_env["CERC_CONTAINER_BUILD_WORK_DIR"] = os.path.abspath(source_repo)
-    container_build_env["CERC_CONTAINER_BUILD_DOCKERFILE"] = os.path.join(
-        container_build_dir, base_container.replace("/", "-"), "Dockerfile.webapp"
-    )
+    container_build_env["CERC_CONTAINER_BUILD_DOCKERFILE"] = os.path.join(container_build_dir,
+                                                                          base_container.replace("/", "-"),
+                                                                          "Dockerfile.webapp")
     if not tag:
         webapp_name = os.path.abspath(source_repo).split(os.path.sep)[-1]
         tag = f"cerc/{webapp_name}:local"
@@ -52,8 +52,7 @@ def _local_tag_for(container: str):

 # See: https://docker-docs.uclv.cu/registry/spec/api/
 # Emulate this:
-# $ curl -u "my-username:my-token" -X GET \
-#   "https://<container-registry-hostname>/v2/cerc-io/cerc/test-container/tags/list"
+# $ curl -u "my-username:my-token" -X GET "https://<container-registry-hostname>/v2/cerc-io/cerc/test-container/tags/list"
 # {"name":"cerc-io/cerc/test-container","tags":["202402232130","202402232208"]}
 def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List[str]:
     # registry looks like: git.vdb.to/cerc-io
@@ -61,9 +60,7 @@ def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List
     url = f"https://{registry_parts[0]}/v2/{registry_parts[1]}/{container}/tags/list"
     if opts.o.debug:
         print(f"Fetching tags from: {url}")
-    response = requests.get(
-        url, auth=(registry_info.registry_username, registry_info.registry_token)
-    )
+    response = requests.get(url, auth=(registry_info.registry_username, registry_info.registry_token))
     if response.status_code == 200:
         tag_info = response.json()
         if opts.o.debug:
@@ -71,10 +68,7 @@ def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List
         tags_array = tag_info["tags"]
         return tags_array
     else:
-        error_exit(
-            f"failed to fetch tags from image registry, "
-            f"status code: {response.status_code}"
-        )
+        error_exit(f"failed to fetch tags from image registry, status code: {response.status_code}")


 def _find_latest(candidate_tags: List[str]):
@@ -85,9 +79,9 @@ def _find_latest(candidate_tags: List[str]):
     return sorted_candidates[-1]


-def _filter_for_platform(
-    container: str, registry_info: RegistryInfo, tag_list: List[str]
-) -> List[str]:
+def _filter_for_platform(container: str,
+                         registry_info: RegistryInfo,
+                         tag_list: List[str]) -> List[str] :
     filtered_tags = []
     this_machine = platform.machine()
     # Translate between Python and docker platform names
@@ -104,7 +98,7 @@ def _filter_for_platform(
     manifest = manifest_cmd.inspect_verbose(remote_tag)
     if opts.o.debug:
         print(f"manifest: {manifest}")
     image_architecture = manifest["Descriptor"]["platform"]["architecture"]
     if opts.o.debug:
         print(f"image_architecture: {image_architecture}")
     if this_machine == image_architecture:
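The "Translate between Python and docker platform names" comment refers to the mismatch between `platform.machine()` output and Docker's architecture labels (e.g. `x86_64` vs `amd64`). A hypothetical sketch of such a mapping; the actual translation table is not shown in this hunk, so the entries below are illustrative assumptions:

```python
import platform

# Assumed mapping: Python's platform.machine() values on the left,
# Docker manifest architecture names on the right.
PYTHON_TO_DOCKER_ARCH = {
    "x86_64": "amd64",
    "aarch64": "arm64",
    "arm64": "arm64",  # macOS reports arm64 directly
}

this_machine = PYTHON_TO_DOCKER_ARCH.get(platform.machine(), platform.machine())
print(f"Docker-style architecture for this host: {this_machine}")
```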
@@ -143,44 +137,21 @@ def _add_local_tag(remote_tag: str, registry: str, local_tag: str):


 @click.command()
-@click.option("--include", help="only fetch these containers")
-@click.option("--exclude", help="don't fetch these containers")
-@click.option(
-    "--force-local-overwrite",
-    is_flag=True,
-    default=False,
-    help="Overwrite a locally built image, if present",
-)
-@click.option(
-    "--image-registry", required=True, help="Specify the image registry to fetch from"
-)
-@click.option(
-    "--registry-username", required=True, help="Specify the image registry username"
-)
-@click.option(
-    "--registry-token", required=True, help="Specify the image registry access token"
-)
+@click.option('--include', help="only fetch these containers")
+@click.option('--exclude', help="don\'t fetch these containers")
+@click.option("--force-local-overwrite", is_flag=True, default=False, help="Overwrite a locally built image, if present")
+@click.option("--image-registry", required=True, help="Specify the image registry to fetch from")
+@click.option("--registry-username", required=True, help="Specify the image registry username")
+@click.option("--registry-token", required=True, help="Specify the image registry access token")
 @click.pass_context
-def command(
-    ctx,
-    include,
-    exclude,
-    force_local_overwrite,
-    image_registry,
-    registry_username,
-    registry_token,
-):
-    """EXPERIMENTAL: fetch the images for a stack from remote registry"""
+def command(ctx, include, exclude, force_local_overwrite, image_registry, registry_username, registry_token):
+    '''EXPERIMENTAL: fetch the images for a stack from remote registry'''

     registry_info = RegistryInfo(image_registry, registry_username, registry_token)
     docker = DockerClient()
     if not opts.o.quiet:
         print("Logging into container registry:")
-    docker.login(
-        registry_info.registry,
-        registry_info.registry_username,
-        registry_info.registry_token,
-    )
+    docker.login(registry_info.registry, registry_info.registry_username, registry_info.registry_token)
     # Generate list of target containers
     stack = ctx.obj.stack
     containers_in_scope = get_containers_in_scope(stack)
@@ -201,24 +172,19 @@ def command(
                 print(f"Fetching: {image_to_fetch}")
                 _fetch_image(image_to_fetch, registry_info)
                 # Now check if the target container already exists exists locally already
-                if _exists_locally(container):
+                if (_exists_locally(container)):
                     if not opts.o.quiet:
                         print(f"Container image {container} already exists locally")
                     # if so, fail unless the user specified force-local-overwrite
-                    if force_local_overwrite:
+                    if (force_local_overwrite):
                         # In that case remove the existing :local tag
                         if not opts.o.quiet:
-                            print(
-                                f"Warning: overwriting local tag from this image: "
-                                f"{container} because --force-local-overwrite was specified"
-                            )
+                            print(f"Warning: overwriting local tag from this image: {container} because "
+                                  "--force-local-overwrite was specified")
                     else:
                         if not opts.o.quiet:
-                            print(
-                                f"Skipping local tagging for this image: {container} "
-                                "because that would overwrite an existing :local tagged "
-                                "image, use --force-local-overwrite to do so."
-                            )
+                            print(f"Skipping local tagging for this image: {container} because that would "
+                                  "overwrite an existing :local tagged image, use --force-local-overwrite to do so.")
                         continue
                 # Tag the fetched image with the :local tag
                 _add_local_tag(image_to_fetch, image_registry, local_tag)
@@ -226,7 +192,4 @@ def command(
             if opts.o.verbose:
                 print(f"Excluding: {container}")
     if not all_containers_found:
-        print(
-            "Warning: couldn't find usable images for one or more containers, "
-            "this stack will not deploy"
-        )
+        print("Warning: couldn't find usable images for one or more containers, this stack will not deploy")
@@ -34,13 +34,5 @@ volumes_key = "volumes"
 security_key = "security"
 annotations_key = "annotations"
 labels_key = "labels"
-replicas_key = "replicas"
-node_affinities_key = "node-affinities"
-node_tolerations_key = "node-tolerations"
 kind_config_filename = "kind-config.yml"
 kube_config_filename = "kubeconfig.yml"
-cri_base_filename = "cri-base.json"
-unlimited_memlock_key = "unlimited-memlock"
-runtime_class_key = "runtime-class"
-high_memlock_runtime = "high-memlock"
-high_memlock_spec_filename = "high-memlock-spec.json"
@@ -20,7 +20,7 @@ services:
     depends_on:
       generate-jwt:
         condition: service_completed_successfully
     env_file:
       - ../config/fixturenet-blast/${NETWORK:-fixturenet}.config
   blast-geth:
     image: blastio/blast-geth:${NETWORK:-testnet-sepolia}
@@ -51,7 +51,7 @@ services:
       --nodiscover
       --maxpeers=0
       --rollup.disabletxpoolgossip=true
     env_file:
       - ../config/fixturenet-blast/${NETWORK:-fixturenet}.config
     depends_on:
       geth-init:
@@ -73,7 +73,7 @@ services:
       --rollup.config="/blast/rollup.json"
     depends_on:
       - blast-geth
     env_file:
       - ../config/fixturenet-blast/${NETWORK:-fixturenet}.config

 volumes:
@@ -14,3 +14,4 @@ services:
       - "9090"
       - "9091"
       - "1317"
+
@@ -19,7 +19,7 @@ services:
     depends_on:
       generate-jwt:
         condition: service_completed_successfully
     env_file:
       - ../config/mainnet-blast/${NETWORK:-mainnet}.config
   blast-geth:
     image: blastio/blast-geth:${NETWORK:-mainnet}
@@ -53,7 +53,7 @@ services:
       --nodiscover
       --maxpeers=0
       --rollup.disabletxpoolgossip=true
     env_file:
       - ../config/mainnet-blast/${NETWORK:-mainnet}.config
     depends_on:
       geth-init:
@@ -76,7 +76,7 @@ services:
       --rollup.config="/blast/rollup.json"
     depends_on:
       - blast-geth
     env_file:
       - ../config/mainnet-blast/${NETWORK:-mainnet}.config

 volumes:
@@ -17,3 +17,4 @@
       - URL_NEUTRON_TEST_REST=https://rest-palvus.pion-1.ntrn.tech
       - URL_NEUTRON_TEST_RPC=https://rpc-palvus.pion-1.ntrn.tech
       - WALLET_CONNECT_ID=0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x
+
@@ -4,5 +4,9 @@ services:
   ping-pub:
     image: cerc/ping-pub:local
     restart: always
+    environment:
+      LACONIC_LACONICD_CHAIN_ID: ${LACONIC_LACONICD_CHAIN_ID:-laconic_9000-1}
+      LACONIC_LACONICD_RPC_URL: ${LACONIC_LACONICD_RPC_URL:-http://localhost:26657}
+      LACONIC_LACONICD_API_URL: ${LACONIC_LACONICD_API_URL:-http://localhost:1317}
     ports:
-      - "5173:5173"
+      - 5173
@@ -32,4 +32,4 @@ services:
 volumes:
   reth_data:
   lighthouse_data:
   shared_data:
@@ -12,7 +12,7 @@ services:
       POSTGRES_INITDB_ARGS: "-E UTF8 --locale=C"
     ports:
       - "5432"

   test-client:
     image: cerc/test-database-client:local

@@ -1,2 +1,2 @@
 GETH_ROLLUP_SEQUENCERHTTP=https://sequencer.s2.testblast.io
 OP_NODE_P2P_BOOTNODES=enr:-J-4QM3GLUFfKMSJQuP1UvuKQe8DyovE7Eaiit0l6By4zjTodkR4V8NWXJxNmlg8t8rP-Q-wp3jVmeAOml8cjMj__ROGAYznzb_HgmlkgnY0gmlwhA-cZ_eHb3BzdGFja4X947FQAIlzZWNwMjU2azGhAiuDqvB-AsVSRmnnWr6OHfjgY8YfNclFy9p02flKzXnOg3RjcIJ2YYN1ZHCCdmE,enr:-J-4QDCVpByqQ8nFqCS9aHicqwUfXgzFDslvpEyYz19lvkHLIdtcIGp2d4q5dxHdjRNTO6HXCsnIKxUeuZSPcEbyVQCGAYznzz0RgmlkgnY0gmlwhANiQfuHb3BzdGFja4X947FQAIlzZWNwMjU2azGhAy3AtF2Jh_aPdOohg506Hjmtx-fQ1AKmu71C7PfkWAw9g3RjcIJ2YYN1ZHCCdmE
@@ -1411,4 +1411,4 @@
   "uid": "nT9VeZoVk",
   "version": 2,
   "weekStart": ""
 }
@@ -10,7 +10,6 @@ MONIKER="localtestnet"
 KEYRING="test"
 KEYALGO="secp256k1"
 LOGLEVEL="${LOGLEVEL:-info}"
-DENOM="alnt"


 if [ "$1" == "clean" ] || [ ! -d "$HOME/.laconicd/data/blockstore.db" ]; then
@@ -34,7 +33,7 @@ if [ "$1" == "clean" ] || [ ! -d "$HOME/.laconicd/data/blockstore.db" ]; then
   laconicd keys add $KEY --keyring-backend $KEYRING --algo $KEYALGO

   # Set moniker and chain-id for Ethermint (Moniker can be anything, chain-id must be an integer)
-  laconicd init $MONIKER --chain-id $CHAINID --default-denom $DENOM
+  laconicd init $MONIKER --chain-id $CHAINID --default-denom photon

   update_genesis() {
     jq "$1" $HOME/.laconicd/config/genesis.json > $HOME/.laconicd/config/tmp_genesis.json &&
@@ -89,13 +88,15 @@ if [ "$1" == "clean" ] || [ ! -d "$HOME/.laconicd/data/blockstore.db" ]; then
     sed -i 's/prometheus = false/prometheus = true/g' $HOME/.laconicd/config/config.toml
   fi

+  # Run this to allow requests from any origin
+  sed -i 's/cors_allowed_origins.*$/cors_allowed_origins = ["*"]/' $HOME/.laconicd/config/config.toml
+  sed -i 's/enabled-unsafe-cors.*$/enabled-unsafe-cors = true/' $HOME/.laconicd/config/app.toml

   # Allocate genesis accounts (cosmos formatted addresses)
-  # 10^30 alnt | 10^12 lnt
-  laconicd genesis add-genesis-account $KEY 1000000000000000000000000000000$DENOM --keyring-backend $KEYRING
+  laconicd genesis add-genesis-account $KEY 100000000000000000000000000photon --keyring-backend $KEYRING

   # Sign genesis transaction
-  # 10^24 alnt | 10^6 lnt
-  laconicd genesis gentx $KEY 1000000000000000000000000$DENOM --keyring-backend $KEYRING --chain-id $CHAINID
+  laconicd genesis gentx $KEY 1000000000000000000000photon --keyring-backend $KEYRING --chain-id $CHAINID

   # Collect genesis tx
   laconicd genesis collect-gentxs
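The `alnt` comments in the deleted lines above encode a base/display-unit relation: if 10^30 alnt corresponds to 10^12 lnt, then 1 lnt = 10^18 alnt. A quick sketch of that conversion, using the amounts from the script (the helper itself is illustrative):

```python
# alnt is the 18-decimal base denom for lnt, i.e. 1 lnt == 10**18 alnt.
ALNT_PER_LNT = 10 ** 18

def alnt_to_lnt(amount_alnt: int) -> int:
    return amount_alnt // ALNT_PER_LNT

# Amounts from the genesis script comments:
assert alnt_to_lnt(10 ** 30) == 10 ** 12   # add-genesis-account allocation
assert alnt_to_lnt(10 ** 24) == 10 ** 6    # gentx stake
```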
@@ -110,7 +111,7 @@ fi
 laconicd start \
   --pruning=nothing \
   --log_level $LOGLEVEL \
-  --minimum-gas-prices=1$DENOM \
+  --minimum-gas-prices=0.0001photon \
   --api.enable \
   --rpc.laddr="tcp://0.0.0.0:26657" \
   --gql-server --gql-playground
@@ -6,4 +6,4 @@ services:
     bondId:
     chainId: laconic_9000-1
     gas: 350000
-    fees: 2000000alnt
+    fees: 200000photon
@@ -65,7 +65,7 @@ if [ -n "$CERC_L1_ADDRESS" ] && [ -n "$CERC_L1_PRIV_KEY" ]; then
   # Sequencer
   SEQ=$(echo "$wallet3" | awk '/Address:/{print $2}')
   SEQ_KEY=$(echo "$wallet3" | awk '/Private key:/{print $3}')

   echo "Funding accounts."
   wait_for_block 1 300
   cast send --from $ADMIN --rpc-url $CERC_L1_RPC --value 5ether $PROPOSER --private-key $ADMIN_KEY
@@ -56,7 +56,7 @@
             "value": "!validator-pubkey"
           }
         }
       }
     ],
     "supply": []
   },
@@ -269,4 +269,4 @@
       "claims": null
     }
   }
 }
@@ -2084,4 +2084,4 @@
     "clientPolicies": {
       "policies": []
     }
   }
@@ -2388,4 +2388,4 @@
     "clientPolicies": {
       "policies": []
     }
   }
@@ -29,3 +29,4 @@
   "l1_system_config_address": "0x5531dcff39ec1ec727c4c5d2fc49835368f805a9",
   "protocol_versions_address": "0x0000000000000000000000000000000000000000"
 }
+
@@ -2388,4 +2388,4 @@
     "clientPolicies": {
       "policies": []
     }
   }
@@ -12,10 +12,7 @@ from fabric import Connection


 def dump_src_db_to_file(db_host, db_port, db_user, db_password, db_name, file_name):
-    command = (
-        f"pg_dump -h {db_host} -p {db_port} -U {db_user} "
-        f"-d {db_name} -c --inserts -f {file_name}"
-    )
+    command = f"pg_dump -h {db_host} -p {db_port} -U {db_user} -d {db_name} -c --inserts -f {file_name}"
     my_env = os.environ.copy()
     my_env["PGPASSWORD"] = db_password
     print(f"Exporting from {db_host}:{db_port}/{db_name} to {file_name}... ", end="")
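For readers unfamiliar with the pattern above: `pg_dump` reads the password from the `PGPASSWORD` environment variable, so the function builds a command string plus a modified environment. A minimal runnable sketch of how such a command might then be executed; the `subprocess.run` step is an assumption, since the hunk does not show how `command` and `my_env` are consumed:

```python
import os
import subprocess

def dump_db(db_host, db_port, db_user, db_password, db_name, file_name):
    command = f"pg_dump -h {db_host} -p {db_port} -U {db_user} -d {db_name} -c --inserts -f {file_name}"
    my_env = os.environ.copy()
    my_env["PGPASSWORD"] = db_password  # pg_dump picks the password up from the environment
    # Hypothetical execution step, not shown in the diff:
    subprocess.run(command, shell=True, env=my_env, check=True)

# Example (placeholder connection details):
# dump_db("localhost", 5432, "postgres", "secret", "mydb", "/tmp/mydb.sql")
```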
@@ -6,4 +6,4 @@ services:
     bondId:
     chainId: laconic_9000-1
     gas: 250000
-    fees: 2000000alnt
+    fees: 200000photon
@@ -1,5 +1,5 @@
 #!/bin/sh
-if [ -n "$CERC_SCRIPT_DEBUG" ]; then
+if [[ -n "$CERC_SCRIPT_DEBUG" ]]; then
   set -x
 fi

@@ -9,7 +9,7 @@ LOGLEVEL="info"
 laconicd start \
   --pruning=nothing \
   --log_level $LOGLEVEL \
-  --minimum-gas-prices=1alnt \
+  --minimum-gas-prices=0.0001photon \
   --api.enable \
   --gql-server \
   --gql-playground
@@ -1901,4 +1901,4 @@
   "uid": "b54352dd-35f6-4151-97dc-265bab0c67e9",
   "version": 18,
   "weekStart": ""
 }
@@ -849,7 +849,7 @@ groups:
       annotations:
         summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
       isPaused: false

     # Secured Finance
     - uid: secured_finance_diff_external
       title: secured_finance_watcher_head_tracking
@@ -14,7 +14,7 @@ echo ACCOUNT_PRIVATE_KEY=${CERC_PRIVATE_KEY_DEPLOYER} >> .env
 if [ -f ${erc20_address_file} ]; then
   echo "${erc20_address_file} already exists, skipping ERC20 contract deployment"
   cat ${erc20_address_file}

   # Keep the container running
   tail -f
 fi
@@ -940,3 +940,4 @@ ALTER TABLE ONLY public.state
 --
 -- PostgreSQL database dump complete
 --
+
@@ -18,3 +18,4 @@ root@7c4124bb09e3:/src#
 ```
+
 Now gerbil commands can be run.
@@ -23,7 +23,7 @@ local_npm_registry_url=$2
 versioned_target_package=$(yarn list --pattern ${target_package} --depth=0 --json --non-interactive --no-progress | jq -r '.data.trees[].name')
 # Use yarn info to get URL checksums etc from the new registry
 yarn_info_output=$(yarn info --json $versioned_target_package 2>/dev/null)
 # First check if the target version actually exists.
 # If it doesn't exist there will be no .data.dist.tarball element,
 # and jq will output the string "null"
 package_tarball=$(echo $yarn_info_output | jq -r .data.dist.tarball)
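The shell above follows a simple pattern: take the `yarn info --json` output and check whether `.data.dist.tarball` resolves, treating a missing element as "version does not exist". A hedged Python sketch of the same existence check; the JSON shape is as described by the script's comments, and invoking yarn from Python is an assumption:

```python
import json
import subprocess
from typing import Optional

def tarball_url_for(versioned_package: str) -> Optional[str]:
    # Equivalent of: yarn info --json $versioned_target_package 2>/dev/null
    result = subprocess.run(
        ["yarn", "info", "--json", versioned_package],
        capture_output=True, text=True,
    )
    try:
        info = json.loads(result.stdout)
    except json.JSONDecodeError:
        return None
    # Like the jq expression .data.dist.tarball: missing keys mean the
    # requested version is not present in the registry.
    return info.get("data", {}).get("dist", {}).get("tarball")
```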
@@ -11,8 +11,6 @@ if len(sys.argv) > 1:
 with open(testnet_config_path) as stream:
     data = yaml.safe_load(stream)

-for key, value in data["el_premine"].items():
-    acct = w3.eth.account.from_mnemonic(
-        data["mnemonic"], account_path=key, passphrase=""
-    )
+for key, value in data['el_premine'].items():
+    acct = w3.eth.account.from_mnemonic(data['mnemonic'], account_path=key, passphrase='')
     print("%s,%s,%s" % (key, acct.address, acct.key.hex()))
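For context, `from_mnemonic` derives an account from a BIP-39 mnemonic at a given HD derivation path; in eth-account the mnemonic features must be explicitly enabled. A standalone sketch with an illustrative placeholder mnemonic and path:

```python
from eth_account import Account

# HD wallet features are opt-in in eth-account:
Account.enable_unaudited_hdwallet_features()

mnemonic = "test test test test test test test test test test test junk"  # placeholder
acct = Account.from_mnemonic(mnemonic, account_path="m/44'/60'/0'/0/0", passphrase="")
print(acct.address)
```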
@@ -4,4 +4,4 @@ out = 'out'
 libs = ['lib']
 remappings = ['ds-test/=lib/ds-test/src/']

 # See more config options https://github.com/gakonst/foundry/tree/master/config
@@ -20,4 +20,4 @@ contract Stateful {
     function inc() public {
         x = x + 1;
     }
 }
@@ -14,7 +14,7 @@ funds_balance=$(echo ${funds_response} | jq -r ".[0].balance[0].quantity")
 echo "Balance is: ${funds_balance}"

 # Create a bond
-bond_create_result=$(${registry_command} bond create --type alnt --quantity 1000000000)
+bond_create_result=$(${registry_command} bond create --type photon --quantity 1000000000)
 bond_id=$(echo ${bond_create_result} | jq -r .bondId)
 echo "Created bond with id: ${bond_id}"

@@ -11,4 +11,4 @@ record:
   foo: bar
   tags:
     - a
     - b
@@ -9,4 +9,4 @@ record:
   foo: bar
   tags:
     - a
     - b
@@ -1,4 +1,4 @@
 #!/usr/bin/env bash
 # Build cerc/laconicd
 source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
 docker build -t cerc/laconicd:local ${build_command_args} ${CERC_REPO_BASE_DIR}/laconicd
@@ -26,14 +26,8 @@ fi
 SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
 WORK_DIR="${1:-/app}"

-if [ -f "${WORK_DIR}/build-webapp.sh" ]; then
-  echo "Building webapp with ${WORK_DIR}/build-webapp.sh ..."
 cd "${WORK_DIR}" || exit 1

-  ./build-webapp.sh || exit 1
-  exit 0
-fi
-
 if [ -f "next.config.mjs" ]; then
   NEXT_CONFIG_JS="next.config.mjs"
   IMPORT_OR_REQUIRE="import"
@@ -30,44 +30,36 @@ fi
 CERC_WEBAPP_FILES_DIR="${CERC_WEBAPP_FILES_DIR:-/app}"
 cd "$CERC_WEBAPP_FILES_DIR"

-if [ -f "./run-webapp.sh" ]; then
-  echo "Running webapp with run-webapp.sh ..."
-  cd "${WORK_DIR}" || exit 1
-  ./run-webapp.sh &
-  tpid=$!
-  wait $tpid
-else
-  "$SCRIPT_DIR/apply-runtime-env.sh" "`pwd`" .next .next-r
-  mv .next .next.old
-  mv .next-r/.next .
+"$SCRIPT_DIR/apply-runtime-env.sh" "`pwd`" .next .next-r
+mv .next .next.old
+mv .next-r/.next .

 if [ "$CERC_NEXTJS_SKIP_GENERATE" != "true" ]; then
   jq -e '.scripts.cerc_generate' package.json >/dev/null
   if [ $? -eq 0 ]; then
     npm run cerc_generate > gen.out 2>&1 &
     tail -f gen.out &
     tpid=$!

     count=0
     generate_done="false"
     while [ $count -lt $CERC_MAX_GENERATE_TIME ] && [ "$generate_done" == "false" ]; do
       sleep 1
       count=$((count + 1))
       grep 'rendered as static' gen.out > /dev/null
       if [ $? -eq 0 ]; then
         generate_done="true"
       fi
     done

     if [ $generate_done != "true" ]; then
       echo "ERROR: 'npm run cerc_generate' not successful within CERC_MAX_GENERATE_TIME" 1>&2
       exit 1
     fi

     kill $tpid $(ps -ef | grep node | grep next | grep generate | awk '{print $2}') 2>/dev/null
     tpid=""
   fi
-fi
 fi

 $CERC_BUILD_TOOL start . -- -p ${CERC_LISTEN_PORT:-80}
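The generate loop above is a classic poll-with-timeout: start a background job, check its log for a success marker once per second, and give up after `CERC_MAX_GENERATE_TIME` seconds. A compact Python sketch of the same pattern; the marker string comes from the script, while the file name and timeout value are illustrative:

```python
import time

def wait_for_marker(log_path: str, marker: str, timeout_seconds: int) -> bool:
    """Poll log_path once per second until marker appears or we time out."""
    for _ in range(timeout_seconds):
        time.sleep(1)
        try:
            with open(log_path) as f:
                if marker in f.read():
                    return True
        except FileNotFoundError:
            pass  # log not created yet
    return False

# Mirrors: grep 'rendered as static' gen.out in a loop bounded by CERC_MAX_GENERATE_TIME
# ok = wait_for_marker("gen.out", "rendered as static", 600)
```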
@@ -5,3 +5,4 @@ WORKDIR /app
 COPY . .

 RUN yarn
+
@@ -4,9 +4,5 @@ source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
 SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )

 # Two-stage build is to allow us to pick up both the upstream repo's files, and local files here for config
-docker build -t cerc/ping-pub-base:local ${build_command_args} -f $SCRIPT_DIR/Dockerfile.base $CERC_REPO_BASE_DIR/cosmos-explorer
-if [[ $? -ne 0 ]]; then
-  echo "FATAL: Base container build failed, exiting"
-  exit 1
-fi
+docker build -t cerc/ping-pub-base:local ${build_command_args} -f $SCRIPT_DIR/Dockerfile.base $CERC_REPO_BASE_DIR/explorer
 docker build -t cerc/ping-pub:local ${build_command_args} -f $SCRIPT_DIR/Dockerfile $SCRIPT_DIR
@@ -7,16 +7,16 @@
   "rpc": [
     {"provider": "LX-tendermint-rpc", "address": "LACONIC_LACONICD_RPC_URL"}
   ],
-  "sdk_version": "0.50.3",
+  "sdk_version": "0.45.1",
   "coin_type": "118",
   "min_tx_fee": "800",
-  "addr_prefix": "laconic",
+  "addr_prefix": "ethm",
   "logo": "/logos/cosmos.svg",
   "assets": [{
-    "base": "alnt",
+    "base": "photon",
     "symbol": "LNT",
-    "exponent": "18",
+    "exponent": "6",
     "coingecko_id": "cosmos",
     "logo": "/logos/cosmos.svg"
   }]
 }
@@ -26,6 +26,11 @@ fi
 # subvert this lunacy.
 explorer_mainnet_config_dir=/app/chains/mainnet
 explorer_testnet_config_dir=/app/chains/testnet

+# Create required directories
+mkdir -p $explorer_mainnet_config_dir
+mkdir -p $explorer_testnet_config_dir
+
 config_template_file=/config/chains/laconic-chaindata-template.json
 chain_config_name=laconic.json
 mainnet_config_file=${explorer_mainnet_config_dir}/${chain_config_name}
@@ -2,4 +2,4 @@
 # Build cerc/test-container
 source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
 SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
 docker build -t cerc/test-database-client:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} $SCRIPT_DIR
@@ -8,7 +8,7 @@ CERC_WEBAPP_FILES_DIR="${CERC_WEBAPP_FILES_DIR:-/data}"
 CERC_ENABLE_CORS="${CERC_ENABLE_CORS:-false}"
 CERC_SINGLE_PAGE_APP="${CERC_SINGLE_PAGE_APP}"

 if [ -z "${CERC_SINGLE_PAGE_APP}" ]; then
   # If there is only one HTML file, assume an SPA.
   if [ 1 -eq $(find "${CERC_WEBAPP_FILES_DIR}" -name '*.html' | wc -l) ]; then
     CERC_SINGLE_PAGE_APP=true
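The heuristic above ("exactly one HTML file implies a single-page app") is easy to express outside the shell as well. A small illustrative sketch in Python; the directory name is a placeholder:

```python
from pathlib import Path

def looks_like_spa(webapp_dir: str) -> bool:
    # Mirrors: [ 1 -eq $(find "$dir" -name '*.html' | wc -l) ]
    html_files = list(Path(webapp_dir).rglob("*.html"))
    return len(html_files) == 1

# looks_like_spa("/data")  # True for a typical SPA build with a single index.html
```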
@@ -1,260 +0,0 @@
(the entire file is removed on this branch:)
# Caddy Ingress Controller for kind
# Based on: https://github.com/caddyserver/ingress
# Provides automatic HTTPS with Let's Encrypt
apiVersion: v1
kind: Namespace
metadata:
  name: caddy-system
  labels:
    app.kubernetes.io/name: caddy-ingress-controller
    app.kubernetes.io/instance: caddy-ingress
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: caddy-ingress-controller
  namespace: caddy-system
  labels:
    app.kubernetes.io/name: caddy-ingress-controller
    app.kubernetes.io/instance: caddy-ingress
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: caddy-ingress-controller
  labels:
    app.kubernetes.io/name: caddy-ingress-controller
    app.kubernetes.io/instance: caddy-ingress
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - endpoints
      - nodes
      - pods
      - namespaces
      - services
    verbs:
      - list
      - watch
      - get
  - apiGroups:
      - ""
    resources:
      - secrets
    verbs:
      - list
      - watch
      - get
      - create
      - update
      - delete
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses/status
    verbs:
      - update
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingressclasses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - coordination.k8s.io
    resources:
      - leases
    verbs:
      - get
      - create
      - update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: caddy-ingress-controller
  labels:
    app.kubernetes.io/name: caddy-ingress-controller
    app.kubernetes.io/instance: caddy-ingress
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: caddy-ingress-controller
subjects:
  - kind: ServiceAccount
    name: caddy-ingress-controller
    namespace: caddy-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: caddy-ingress-controller-configmap
  namespace: caddy-system
  labels:
    app.kubernetes.io/name: caddy-ingress-controller
    app.kubernetes.io/instance: caddy-ingress
data:
  # Caddy global options
  acmeCA: "https://acme-v02.api.letsencrypt.org/directory"
  email: ""
---
apiVersion: v1
kind: Service
metadata:
  name: caddy-ingress-controller
  namespace: caddy-system
  labels:
    app.kubernetes.io/name: caddy-ingress-controller
    app.kubernetes.io/instance: caddy-ingress
    app.kubernetes.io/component: controller
spec:
  type: NodePort
  ports:
    - name: http
      port: 80
      targetPort: http
      protocol: TCP
    - name: https
      port: 443
      targetPort: https
      protocol: TCP
  selector:
    app.kubernetes.io/name: caddy-ingress-controller
    app.kubernetes.io/instance: caddy-ingress
    app.kubernetes.io/component: controller
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: caddy-ingress-controller
  namespace: caddy-system
  labels:
    app.kubernetes.io/name: caddy-ingress-controller
    app.kubernetes.io/instance: caddy-ingress
    app.kubernetes.io/component: controller
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: caddy-ingress-controller
      app.kubernetes.io/instance: caddy-ingress
      app.kubernetes.io/component: controller
  template:
    metadata:
      labels:
        app.kubernetes.io/name: caddy-ingress-controller
        app.kubernetes.io/instance: caddy-ingress
        app.kubernetes.io/component: controller
    spec:
      serviceAccountName: caddy-ingress-controller
      terminationGracePeriodSeconds: 60
      nodeSelector:
        ingress-ready: "true"
        kubernetes.io/os: linux
      tolerations:
        - effect: NoSchedule
          key: node-role.kubernetes.io/master
          operator: Equal
        - effect: NoSchedule
          key: node-role.kubernetes.io/control-plane
          operator: Equal
      containers:
        - name: caddy-ingress-controller
          image: caddy/ingress:latest
          imagePullPolicy: IfNotPresent
          ports:
            - name: http
              containerPort: 80
              hostPort: 80
              protocol: TCP
            - name: https
              containerPort: 443
              hostPort: 443
              protocol: TCP
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          args:
            - -config-map=caddy-system/caddy-ingress-controller-configmap
            - -class-name=caddy
          resources:
            requests:
              cpu: 100m
              memory: 128Mi
            limits:
              cpu: 1000m
              memory: 512Mi
          readinessProbe:
            httpGet:
              path: /healthz
              port: 9765
            initialDelaySeconds: 3
            periodSeconds: 10
          livenessProbe:
            httpGet:
              path: /healthz
              port: 9765
            initialDelaySeconds: 3
            periodSeconds: 10
          securityContext:
            allowPrivilegeEscalation: true
            capabilities:
              add:
                - NET_BIND_SERVICE
              drop:
                - ALL
            runAsUser: 0
            runAsGroup: 0
          volumeMounts:
            - name: caddy-data
              mountPath: /data
            - name: caddy-config
              mountPath: /config
      volumes:
        - name: caddy-data
          emptyDir: {}
        - name: caddy-config
          emptyDir: {}
---
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  name: caddy
  labels:
    app.kubernetes.io/name: caddy-ingress-controller
    app.kubernetes.io/instance: caddy-ingress
  annotations:
    ingressclass.kubernetes.io/is-default-class: "true"
spec:
  controller: caddy.io/ingress-controller
@@ -6,7 +6,7 @@ JS/TS/NPM builds need an npm registry to store intermediate package artifacts.
 This can be supplied by the user (e.g. using a hosted registry or even npmjs.com), or a local registry using gitea can be deployed by stack orchestrator.
 To use a user-supplied registry set these environment variables:

 `CERC_NPM_REGISTRY_URL` and
 `CERC_NPM_AUTH_TOKEN`

 Leave `CERC_NPM_REGISTRY_URL` un-set to use the local gitea registry.
@@ -22,7 +22,7 @@ $ laconic-so --stack build-support build-containers

 ```
 $ laconic-so --stack package-registry setup-repositories
 $ laconic-so --stack package-registry build-containers
 $ laconic-so --stack package-registry deploy up
 [+] Running 3/3
  ⠿ Network laconic-aecc4a21d3a502b14522db97d427e850_gitea  Created  0.0s
@@ -14,3 +14,4 @@ containers:
 pods:
   - fixturenet-blast
   - foundry
+
@@ -3,3 +3,4 @@
 A "loaded" version of fixturenet-eth, with all the bells and whistles enabled.

 TODO: write me
+
@@ -64,6 +64,5 @@ $ laconic-so --stack fixturenet-laconic-loaded deploy exec cli ./scripts/create-
 Balance is: 99998999999999998999600000
 Created bond with id: dd88e8d6f9567b32b28e70552aea4419c5dd3307ebae85a284d1fe38904e301a
 Published demo-record-1.yml with id: bafyreierh3xnfivexlscdwubvczmddsnf46uytyfvrbdhkjzztvsz6ruly
-...
 ```
-The published records should be visible in the console.
+The published record should be visible in the console.
@@ -30,3 +30,4 @@ config:
   cli:
     key: laconicd.mykey
     address: laconicd.myaddress
+
@@ -12,7 +12,7 @@ $ chmod +x ./laconic-so
 $ export PATH=$PATH:$(pwd)  # Or move laconic-so to ~/bin or your favorite on-path directory
 ```
 ## 2. Prepare the local build environment
 Note that this step needs only to be done once on a new machine.
 Detailed instructions can be found [here](../build-support/README.md). For the impatient run these commands:
 ```
 $ laconic-so --stack build-support build-containers --exclude cerc/builder-gerbil
Some files were not shown because too many files have changed in this diff.