forked from cerc-io/stack-orchestrator

Compare commits: pm-update-...main

74 commits

| SHA1 |
|---|
| 88dccdfb7c |
| 76c0c17c3b |
| 6a2bbae250 |
| 458b548dcf |
| 789b2dd3a7 |
| 55b76b9b57 |
| d07a3afd27 |
| a5b373da26 |
| 99db75da19 |
| d4e935484f |
| 4f01054781 |
| 811bbd9db4 |
| 8d9682eb47 |
| 638435873c |
| 97a85359ff |
| ffa00767d4 |
| 86462c940f |
| 87db167d7f |
| dd856af2d3 |
| cd3d908d0d |
| 03f9acf869 |
| ba1aad9fa6 |
| dc36a6564a |
| c5c3fc1618 |
| 2e384b7179 |
| b708836aa9 |
| d8da9b6515 |
| 5a1399f2b2 |
| 89db6e1e92 |
| 9bd59f29d9 |
| 55d6c5b495 |
| f3ef3e9a1f |
| 1768bd0fe1 |
| 8afae1904b |
| 7acabb0743 |
| ccccd9f957 |
| 34f3b719e4 |
| 0e814bd4da |
| 873a6d472c |
| 39df4683ac |
| 23ca4c4341 |
| f64ef5d128 |
| 5f8e809b2d |
| 4a7df2de33 |
| 0c47da42fe |
| e290c62aca |
| f1fdc48aaa |
| a54072de6c |
| fa21ff2627 |
| 33d395e213 |
| 75ff60752a |
| 44b9709717 |
| e56da7dcc1 |
| 60d34217f8 |
| 952389abb0 |
| 5c275aa622 |
| 8576137557 |
| 65c1cdf6b1 |
| 265699bc38 |
| 4a7670a5d6 |
| 6087e1cd31 |
| 1def279d26 |
| 64691bd206 |
| aef5986135 |
| 6f8f0340d3 |
| 7590d6e237 |
| 573f99dbbe |
| 8052c1c25e |
| a674d13493 |
| 0d4f4509c8 |
| 5af27b1b3a |
| 6c91b87348 |
| 7d18334953 |
| 79c1c5ed99 |
@@ -1,57 +0,0 @@ (workflow file deleted)

```yaml
name: Fixturenet-Eth-Plugeth-Test

on:
  push:
    branches: '*'
    paths:
      - '!**'
      - '.gitea/workflows/triggers/fixturenet-eth-plugeth-test'
  schedule: # Note: coordinate with other tests to not overload runners at the same time of day
    - cron: '2 14 * * *'


jobs:
  test:
    name: "Run an Ethereum plugeth fixturenet test"
    runs-on: ubuntu-latest
    steps:
      - name: "Clone project repository"
        uses: actions/checkout@v3
      # At present the stock setup-python action fails on Linux/aarch64
      # Conditional steps below workaround this by using deadsnakes for that case only
      - name: "Install Python for ARM on Linux"
        if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
        uses: deadsnakes/action@v3.0.1
        with:
          python-version: '3.8'
      - name: "Install Python cases other than ARM on Linux"
        if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
        uses: actions/setup-python@v4
        with:
          python-version: '3.8'
      - name: "Print Python version"
        run: python3 --version
      - name: "Install shiv"
        run: pip install shiv
      - name: "Generate build version file"
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
        run: ./scripts/build_shiv_package.sh
      - name: "Run fixturenet-eth tests"
        run: ./tests/fixturenet-eth-plugeth/run-test.sh
      - name: Notify Vulcanize Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
      - name: Notify DeepStack Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
```
@@ -1,55 +0,0 @@ (workflow file deleted)

```yaml
name: Fixturenet-Eth-Test

on:
  push:
    branches: '*'
    paths:
      - '!**'
      - '.gitea/workflows/triggers/fixturenet-eth-test'


jobs:
  test:
    name: "Run an Ethereum fixturenet test"
    runs-on: ubuntu-latest
    steps:
      - name: "Clone project repository"
        uses: actions/checkout@v3
      # At present the stock setup-python action fails on Linux/aarch64
      # Conditional steps below workaround this by using deadsnakes for that case only
      - name: "Install Python for ARM on Linux"
        if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
        uses: deadsnakes/action@v3.0.1
        with:
          python-version: '3.8'
      - name: "Install Python cases other than ARM on Linux"
        if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
        uses: actions/setup-python@v4
        with:
          python-version: '3.8'
      - name: "Print Python version"
        run: python3 --version
      - name: "Install shiv"
        run: pip install shiv
      - name: "Generate build version file"
        run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
        run: ./scripts/build_shiv_package.sh
      - name: "Run fixturenet-eth tests"
        run: ./tests/fixturenet-eth/run-test.sh
      - name: Notify Vulcanize Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
      - name: Notify DeepStack Slack on CI failure
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
```
```diff
@@ -39,7 +39,7 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv
+        run: pip install shiv==1.0.6
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
@@ -35,7 +35,7 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv
+        run: pip install shiv==1.0.6
       - name: "Build local shiv package"
         id: build
         run: |
@@ -33,7 +33,7 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv
+        run: pip install shiv==1.0.6
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
@@ -33,7 +33,7 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv
+        run: pip install shiv==1.0.6
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
```
```diff
@@ -2,7 +2,8 @@ name: Deploy Test

 on:
   pull_request:
-    branches: '*'
+    branches:
+      - main
   push:
     branches:
       - main
@@ -33,7 +34,7 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv
+        run: pip install shiv==1.0.6
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
```
```diff
@@ -33,7 +33,7 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv
+        run: pip install shiv==1.0.6
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
```
```diff
@@ -2,7 +2,8 @@ name: K8s Deploy Test

 on:
   pull_request:
-    branches: '*'
+    branches:
+      - main
   push:
     branches: '*'
     paths:
@@ -35,7 +36,7 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv
+        run: pip install shiv==1.0.6
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
```
```diff
@@ -1,19 +1,23 @@
-name: Fixturenet-Eth-Plugeth-Arm-Test
+name: K8s Deployment Control Test

 on:
+  pull_request:
+    branches:
+      - main
   push:
     branches: '*'
     paths:
       - '!**'
-      - '.gitea/workflows/triggers/fixturenet-eth-plugeth-arm-test'
+      - '.gitea/workflows/triggers/test-k8s-deployment-control'
+      - '.gitea/workflows/test-k8s-deployment-control.yml'
+      - 'tests/k8s-deployment-control/run-test.sh'
   schedule: # Note: coordinate with other tests to not overload runners at the same time of day
-    - cron: '2 14 * * *'
+    - cron: '3 30 * * *'

 jobs:
   test:
-    name: "Run an Ethereum plugeth fixturenet test"
-    runs-on: ubuntu-latest-arm
+    name: "Run deployment control suite on kind/k8s"
+    runs-on: ubuntu-22.04
     steps:
       - name: "Clone project repository"
         uses: actions/checkout@v3
@@ -32,13 +36,22 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv
+        run: pip install shiv==1.0.6
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
         run: ./scripts/build_shiv_package.sh
-      - name: "Run fixturenet-eth tests"
-        run: ./tests/fixturenet-eth-plugeth/run-test.sh
+      - name: "Check cgroups version"
+        run: mount | grep cgroup
+      - name: "Install kind"
+        run: ./tests/scripts/install-kind.sh
+      - name: "Install Kubectl"
+        run: ./tests/scripts/install-kubectl.sh
+      - name: "Run k8s deployment control test"
+        run: |
+          source /opt/bash-utils/cgroup-helper.sh
+          join_cgroup
+          ./tests/k8s-deployment-control/run-test.sh
       - name: Notify Vulcanize Slack on CI failure
         if: ${{ always() && github.ref_name == 'main' }}
         uses: ravsamhq/notify-slack-action@v2
```
```diff
@@ -2,7 +2,8 @@ name: Webapp Test

 on:
   pull_request:
-    branches: '*'
+    branches:
+      - main
   push:
     branches:
       - main
@@ -32,7 +33,7 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv
+        run: pip install shiv==1.0.6
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
```
```diff
@@ -33,7 +33,7 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv
+        run: pip install shiv==1.0.6
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
```
```diff
@@ -1,2 +0,0 @@
-Change this file to trigger running the fixturenet-eth-plugeth-arm-test CI job
-
@@ -1,3 +0,0 @@
-Change this file to trigger running the fixturenet-eth-plugeth-test CI job
-trigger
-trigger
@@ -1,2 +0,0 @@
-Change this file to trigger running the fixturenet-eth-test CI job
-
@@ -7,3 +7,4 @@ Trigger
 Trigger
 Trigger
 Trigger
+Trigger
@@ -1 +1,3 @@
 Change this file to trigger running the test-container-registry CI job
+Triggered: 2026-01-21
+Triggered: 2026-01-21 19:28:29
@@ -1,2 +1,2 @@
 Change this file to trigger running the test-database CI job
 Trigger test run
@@ -1,2 +1 @@
 Change this file to trigger running the fixturenet-eth-test CI job
-
```
.pre-commit-config.yaml (new file)

@@ -0,0 +1,34 @@

```yaml
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: check-yaml
        args: ['--allow-multiple-documents']
      - id: check-json
      - id: check-merge-conflict
      - id: check-added-large-files

  - repo: https://github.com/psf/black
    rev: 23.12.1
    hooks:
      - id: black
        language_version: python3

  - repo: https://github.com/PyCQA/flake8
    rev: 7.1.1
    hooks:
      - id: flake8
        args: ['--max-line-length=88', '--extend-ignore=E203,W503,E402']

  - repo: https://github.com/RobertCraigie/pyright-python
    rev: v1.1.345
    hooks:
      - id: pyright

  - repo: https://github.com/adrienverge/yamllint
    rev: v1.35.1
    hooks:
      - id: yamllint
        args: [-d, relaxed]
```
AI-FRIENDLY-PLAN.md (new file)

@@ -0,0 +1,151 @@
# Plan: Make Stack-Orchestrator AI-Friendly

## Goal

Make the stack-orchestrator repository easier for AI tools (Claude Code, Cursor, Copilot) to understand and use for generating stacks, including adding a `create-stack` command.

---

## Part 1: Documentation & Context Files

### 1.1 Add CLAUDE.md

Create a root-level context file for AI assistants.

**File:** `CLAUDE.md`

Contents:
- Project overview (what stack-orchestrator does)
- Stack creation workflow (step-by-step)
- File naming conventions
- Required vs optional fields in stack.yml
- Common patterns and anti-patterns
- Links to example stacks (simple, medium, complex)

### 1.2 Add JSON Schema for stack.yml

Create a formal validation schema.

**File:** `schemas/stack-schema.json`

Benefits:
- AI tools can validate generated stacks
- IDEs provide autocomplete
- CI can catch errors early
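As a sanity check on the idea, a generated stack.yml could be validated against the proposed schema with the `jsonschema` package. A minimal sketch, assuming the `schemas/stack-schema.json` file proposed above exists and that stack files parse as plain YAML mappings (field names follow the template in section 1.3):

```python
# Hypothetical validation sketch; schemas/stack-schema.json does not exist yet.
import json
from pathlib import Path

import yaml  # PyYAML here for brevity; the project itself uses ruamel.yaml
from jsonschema import validate, ValidationError


def check_stack(stack_yml: Path, schema_json: Path) -> bool:
    """Return True if stack_yml conforms to the schema, else print the error."""
    schema = json.loads(schema_json.read_text())
    stack = yaml.safe_load(stack_yml.read_text())
    try:
        validate(instance=stack, schema=schema)
        return True
    except ValidationError as e:
        print(f"{stack_yml}: {e.message}")
        return False
```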
### 1.3 Add Template Stack with Comments

Create an annotated template for reference.

**File:** `stack_orchestrator/data/stacks/_template/stack.yml`

```yaml
# Stack definition template - copy this directory to create a new stack
version: "1.2"                            # Required: 1.0, 1.1, or 1.2
name: my-stack                            # Required: lowercase, hyphens only
description: "Human-readable description" # Optional
repos:                                    # Git repositories to clone
  - github.com/org/repo
containers:                               # Container images to build (must have matching container-build/)
  - cerc/my-container
pods:                                     # Deployment units (must have matching docker-compose-{pod}.yml)
  - my-pod
```

### 1.4 Document Validation Rules

Create explicit documentation of constraints currently scattered in code.

**File:** `docs/stack-format.md`

Contents:
- Container names must start with `cerc/`
- Pod names must match compose file: `docker-compose-{pod}.yml`
- Repository format: `host/org/repo[@ref]`
- Stack directory name should match `name` field
- Version field options and differences
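These rules are mechanical enough to check in a few lines. A sketch of what such a checker could look like, assuming a parsed stack.yml dict and the data-directory layout used elsewhere in this plan (this module does not exist yet):

```python
# Illustrative constraint checks for a parsed stack.yml; not an existing module.
from pathlib import Path


def check_stack_constraints(stack: dict, stack_dir: Path, compose_dir: Path) -> list:
    """Return a list of human-readable constraint violations (empty if valid)."""
    errors = []
    for container in stack.get("containers", []):
        if not container.startswith("cerc/"):
            errors.append(f"container name must start with 'cerc/': {container}")
    for pod in stack.get("pods", []):
        if not (compose_dir / f"docker-compose-{pod}.yml").exists():
            errors.append(f"pod '{pod}' has no matching docker-compose-{pod}.yml")
    if stack.get("name") != stack_dir.name:
        errors.append(f"stack dir '{stack_dir.name}' does not match name field '{stack.get('name')}'")
    if stack.get("version") not in ("1.0", "1.1", "1.2"):
        errors.append(f"unsupported version: {stack.get('version')}")
    return errors
```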
---

## Part 2: Add `create-stack` Command

### 2.1 Command Overview

```bash
laconic-so create-stack --repo github.com/org/my-app [--name my-app] [--type webapp]
```

**Behavior:**

1. Parse the repo URL to extract the app name, if `--name` is not provided (see the sketch after this list)
2. Create `stacks/{name}/stack.yml`
3. Create `container-build/cerc-{name}/Dockerfile` and `build.sh`
4. Create `compose/docker-compose-{name}.yml`
5. Update the list files (repository-list.txt, container-image-list.txt, pod-list.txt)
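Step 1 might be as simple as taking the last path segment of the repo URL. A sketch, assuming the `host/org/repo[@ref]` format documented in section 1.4 (the exact parsing rules are not final):

```python
# Hypothetical helper for step 1: derive a default stack name from a repo URL.
def default_stack_name(repo_url: str) -> str:
    repo = repo_url.split("@")[0]           # drop an optional @ref suffix
    name = repo.rstrip("/").split("/")[-1]  # keep the last path segment
    if name.endswith(".git"):
        name = name[:-len(".git")]
    return name.lower()


assert default_stack_name("github.com/org/My-App@v1") == "my-app"
```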
### 2.2 Files to Create

| File | Purpose |
|------|---------|
| `stack_orchestrator/create/__init__.py` | Package init |
| `stack_orchestrator/create/create_stack.py` | Command implementation |

### 2.3 Files to Modify

| File | Change |
|------|--------|
| `stack_orchestrator/main.py` | Add import and `cli.add_command()` |

### 2.4 Command Options

| Option | Required | Description |
|--------|----------|-------------|
| `--repo` | Yes | Git repository URL (e.g., github.com/org/repo) |
| `--name` | No | Stack name (defaults to repo name) |
| `--type` | No | Template type: webapp, service, empty (default: webapp) |
| `--force` | No | Overwrite existing files |

### 2.5 Template Types

| Type | Base Image | Port | Use Case |
|------|------------|------|----------|
| webapp | node:20-bullseye-slim | 3000 | React/Vue/Next.js apps |
| service | python:3.11-slim | 8080 | Python backend services |
| empty | none | none | Custom from scratch |

---

## Part 3: Implementation Summary

### New Files (6)

1. `CLAUDE.md` - AI assistant context
2. `schemas/stack-schema.json` - Validation schema
3. `stack_orchestrator/data/stacks/_template/stack.yml` - Annotated template
4. `docs/stack-format.md` - Stack format documentation
5. `stack_orchestrator/create/__init__.py` - Package init
6. `stack_orchestrator/create/create_stack.py` - Command implementation

### Modified Files (1)

1. `stack_orchestrator/main.py` - Register create-stack command

---

## Verification

```bash
# 1. Command appears in help
laconic-so --help | grep create-stack

# 2. Dry run works
laconic-so --dry-run create-stack --repo github.com/org/test-app

# 3. Creates all expected files
laconic-so create-stack --repo github.com/org/test-app
ls stack_orchestrator/data/stacks/test-app/
ls stack_orchestrator/data/container-build/cerc-test-app/
ls stack_orchestrator/data/compose/docker-compose-test-app.yml

# 4. Build works with generated stack
laconic-so --stack test-app build-containers
```
CLAUDE.md (new file)

@@ -0,0 +1,50 @@
# CLAUDE.md

This file provides guidance to Claude Code when working with the stack-orchestrator project.

## Some rules to follow

NEVER speculate about the cause of something.
NEVER assume your hypotheses are true without evidence.

ALWAYS clearly state when something is a hypothesis.
ALWAYS use evidence from the systems you're interacting with to support your claims and hypotheses.

## Key Principles

### Development Guidelines
- **Single responsibility** - Each component has one clear purpose
- **Fail fast** - Let errors propagate, don't hide failures
- **DRY/KISS** - Minimize duplication and complexity

## Development Philosophy: Conversational Literate Programming

### Approach
This project follows principles inspired by literate programming, where development happens through explanatory conversation rather than code-first implementation.

### Core Principles
- **Documentation-First**: All changes begin with discussion of intent and reasoning
- **Narrative-Driven**: Complex systems are explained through conversational exploration
- **Justification Required**: Every coding task must have a corresponding TODO.md item explaining the "why"
- **Iterative Understanding**: Architecture and implementation evolve through dialogue

### Working Method
1. **Explore and Understand**: Read existing code to understand the current state
2. **Discuss Architecture**: Workshop complex design decisions through conversation
3. **Document Intent**: Update TODO.md with clear justification before coding
4. **Explain Changes**: Each modification includes reasoning and context
5. **Maintain Narrative**: Conversations serve as living documentation of design evolution

### Implementation Guidelines
- Treat conversations as primary documentation
- Explain architectural decisions before implementing
- Use TODO.md as the "literate document" that justifies all work
- Maintain clear narrative threads across sessions
- Workshop complex ideas before coding

This approach treats the human-AI collaboration as a form of **conversational literate programming** where understanding emerges through dialogue before code implementation.

## Insights and Observations

### Design Principles
- **When something times out, that doesn't mean it needs a longer timeout; it means something that was expected never happened, not that we need to wait longer for it.**
- **NEVER change a timeout because you believe something truncated; you don't understand timeouts, so don't edit them unless explicitly told to by the user.**
LICENSE

```diff
@@ -658,4 +658,4 @@
 You should also get your employer (if you work as a programmer) or school,
 if any, to sign a "copyright disclaimer" for the program, if necessary.
 For more information on this, and how to apply and follow the GNU AGPL, see
 <http://www.gnu.org/licenses/>.
```
````diff
@@ -26,7 +26,7 @@ curl -SL https://github.com/docker/compose/releases/download/v2.11.2/docker-comp
 chmod +x ~/.docker/cli-plugins/docker-compose
 ```

 Next decide on a directory where you would like to put the stack-orchestrator program. Typically this would be
 a "user" binary directory such as `~/bin` or perhaps `/usr/local/laconic` or possibly just the current working directory.

 Now, having selected that directory, download the latest release from [this page](https://git.vdb.to/cerc-io/stack-orchestrator/tags) into it (we're using `~/bin` below for concreteness but edit to suit if you selected a different directory). Also be sure that the destination directory exists and is writable:
@@ -78,5 +78,3 @@ See the [CONTRIBUTING.md](/docs/CONTRIBUTING.md) for developer mode install.
 ## Platform Support

 Native aarm64 is _not_ currently supported. x64 emulation on ARM64 macos should work (not yet tested).
-
-
````
STACK-CREATION-GUIDE.md (new file)

@@ -0,0 +1,413 @@
# Implementing `laconic-so create-stack` Command

A plan for adding a new CLI command to scaffold stack files automatically.

---

## Overview

Add a `create-stack` command that generates all required files for a new stack:

```bash
laconic-so create-stack --name my-stack --type webapp
```

**Output:**
```
stack_orchestrator/data/
├── stacks/my-stack/stack.yml
├── container-build/cerc-my-stack/
│   ├── Dockerfile
│   └── build.sh
└── compose/docker-compose-my-stack.yml

Updated: repository-list.txt, container-image-list.txt, pod-list.txt
```

---

## CLI Architecture Summary

### Command Registration Pattern

Commands are Click functions registered in `main.py`:

```python
# main.py (line ~70)
from stack_orchestrator.create import create_stack
cli.add_command(create_stack.command, "create-stack")
```

### Global Options Access

```python
from stack_orchestrator.opts import opts

if not opts.o.quiet:
    print("message")
if opts.o.dry_run:
    print("(would create files)")
```

### Key Utilities

| Function | Location | Purpose |
|----------|----------|---------|
| `get_yaml()` | `util.py` | YAML parser (ruamel.yaml) |
| `get_stack_path(stack)` | `util.py` | Resolve stack directory path |
| `error_exit(msg)` | `util.py` | Print error and exit(1) |
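Taken together, a new command would typically combine these helpers along the following lines. This is a sketch based only on the signatures listed in the table above; `load_stack_config` itself is hypothetical:

```python
# Sketch: load and parse a stack's stack.yml using the utilities above.
from pathlib import Path

from stack_orchestrator.util import error_exit, get_stack_path, get_yaml


def load_stack_config(stack: str) -> dict:
    """Resolve the stack directory and parse its stack.yml, or exit with an error."""
    stack_file = Path(get_stack_path(stack)).joinpath("stack.yml")
    if not stack_file.exists():
        error_exit(f"no stack.yml found at {stack_file}")
    with open(stack_file) as f:
        return get_yaml().load(f)
```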
---

## Files to Create

### 1. Command Module

**`stack_orchestrator/create/__init__.py`**
```python
# Empty file to make this a package
```

**`stack_orchestrator/create/create_stack.py`**
```python
import click
import os
import re
from pathlib import Path

from stack_orchestrator.opts import opts
from stack_orchestrator.util import error_exit, get_yaml

# Template types
STACK_TEMPLATES = {
    "webapp": {
        "description": "Web application with Node.js",
        "base_image": "node:20-bullseye-slim",
        "port": 3000,
    },
    "service": {
        "description": "Backend service",
        "base_image": "python:3.11-slim",
        "port": 8080,
    },
    "empty": {
        "description": "Minimal stack with no defaults",
        "base_image": None,
        "port": None,
    },
}


def get_data_dir() -> Path:
    """Get path to the stack_orchestrator/data directory"""
    return Path(__file__).absolute().parent.parent.joinpath("data")


def validate_stack_name(name: str) -> None:
    """Validate that the stack name follows conventions"""
    if not re.match(r'^[a-z0-9][a-z0-9-]*[a-z0-9]$', name):
        error_exit(f"Invalid stack name '{name}'. Use lowercase alphanumeric with hyphens.")
    if name.startswith("cerc-"):
        error_exit("Stack name should not start with 'cerc-' (container names will add this prefix)")


def create_stack_yml(stack_dir: Path, name: str, template: dict, repo_url: str) -> None:
    """Create stack.yml file"""
    config = {
        "version": "1.2",
        "name": name,
        "description": template.get("description", f"Stack: {name}"),
        "repos": [repo_url] if repo_url else [],
        "containers": [f"cerc/{name}"],
        "pods": [name],
    }

    stack_dir.mkdir(parents=True, exist_ok=True)
    with open(stack_dir / "stack.yml", "w") as f:
        get_yaml().dump(config, f)


def create_dockerfile(container_dir: Path, name: str, template: dict) -> None:
    """Create Dockerfile"""
    base_image = template.get("base_image") or "node:20-bullseye-slim"
    port = template.get("port") or 3000

    dockerfile_content = f'''# Build stage
FROM {base_image} AS builder

WORKDIR /app
COPY package*.json ./
RUN npm ci
COPY . .
RUN npm run build

# Production stage
FROM {base_image}

WORKDIR /app
COPY package*.json ./
RUN npm ci --only=production
COPY --from=builder /app/dist ./dist

EXPOSE {port}
CMD ["npm", "run", "start"]
'''

    container_dir.mkdir(parents=True, exist_ok=True)
    with open(container_dir / "Dockerfile", "w") as f:
        f.write(dockerfile_content)


def create_build_script(container_dir: Path, name: str) -> None:
    """Create build.sh script"""
    build_script = f'''#!/usr/bin/env bash
# Build cerc/{name}

source ${{CERC_CONTAINER_BASE_DIR}}/build-base.sh

SCRIPT_DIR=$( cd -- "$( dirname -- "${{BASH_SOURCE[0]}}" )" &> /dev/null && pwd )

docker build -t cerc/{name}:local \\
    -f ${{SCRIPT_DIR}}/Dockerfile \\
    ${{build_command_args}} \\
    ${{CERC_REPO_BASE_DIR}}/{name}
'''

    build_path = container_dir / "build.sh"
    with open(build_path, "w") as f:
        f.write(build_script)

    # Make executable
    os.chmod(build_path, 0o755)


def create_compose_file(compose_dir: Path, name: str, template: dict) -> None:
    """Create docker-compose file"""
    port = template.get("port") or 3000

    compose_content = {
        "version": "3.8",
        "services": {
            name: {
                "image": f"cerc/{name}:local",
                "restart": "unless-stopped",
                "ports": [f"${{HOST_PORT:-{port}}}:{port}"],
                "environment": {
                    "NODE_ENV": "${NODE_ENV:-production}",
                },
            }
        }
    }

    with open(compose_dir / f"docker-compose-{name}.yml", "w") as f:
        get_yaml().dump(compose_content, f)


def update_list_file(data_dir: Path, filename: str, entry: str) -> None:
    """Add entry to a list file if not already present"""
    list_path = data_dir / filename

    # Read existing entries
    existing = set()
    if list_path.exists():
        with open(list_path, "r") as f:
            existing = set(line.strip() for line in f if line.strip())

    # Add new entry
    if entry not in existing:
        with open(list_path, "a") as f:
            f.write(f"{entry}\n")


@click.command()
@click.option("--name", required=True, help="Name of the new stack (lowercase, hyphens)")
@click.option("--type", "stack_type", default="webapp",
              type=click.Choice(list(STACK_TEMPLATES.keys())),
              help="Stack template type")
@click.option("--repo", help="Git repository URL (e.g., github.com/org/repo)")
@click.option("--force", is_flag=True, help="Overwrite existing files")
@click.pass_context
def command(ctx, name: str, stack_type: str, repo: str, force: bool):
    """Create a new stack with all required files.

    Examples:

        laconic-so create-stack --name my-app --type webapp

        laconic-so create-stack --name my-service --type service --repo github.com/org/repo
    """
    # Validate
    validate_stack_name(name)

    template = STACK_TEMPLATES[stack_type]
    data_dir = get_data_dir()

    # Define paths
    stack_dir = data_dir / "stacks" / name
    container_dir = data_dir / "container-build" / f"cerc-{name}"
    compose_dir = data_dir / "compose"

    # Check for existing files
    if not force:
        if stack_dir.exists():
            error_exit(f"Stack already exists: {stack_dir}\nUse --force to overwrite")
        if container_dir.exists():
            error_exit(f"Container build dir exists: {container_dir}\nUse --force to overwrite")

    # Dry run check
    if opts.o.dry_run:
        print(f"Would create stack '{name}' with template '{stack_type}':")
        print(f"  - {stack_dir}/stack.yml")
        print(f"  - {container_dir}/Dockerfile")
        print(f"  - {container_dir}/build.sh")
        print(f"  - {compose_dir}/docker-compose-{name}.yml")
        print("  - Update repository-list.txt")
        print("  - Update container-image-list.txt")
        print("  - Update pod-list.txt")
        return

    # Create files
    if not opts.o.quiet:
        print(f"Creating stack '{name}' with template '{stack_type}'...")

    create_stack_yml(stack_dir, name, template, repo)
    if opts.o.verbose:
        print(f"  Created {stack_dir}/stack.yml")

    create_dockerfile(container_dir, name, template)
    if opts.o.verbose:
        print(f"  Created {container_dir}/Dockerfile")

    create_build_script(container_dir, name)
    if opts.o.verbose:
        print(f"  Created {container_dir}/build.sh")

    create_compose_file(compose_dir, name, template)
    if opts.o.verbose:
        print(f"  Created {compose_dir}/docker-compose-{name}.yml")

    # Update list files
    if repo:
        update_list_file(data_dir, "repository-list.txt", repo)
        if opts.o.verbose:
            print(f"  Added {repo} to repository-list.txt")

    update_list_file(data_dir, "container-image-list.txt", f"cerc/{name}")
    if opts.o.verbose:
        print(f"  Added cerc/{name} to container-image-list.txt")

    update_list_file(data_dir, "pod-list.txt", name)
    if opts.o.verbose:
        print(f"  Added {name} to pod-list.txt")

    # Summary
    if not opts.o.quiet:
        print(f"\nStack '{name}' created successfully!")
        print("\nNext steps:")
        print(f"  1. Edit {stack_dir}/stack.yml")
        print(f"  2. Customize {container_dir}/Dockerfile")
        print(f"  3. Run: laconic-so --stack {name} build-containers")
        print(f"  4. Run: laconic-so --stack {name} deploy-system up")
```

### 2. Register Command in main.py

**Edit `stack_orchestrator/main.py`**

Add import:
```python
from stack_orchestrator.create import create_stack
```

Add command registration (after line ~78):
```python
cli.add_command(create_stack.command, "create-stack")
```

---

## Implementation Steps

### Step 1: Create module structure
```bash
mkdir -p stack_orchestrator/create
touch stack_orchestrator/create/__init__.py
```

### Step 2: Create the command file
Create `stack_orchestrator/create/create_stack.py` with the code above.

### Step 3: Register in main.py
Add the import and `cli.add_command()` line.

### Step 4: Test the command
```bash
# Show help
laconic-so create-stack --help

# Dry run
laconic-so --dry-run create-stack --name test-app --type webapp

# Create a stack
laconic-so create-stack --name test-app --type webapp --repo github.com/org/test-app

# Verify
ls -la stack_orchestrator/data/stacks/test-app/
cat stack_orchestrator/data/stacks/test-app/stack.yml
```

---

## Template Types

| Type | Base Image | Port | Use Case |
|------|------------|------|----------|
| `webapp` | node:20-bullseye-slim | 3000 | React/Vue/Next.js apps |
| `service` | python:3.11-slim | 8080 | Python backend services |
| `empty` | none | none | Custom from scratch |

---

## Future Enhancements

1. **Interactive mode** - Prompt for values if not provided
2. **More templates** - Go, Rust, database stacks
3. **Template from existing** - `--from-stack existing-stack`
4. **External stack support** - Create in custom directory
5. **Validation command** - `laconic-so validate-stack --name my-stack`

---

## Files Modified

| File | Change |
|------|--------|
| `stack_orchestrator/create/__init__.py` | New (empty) |
| `stack_orchestrator/create/create_stack.py` | New (command implementation) |
| `stack_orchestrator/main.py` | Add import and `cli.add_command()` |

---

## Verification

```bash
# 1. Command appears in help
laconic-so --help | grep create-stack

# 2. Dry run works
laconic-so --dry-run create-stack --name verify-test --type webapp

# 3. Full creation works
laconic-so create-stack --name verify-test --type webapp
ls stack_orchestrator/data/stacks/verify-test/
ls stack_orchestrator/data/container-build/cerc-verify-test/
ls stack_orchestrator/data/compose/docker-compose-verify-test.yml

# 4. Build works
laconic-so --stack verify-test build-containers

# 5. Cleanup
rm -rf stack_orchestrator/data/stacks/verify-test
rm -rf stack_orchestrator/data/container-build/cerc-verify-test
rm stack_orchestrator/data/compose/docker-compose-verify-test.yml
```
TODO.md (new file)

@@ -0,0 +1,16 @@
# TODO

## Features Needed

### Update Stack Command
We need an "update stack" command in stack-orchestrator, and cleaner documentation on how to do continuous deployment with and without payments.

**Context**: Currently, `deploy init` generates a spec file and `deploy create` creates a deployment directory. The `deployment update` command (added by Thomas Lackey) only syncs env vars and restarts - it doesn't regenerate configurations. There's a gap in the workflow for updating stack configurations after initial deployment.

## Architecture Refactoring

### Separate Deployer from Stack Orchestrator CLI
The deployer logic should be decoupled from the CLI tool to allow independent development and reuse.

### Separate Stacks from Stack Orchestrator Repo
Stacks should live in their own repositories, not bundled with the orchestrator tool. This allows stacks to evolve independently and be maintained by different teams.
docs/cli.md

@@ -65,3 +65,71 @@ Force full rebuild of packages:

```
$ laconic-so build-npms --include <package-name> --force-rebuild
```

## deploy

The `deploy` command group manages persistent deployments. The general workflow is `deploy init` to generate a spec file, then `deploy create` to create a deployment directory from the spec, then runtime commands such as `deploy up` and `deploy down`.

### deploy init

Generate a deployment spec file from a stack definition:
```
$ laconic-so --stack <stack-name> deploy init --output <spec-file>
```

Options:
- `--output` (required): write the spec file here
- `--config`: provide config variables for the deployment
- `--config-file`: provide config variables in a file
- `--kube-config`: provide a config file for a k8s deployment
- `--image-registry`: provide a container image registry url for this k8s cluster
- `--map-ports-to-host`: map ports to the host (`any-variable-random`, `localhost-same`, `any-same`, `localhost-fixed-random`, `any-fixed-random`)

### deploy create

Create a deployment directory from a spec file:
```
$ laconic-so --stack <stack-name> deploy create --spec-file <spec-file> --deployment-dir <dir>
```

Update an existing deployment in place (preserving data volumes and the env file):
```
$ laconic-so --stack <stack-name> deploy create --spec-file <spec-file> --deployment-dir <dir> --update
```

Options:
- `--spec-file` (required): spec file to use
- `--deployment-dir`: target directory for deployment files
- `--update`: update an existing deployment directory, preserving data volumes and the env file. Changed files are backed up with a `.bak` suffix. The deployment's `config.env` and `deployment.yml` are also preserved.
- `--network-dir`: network configuration supplied in this directory
- `--initial-peers`: initial set of persistent peers

### deploy up

Start a deployment:
```
$ laconic-so deployment --dir <deployment-dir> up
```

### deploy down

Stop a deployment:
```
$ laconic-so deployment --dir <deployment-dir> down
```
Use `--delete-volumes` to also remove data volumes.

### deploy ps

Show running services:
```
$ laconic-so deployment --dir <deployment-dir> ps
```

### deploy logs

View service logs:
```
$ laconic-so deployment --dir <deployment-dir> logs
```
Use `-f` to follow and `-n <count>` to tail.
docs/docker-compose-deployment.md (new file)

@@ -0,0 +1,550 @@
# Docker Compose Deployment Guide

## Introduction

### What is a Deployer?

In stack-orchestrator, a **deployer** provides a uniform interface for orchestrating containerized applications. This guide focuses on Docker Compose, the default and recommended deployment mode.

While stack-orchestrator also supports Kubernetes (`k8s`) and Kind (`k8s-kind`) deployments, those are out of scope for this guide. See the [Kubernetes Enhancements](./k8s-deployment-enhancements.md) documentation for advanced deployment options.
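The uniform interface can be pictured roughly as follows. This is an illustration only; the class and method names are hypothetical, not the actual stack-orchestrator API:

```python
# Illustrative only: the shape of a uniform deployer interface.
from abc import ABC, abstractmethod
from typing import Optional


class Deployer(ABC):
    """Operations every deployment backend (compose, k8s, ...) must support."""

    @abstractmethod
    def up(self) -> None: ...

    @abstractmethod
    def down(self, delete_volumes: bool = False) -> None: ...

    @abstractmethod
    def ps(self) -> list: ...

    @abstractmethod
    def logs(self, service: Optional[str] = None, tail: Optional[int] = None) -> str: ...


class DockerComposeDeployer(Deployer):
    """Docker Compose backend: would shell out to `docker compose ...`."""
```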
## Prerequisites

To deploy stacks using Docker Compose, you need:

- Docker Engine (20.10+)
- Docker Compose plugin (v2.0+)
- Python 3.8+
- stack-orchestrator installed (`laconic-so`)

**That's it!** No additional infrastructure is required. If you have Docker installed, you're ready to deploy.

## Deployment Workflow

The typical deployment workflow consists of four main steps:

1. **Setup repositories and build containers** (first time only)
2. **Initialize deployment specification**
3. **Create deployment directory**
4. **Start and manage services**

## Quick Start Example

Here's a complete example using the built-in `test` stack:

```bash
# Step 1: Setup (first time only)
laconic-so --stack test setup-repositories
laconic-so --stack test build-containers

# Step 2: Initialize deployment spec
laconic-so --stack test deploy init --output test-spec.yml

# Step 3: Create deployment directory
laconic-so --stack test deploy create \
  --spec-file test-spec.yml \
  --deployment-dir test-deployment

# Step 4: Start services
laconic-so deployment --dir test-deployment start

# View running services
laconic-so deployment --dir test-deployment ps

# View logs
laconic-so deployment --dir test-deployment logs

# Stop services (preserves data)
laconic-so deployment --dir test-deployment stop
```

## Deployment Workflows

Stack-orchestrator supports two deployment workflows:

### 1. Deployment Directory Workflow (Recommended)

This workflow creates a persistent deployment directory that contains all configuration and data.

**When to use:**
- Production deployments
- When you need to preserve configuration
- When you want to manage multiple deployments
- When you need persistent volume data

**Example:**

```bash
# Initialize deployment spec
laconic-so --stack fixturenet-eth deploy init --output eth-spec.yml

# Optionally edit eth-spec.yml to customize configuration

# Create deployment directory
laconic-so --stack fixturenet-eth deploy create \
  --spec-file eth-spec.yml \
  --deployment-dir my-eth-deployment

# Start the deployment
laconic-so deployment --dir my-eth-deployment start

# Manage the deployment
laconic-so deployment --dir my-eth-deployment ps
laconic-so deployment --dir my-eth-deployment logs
laconic-so deployment --dir my-eth-deployment stop
```

### 2. Quick Deploy Workflow

This workflow deploys directly, without creating a persistent deployment directory.

**When to use:**
- Quick testing
- Temporary deployments
- Simple stacks that don't require customization

**Example:**

```bash
# Start the stack directly
laconic-so --stack test deploy up

# Check which host port maps to container port 80 on service 'test'
laconic-so --stack test deploy port test 80

# View logs
laconic-so --stack test deploy logs

# Stop (preserves volumes)
laconic-so --stack test deploy down

# Stop and remove volumes
laconic-so --stack test deploy down --delete-volumes
```

## Real-World Example: Ethereum Fixturenet

Deploy a local Ethereum testnet with Geth and Lighthouse:

```bash
# Setup (first time only)
laconic-so --stack fixturenet-eth setup-repositories
laconic-so --stack fixturenet-eth build-containers

# Initialize with default configuration
laconic-so --stack fixturenet-eth deploy init --output eth-spec.yml

# Create deployment
laconic-so --stack fixturenet-eth deploy create \
  --spec-file eth-spec.yml \
  --deployment-dir fixturenet-eth-deployment

# Start the network
laconic-so deployment --dir fixturenet-eth-deployment start

# Check status
laconic-so deployment --dir fixturenet-eth-deployment ps

# Access logs from a specific service
laconic-so deployment --dir fixturenet-eth-deployment logs fixturenet-eth-geth-1

# Stop the network (preserves blockchain data)
laconic-so deployment --dir fixturenet-eth-deployment stop

# Start again - blockchain data is preserved
laconic-so deployment --dir fixturenet-eth-deployment start

# Clean up everything including data
laconic-so deployment --dir fixturenet-eth-deployment stop --delete-volumes
```

## Configuration

### Passing Configuration Parameters

Configuration can be passed in three ways:

**1. At init time via the `--config` flag:**

```bash
laconic-so --stack test deploy init --output spec.yml \
  --config PARAM1=value1,PARAM2=value2
```

**2. Edit the spec file after init:**

```bash
# Initialize
laconic-so --stack test deploy init --output spec.yml

# Edit spec.yml
vim spec.yml
```

Example spec.yml:
```yaml
stack: test
config:
  PARAM1: value1
  PARAM2: value2
```

**3. Docker Compose defaults:**

Environment variables defined in the stack's `docker-compose-*.yml` files are used as defaults. Configuration from the spec file overrides these defaults, as the sketch below illustrates.
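The precedence just described amounts to a dictionary merge in which spec-file values win. A tiny illustration with hypothetical variable names:

```python
# Spec-file config overrides compose-file defaults (illustrative values only).
compose_defaults = {"PARAM1": "compose-default", "PARAM2": "compose-default"}
spec_config = {"PARAM1": "from-spec"}

effective = {**compose_defaults, **spec_config}  # later mapping wins
assert effective == {"PARAM1": "from-spec", "PARAM2": "compose-default"}
```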
### Port Mapping
|
||||||
|
|
||||||
|
By default, services are accessible on randomly assigned host ports. To find the mapped port:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Find the host port for container port 80 on service 'webapp'
|
||||||
|
laconic-so deployment --dir my-deployment port webapp 80
|
||||||
|
|
||||||
|
# Output example: 0.0.0.0:32768
|
||||||
|
```
|
||||||
|
|
||||||
|
To configure fixed ports, edit the spec file before creating the deployment:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
network:
|
||||||
|
ports:
|
||||||
|
webapp:
|
||||||
|
- '8080:80' # Maps host port 8080 to container port 80
|
||||||
|
api:
|
||||||
|
- '3000:3000'
|
||||||
|
```
|
||||||
|
|
||||||
|
Then create the deployment:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
laconic-so --stack my-stack deploy create \
|
||||||
|
--spec-file spec.yml \
|
||||||
|
--deployment-dir my-deployment
|
||||||
|
```
|
||||||
|
|
||||||
|
### Volume Persistence
|
||||||
|
|
||||||
|
Volumes are preserved between stop/start cycles by default:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Stop but keep data
|
||||||
|
laconic-so deployment --dir my-deployment stop
|
||||||
|
|
||||||
|
# Start again - data is still there
|
||||||
|
laconic-so deployment --dir my-deployment start
|
||||||
|
```
|
||||||
|
|
||||||
|
To completely remove all data:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Stop and delete all volumes
|
||||||
|
laconic-so deployment --dir my-deployment stop --delete-volumes
|
||||||
|
```
|
||||||
|
|
||||||
|
Volume data is stored in `<deployment-dir>/data/`.

## Common Operations

### Viewing Logs

```bash
# All services, continuous follow
laconic-so deployment --dir my-deployment logs --follow

# Last 100 lines from all services
laconic-so deployment --dir my-deployment logs --tail 100

# Specific service only
laconic-so deployment --dir my-deployment logs webapp

# Combine options
laconic-so deployment --dir my-deployment logs --tail 50 --follow webapp
```

### Executing Commands in Containers

```bash
# Execute a command in a running service
laconic-so deployment --dir my-deployment exec webapp ls -la

# Interactive shell
laconic-so deployment --dir my-deployment exec webapp /bin/bash

# Run command with specific environment variables
laconic-so deployment --dir my-deployment exec webapp env VAR=value command
```

### Checking Service Status

```bash
# List all running services
laconic-so deployment --dir my-deployment ps

# Check using Docker directly
docker ps
```

### Updating a Running Deployment

If you need to change configuration after deployment:

```bash
# 1. Edit the spec file
vim my-deployment/spec.yml

# 2. Regenerate configuration
laconic-so deployment --dir my-deployment update

# 3. Restart services to apply changes
laconic-so deployment --dir my-deployment stop
laconic-so deployment --dir my-deployment start
```

## Multi-Service Deployments

Many stacks deploy multiple services that work together:

```bash
# Deploy a stack with multiple services
laconic-so --stack laconicd-with-console deploy init --output spec.yml
laconic-so --stack laconicd-with-console deploy create \
  --spec-file spec.yml \
  --deployment-dir laconicd-deployment

laconic-so deployment --dir laconicd-deployment start

# View all services
laconic-so deployment --dir laconicd-deployment ps

# View logs from specific services
laconic-so deployment --dir laconicd-deployment logs laconicd
laconic-so deployment --dir laconicd-deployment logs console
```

## ConfigMaps

ConfigMaps allow you to mount configuration files into containers:

```bash
# 1. Create the config directory in your deployment
mkdir -p my-deployment/data/my-config
echo "database_url=postgres://localhost" > my-deployment/data/my-config/app.conf

# 2. Reference in spec file
vim my-deployment/spec.yml
```

Add to spec.yml:

```yaml
configmaps:
  my-config: ./data/my-config
```

```bash
# 3. Restart to apply
laconic-so deployment --dir my-deployment stop
laconic-so deployment --dir my-deployment start
```

The files will be mounted in the container at `/config/` (or as specified by the stack).
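
You can verify the mount from inside the container with the `exec` command shown earlier (the exact path under `/config/` is stack-dependent, so treat this as a sketch):

```bash
laconic-so deployment --dir my-deployment exec webapp ls -la /config/
```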

## Deployment Directory Structure

A typical deployment directory contains:

```
my-deployment/
├── compose/
│   └── docker-compose-*.yml   # Generated compose files
├── config.env                 # Environment variables
├── deployment.yml             # Deployment metadata
├── spec.yml                   # Deployment specification
└── data/                      # Volume mounts and configs
    ├── service-data/          # Persistent service data
    └── config-maps/           # ConfigMap files
```

## Troubleshooting

### Common Issues

**Problem: "Cannot connect to Docker daemon"**

```bash
# Ensure Docker is running
docker ps

# Start Docker if needed (macOS)
open -a Docker

# Start Docker (Linux)
sudo systemctl start docker
```

**Problem: "Port already in use"**

```bash
# Either stop the conflicting service or use different ports
# Edit spec.yml before creating deployment:

network:
  ports:
    webapp:
      - '8081:80'  # Use 8081 instead of 8080
```

**Problem: "Image not found"**

```bash
# Build containers first
laconic-so --stack your-stack build-containers
```

**Problem: Volumes not persisting**

```bash
# Check if you used --delete-volumes when stopping
# Volume data is in: <deployment-dir>/data/

# Don't use --delete-volumes if you want to keep data:
laconic-so deployment --dir my-deployment stop

# Only use --delete-volumes when you want to reset completely:
laconic-so deployment --dir my-deployment stop --delete-volumes
```

**Problem: Services not starting**

```bash
# Check logs for errors
laconic-so deployment --dir my-deployment logs

# Check Docker container status
docker ps -a

# Try stopping and starting again
laconic-so deployment --dir my-deployment stop
laconic-so deployment --dir my-deployment start
```

### Inspecting Deployment State

```bash
# Check deployment directory structure
ls -la my-deployment/

# Check running containers
docker ps

# Check container details
docker inspect <container-name>

# Check networks
docker network ls

# Check volumes
docker volume ls
```

## CLI Commands Reference

### Stack Operations

```bash
# Clone required repositories
laconic-so --stack <name> setup-repositories

# Build container images
laconic-so --stack <name> build-containers
```

### Deployment Initialization

```bash
# Initialize deployment spec with defaults
laconic-so --stack <name> deploy init --output <spec-file>

# Initialize with configuration
laconic-so --stack <name> deploy init --output <spec-file> \
  --config PARAM1=value1,PARAM2=value2
```

### Deployment Creation

```bash
# Create deployment directory from spec
laconic-so --stack <name> deploy create \
  --spec-file <spec-file> \
  --deployment-dir <dir>
```

### Deployment Management

```bash
# Start all services
laconic-so deployment --dir <dir> start

# Stop services (preserves volumes)
laconic-so deployment --dir <dir> stop

# Stop and remove volumes
laconic-so deployment --dir <dir> stop --delete-volumes

# List running services
laconic-so deployment --dir <dir> ps

# View logs
laconic-so deployment --dir <dir> logs [--tail N] [--follow] [service]

# Show mapped port
laconic-so deployment --dir <dir> port <service> <private-port>

# Execute command in service
laconic-so deployment --dir <dir> exec <service> <command>

# Update configuration
laconic-so deployment --dir <dir> update
```

### Quick Deploy Commands

```bash
# Start stack directly
laconic-so --stack <name> deploy up

# Stop stack
laconic-so --stack <name> deploy down [--delete-volumes]

# View logs
laconic-so --stack <name> deploy logs

# Show port mapping
laconic-so --stack <name> deploy port <service> <port>
```

## Related Documentation

- [CLI Reference](./cli.md) - Complete CLI command documentation
- [Adding a New Stack](./adding-a-new-stack.md) - Creating custom stacks
- [Specification](./spec.md) - Internal structure and design
- [Kubernetes Enhancements](./k8s-deployment-enhancements.md) - Advanced K8s deployment options
- [Web App Deployment](./webapp.md) - Deploying web applications

## Examples

For more examples, see the test scripts:

- `scripts/quick-deploy-test.sh` - Quick deployment example
- `tests/deploy/run-deploy-test.sh` - Comprehensive test showing all features

## Summary

- Docker Compose is the default and recommended deployment mode
- Two workflows: deployment directory (recommended) or quick deploy
- The standard workflow is: setup → build → init → create → start
- Configuration is flexible with multiple override layers
- Volume persistence is automatic unless explicitly deleted
- All deployment state is contained in the deployment directory
- For Kubernetes deployments, see separate K8s documentation

You're now ready to deploy stacks using stack-orchestrator with Docker Compose!

@@ -1,9 +1,9 @@
# Fetching pre-built container images

When Stack Orchestrator deploys a stack containing a suite of one or more containers, it expects images for those containers to be present on the local machine with a tag of the form `<image-name>:local`. Images for these containers can be built from source (and optionally from base container images from public registries) with the `build-containers` subcommand.

However, the task of building a large number of containers from source may consume considerable time and machine resources. This is where the `fetch-containers` subcommand steps in. It is designed to work exactly like `build-containers`, but instead the pre-built images are fetched from an image registry and then re-tagged for deployment. It can be used in place of `build-containers` for any stack, provided the necessary containers, built for the local machine architecture (e.g. arm64 or x86-64), have already been published in an image registry.

## Usage

To use `fetch-containers`, provide an image registry path, a username and token/password with read access to the registry, and optionally specify `--force-local-overwrite`. If this argument is not specified and there is already a locally built or previously fetched image for a stack container on the machine, it will not be overwritten and a warning will be issued.

```
$ laconic-so --stack mobymask-v3-demo fetch-containers --image-registry git.vdb.to/cerc-io --registry-username <registry-user> --registry-token <registry-token> --force-local-overwrite
```
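
After a successful fetch, the re-tagged images should be visible locally; a quick sanity check (not part of the documented workflow):

```bash
docker images | grep ':local'
```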

@@ -7,7 +7,7 @@ Deploy a local Gitea server, publish NPM packages to it, then use those packages

```bash
laconic-so --stack build-support build-containers
laconic-so --stack package-registry setup-repositories
laconic-so --stack package-registry build-containers
laconic-so --stack package-registry deploy up
```
docs/helm-chart-generation.md (new file, 113 lines)

# Helm Chart Generation

Generate Kubernetes Helm charts from stack compose files using Kompose.

## Prerequisites

Install Kompose:

```bash
# Linux
curl -L https://github.com/kubernetes/kompose/releases/download/v1.34.0/kompose-linux-amd64 -o kompose
chmod +x kompose
sudo mv kompose /usr/local/bin/

# macOS
brew install kompose

# Verify
kompose version
```

## Usage

### 1. Create spec file

```bash
laconic-so --stack <stack-name> deploy --deploy-to k8s init \
  --kube-config ~/.kube/config \
  --output spec.yml
```

### 2. Generate Helm chart

```bash
laconic-so --stack <stack-name> deploy create \
  --spec-file spec.yml \
  --deployment-dir my-deployment \
  --helm-chart
```

### 3. Deploy to Kubernetes

```bash
helm install my-release my-deployment/chart
kubectl get pods -n zenith
```

## Output Structure

```
my-deployment/
├── spec.yml     # Reference
├── stack.yml    # Reference
└── chart/       # Helm chart
    ├── Chart.yaml
    ├── README.md
    └── templates/
        └── *.yaml
```

## Example

```bash
# Generate chart for stage1-zenithd
laconic-so --stack stage1-zenithd deploy --deploy-to k8s init \
  --kube-config ~/.kube/config \
  --output stage1-spec.yml

laconic-so --stack stage1-zenithd deploy create \
  --spec-file stage1-spec.yml \
  --deployment-dir stage1-deployment \
  --helm-chart

# Deploy
helm install stage1-zenithd stage1-deployment/chart
```

## Production Deployment (TODO)

### Local Development

```bash
# Access services using port-forward
kubectl port-forward service/zenithd 26657:26657
kubectl port-forward service/nginx-api-proxy 1317:80
kubectl port-forward service/cosmos-explorer 4173:4173
```

### Production Access Options

- Option 1: Ingress + cert-manager (Recommended; a sketch follows this list)
  - Install ingress-nginx + cert-manager
  - Point DNS to cluster LoadBalancer IP
  - Auto-provisions Let's Encrypt TLS certs
  - Access: `https://api.zenith.example.com`
- Option 2: Cloud LoadBalancer
  - Use cloud provider's LoadBalancer service type
  - Point DNS to assigned external IP
  - Manual TLS cert management
- Option 3: Bare Metal (MetalLB + Ingress)
  - MetalLB provides LoadBalancer IPs from local network
  - Same Ingress setup as cloud
- Option 4: NodePort + External Proxy
  - Expose services on 30000-32767 range
  - External nginx/Caddy proxies 80/443 → NodePort
  - Manual cert management
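
A minimal sketch of the Ingress resource that Option 1 implies. Nothing like this is generated by the current charts yet, and the host, issuer, service name, and port below are all placeholders:

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: zenith-api
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt-prod  # assumes a cert-manager ClusterIssuer exists
spec:
  ingressClassName: nginx
  tls:
    - hosts: [api.zenith.example.com]
      secretName: zenith-api-tls
  rules:
    - host: api.zenith.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: nginx-api-proxy  # placeholder service name
                port:
                  number: 80
```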

### Changes Needed

- Add Ingress template to charts
- Add TLS configuration to values.yaml
- Document cert-manager setup
- Add production deployment guide

docs/k8s-deployment-enhancements.md (new file, 26 lines)

# K8S Deployment Enhancements

## Controlling pod placement

The placement of pods created as part of a stack deployment can be controlled to either avoid certain nodes, or require certain nodes.

### Pod/Node Affinity

Node affinity rules applied to pods target node labels. The effect is that a pod can only be placed on a node having the specified label value. Note that other pods that do not have any node affinity rules can also be placed on those same nodes. Thus node affinity for a pod controls where that pod can be placed, but does not control where other pods are placed.

Node affinity for stack pods is specified in the deployment's `spec.yml` file as follows:

```
node-affinities:
  - label: nodetype
    value: typeb
```

This example denotes that the stack's pods should only be placed on nodes that have the label `nodetype` with value `typeb`.
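
For orientation, the target node carries the matching label, and the generated pod spec contains a standard Kubernetes node affinity rule roughly along these lines (illustrative; the exact structure is produced by Stack Orchestrator):

```yaml
# Label the target node first:
#   kubectl label node <node-name> nodetype=typeb
affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:
        - matchExpressions:
            - key: nodetype
              operator: In
              values: [typeb]
```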

### Node Taint Toleration

K8s nodes can be given one or more "taints". These are special fields (distinct from labels) with a name (key) and optional value.

When placing pods, the k8s scheduler will only assign a pod to a tainted node if the pod possesses a corresponding "toleration". This is metadata associated with the pod that specifies that the pod "tolerates" a given taint.

Therefore taint toleration provides a mechanism by which only certain pods can be placed on specific nodes, and provides a complementary mechanism to node affinity.

Taint toleration for stack pods is specified in the deployment's `spec.yml` file as follows:

```
node-tolerations:
  - key: nodetype
    value: typeb
```

This example denotes that the stack's pods will tolerate a taint: `nodetype=typeb`.
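
The node-side setup and the toleration this produces look roughly like the following (illustrative):

```yaml
# Taint the node so that only tolerating pods can be scheduled on it:
#   kubectl taint nodes <node-name> nodetype=typeb:NoSchedule
tolerations:
  - key: nodetype
    operator: Equal
    value: typeb
    effect: NoSchedule
```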

````
@@ -26,4 +26,3 @@ $ ./scripts/tag_new_release.sh 1 0 17
 $ ./scripts/build_shiv_package.sh
 $ ./scripts/publish_shiv_package_github.sh 1 0 17
 ```
````

@@ -4,9 +4,9 @@ Note: this page is out of date (but still useful) - it will no longer be useful

## Implementation

The orchestrator's operation is driven by the files shown below.

- `repository-list.txt` contains the list of git repositories;
- `container-image-list.txt` contains the list of container image names
- `pod-list.txt` specifies the set of compose components (corresponding to individual docker-compose-xxx.yml files which may in turn specify more than one container).
- `container-build/` contains the files required to build each container image

@@ -7,7 +7,7 @@ compilation and static page generation are separated in the `build-webapp` and

This offers much more flexibility than standard Next.js build methods, since any environment variables accessed
via `process.env`, whether for pages or for API, will have values drawn from their runtime deployment environment,
not their build environment.

## Building
laconic-network-deployment.md (new file, 128 lines)

# Deploying to the Laconic Network

## Overview

The Laconic network uses a **registry-based deployment model** where everything is published as blockchain records.

## Key Documentation in stack-orchestrator

- `docs/laconicd-with-console.md` - Setting up a laconicd network
- `docs/webapp.md` - Webapp building/running
- `stack_orchestrator/deploy/webapp/` - Implementation (14 modules)

## Core Concepts

### LRN (Laconic Resource Name)

Format: `lrn://laconic/[namespace]/[name]`

Examples:

- `lrn://laconic/deployers/my-deployer-name`
- `lrn://laconic/dns/example.com`
- `lrn://laconic/deployments/example.com`

### Registry Record Types

| Record Type | Purpose |
|-------------|---------|
| `ApplicationRecord` | Published app metadata |
| `WebappDeployer` | Deployment service offering |
| `ApplicationDeploymentRequest` | User's request to deploy |
| `ApplicationDeploymentAuction` | Optional bidding for deployers |
| `ApplicationDeploymentRecord` | Completed deployment result |

## Deployment Workflows

### 1. Direct Deployment

```
User publishes ApplicationDeploymentRequest
  → targets specific WebappDeployer (by LRN)
  → includes payment TX hash
  → Deployer picks up request, builds, deploys, publishes result
```

### 2. Auction-Based Deployment

```
User publishes ApplicationDeploymentAuction
  → Deployers bid (commit/reveal phases)
  → Winner selected
  → User publishes request targeting winner
```

## Key CLI Commands

### Publish a Deployer Service

```bash
laconic-so publish-webapp-deployer --laconic-config config.yml \
  --api-url https://deployer-api.example.com \
  --name my-deployer \
  --payment-address laconic1... \
  --minimum-payment 1000alnt
```

### Request Deployment (User Side)

```bash
laconic-so request-webapp-deployment --laconic-config config.yml \
  --app lrn://laconic/apps/my-app \
  --deployer lrn://laconic/deployers/xyz \
  --make-payment auto
```

### Run Deployer Service (Deployer Side)

```bash
laconic-so deploy-webapp-from-registry --laconic-config config.yml --discover
```

## Laconic Config File

All tools require a laconic config file (`laconic.toml`):

```toml
[cosmos]
address_prefix = "laconic"
chain_id = "laconic_9000-1"
endpoint = "http://localhost:26657"
key = "<account-name>"
password = "<account-password>"
```

## Setting Up a Local Laconicd Network

```bash
# Clone and build
laconic-so --stack fixturenet-laconic-loaded setup-repositories
laconic-so --stack fixturenet-laconic-loaded build-containers
laconic-so --stack fixturenet-laconic-loaded deploy create
laconic-so deployment --dir laconic-loaded-deployment start

# Check status
laconic-so deployment --dir laconic-loaded-deployment exec cli "laconic registry status"
```

## Key Implementation Files

| File | Purpose |
|------|---------|
| `publish_webapp_deployer.py` | Register deployment service on network |
| `publish_deployment_auction.py` | Create auction for deployers to bid on |
| `handle_deployment_auction.py` | Monitor and bid on auctions (deployer-side) |
| `request_webapp_deployment.py` | Create deployment request (user-side) |
| `deploy_webapp_from_registry.py` | Process requests and deploy (deployer-side) |
| `request_webapp_undeployment.py` | Request app removal |
| `undeploy_webapp_from_registry.py` | Process removal requests |
| `util.py` | LaconicRegistryClient - all registry interactions |

## Payment System

- **Token Denom**: `alnt` (Laconic network tokens)
- **Payment Options** (example below):
  - `--make-payment`: Create new payment with amount (or "auto" for deployer's minimum)
  - `--use-payment`: Reference existing payment TX
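
For example, reusing an existing payment transaction instead of creating a new one (the LRNs and the TX hash placeholder are illustrative):

```bash
laconic-so request-webapp-deployment --laconic-config config.yml \
  --app lrn://laconic/apps/my-app \
  --deployer lrn://laconic/deployers/xyz \
  --use-payment <payment-tx-hash>
```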

## What's NOT Well-Documented

1. No end-to-end tutorial for full deployment workflow
2. Stack publishing (vs webapp) process unclear
3. LRN naming conventions not formally specified
4. Payment economics and token mechanics
pyproject.toml (new file, 110 lines)

```toml
[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "laconic-stack-orchestrator"
version = "1.1.0"
description = "Orchestrates deployment of the Laconic stack"
readme = "README.md"
license = {text = "GNU Affero General Public License"}
authors = [
    {name = "Cerc", email = "info@cerc.io"}
]
requires-python = ">=3.8"
classifiers = [
    "Programming Language :: Python :: 3.8",
    "Operating System :: OS Independent",
]
dependencies = [
    "python-decouple>=3.8",
    "python-dotenv==1.0.0",
    "GitPython>=3.1.32",
    "tqdm>=4.65.0",
    "python-on-whales>=0.64.0",
    "click>=8.1.6",
    "PyYAML>=6.0.1",
    "ruamel.yaml>=0.17.32",
    "pydantic==1.10.9",
    "tomli==2.0.1",
    "validators==0.22.0",
    "kubernetes>=28.1.0",
    "humanfriendly>=10.0",
    "python-gnupg>=0.5.2",
    "requests>=2.3.2",
]

[project.optional-dependencies]
dev = [
    "pytest>=7.0.0",
    "pytest-cov>=4.0.0",
    "black>=22.0.0",
    "flake8>=5.0.0",
    "pyright>=1.1.0",
    "yamllint>=1.28.0",
    "pre-commit>=3.0.0",
]

[project.scripts]
laconic-so = "stack_orchestrator.main:cli"

[project.urls]
Homepage = "https://git.vdb.to/cerc-io/stack-orchestrator"

[tool.setuptools.packages.find]
where = ["."]

[tool.setuptools.package-data]
"*" = ["data/**"]

[tool.black]
line-length = 88
target-version = ['py38']

[tool.flake8]
max-line-length = 88
extend-ignore = ["E203", "W503", "E402"]

[tool.pyright]
pythonVersion = "3.9"
typeCheckingMode = "basic"
reportMissingImports = "none"
reportMissingModuleSource = "none"
reportUnusedImport = "error"
include = ["stack_orchestrator/**/*.py", "tests/**/*.py"]
exclude = ["**/build/**", "**/__pycache__/**"]

[tool.mypy]
python_version = "3.8"
warn_return_any = true
warn_unused_configs = true
disallow_untyped_defs = true

[tool.pytest.ini_options]
testpaths = ["tests"]
python_files = ["test_*.py"]
python_classes = ["Test*"]
python_functions = ["test_*"]
markers = [
    "slow: marks tests as slow (deselect with '-m \"not slow\"')",
    "e2e: marks tests as end-to-end (requires real infrastructure)",
]
addopts = [
    "--cov",
    "--cov-report=term-missing",
    "--cov-report=html",
    "--strict-markers",
]
asyncio_default_fixture_loop_scope = "function"

[tool.coverage.run]
source = ["stack_orchestrator"]
disable_warnings = ["couldnt-parse"]

[tool.coverage.report]
exclude_lines = [
    "pragma: no cover",
    "def __repr__",
    "raise AssertionError",
    "raise NotImplementedError",
]
```
pyrightconfig.json (new file, 9 lines)

```json
{
  "pythonVersion": "3.9",
  "typeCheckingMode": "basic",
  "reportMissingImports": "none",
  "reportMissingModuleSource": "none",
  "reportUnusedImport": "error",
  "include": ["stack_orchestrator/**/*.py", "tests/**/*.py"],
  "exclude": ["**/build/**", "**/__pycache__/**"]
}
```

```diff
@@ -11,3 +11,5 @@ tomli==2.0.1
 validators==0.22.0
 kubernetes>=28.1.0
 humanfriendly>=10.0
+python-gnupg>=0.5.2
+requests>=2.3.2
```

```diff
@@ -4,7 +4,7 @@
 # https://github.com/cerc-io/github-release-api
 # User must define: CERC_GH_RELEASE_SCRIPTS_DIR
 # pointing to the location of that cloned repository
 # e.g.
 # cd ~/projects
 # git clone https://github.com/cerc-io/github-release-api
 # cd ./stack-orchestrator
```

```diff
@@ -94,7 +94,7 @@ sudo apt -y install jq
 # laconic-so depends on git
 sudo apt -y install git
 # curl used below
 sudo apt -y install curl
 # docker repo add depends on gnupg and updated ca-certificates
 sudo apt -y install ca-certificates gnupg
```

```diff
@@ -3,7 +3,7 @@
 # Uses this script package to tag a new release:
 # User must define: CERC_GH_RELEASE_SCRIPTS_DIR
 # pointing to the location of that cloned repository
 # e.g.
 # cd ~/projects
 # git clone https://github.com/cerc-io/github-release-api
 # cd ./stack-orchestrator
```
setup.py:

```diff
@@ -1,5 +1,7 @@
-# See https://medium.com/nerd-for-tech/how-to-build-and-distribute-a-cli-tool-with-python-537ae41d9d78
+# See
+# https://medium.com/nerd-for-tech/how-to-build-and-distribute-a-cli-tool-with-python-537ae41d9d78
 from setuptools import setup, find_packages

 with open("README.md", "r", encoding="utf-8") as fh:
     long_description = fh.read()
 with open("requirements.txt", "r", encoding="utf-8") as fh:
@@ -7,26 +9,26 @@ with open("requirements.txt", "r", encoding="utf-8") as fh:
 with open("stack_orchestrator/data/version.txt", "r", encoding="utf-8") as fh:
     version = fh.readlines()[-1].strip(" \n")
 setup(
-    name='laconic-stack-orchestrator',
+    name="laconic-stack-orchestrator",
     version=version,
-    author='Cerc',
+    author="Cerc",
-    author_email='info@cerc.io',
+    author_email="info@cerc.io",
-    license='GNU Affero General Public License',
+    license="GNU Affero General Public License",
-    description='Orchestrates deployment of the Laconic stack',
+    description="Orchestrates deployment of the Laconic stack",
     long_description=long_description,
     long_description_content_type="text/markdown",
-    url='https://git.vdb.to/cerc-io/stack-orchestrator',
+    url="https://git.vdb.to/cerc-io/stack-orchestrator",
-    py_modules=['stack_orchestrator'],
+    py_modules=["stack_orchestrator"],
     packages=find_packages(),
     install_requires=[requirements],
-    python_requires='>=3.7',
+    python_requires=">=3.7",
     include_package_data=True,
-    package_data={'': ['data/**']},
+    package_data={"": ["data/**"]},
     classifiers=[
         "Programming Language :: Python :: 3.8",
         "Operating System :: OS Independent",
     ],
     entry_points={
-        'console_scripts': ['laconic-so=stack_orchestrator.main:cli'],
+        "console_scripts": ["laconic-so=stack_orchestrator.main:cli"],
-    }
+    },
 )
```

```diff
@@ -23,11 +23,10 @@ def get_stack(config, stack):
     if stack == "package-registry":
         return package_registry_stack(config, stack)
     else:
-        return base_stack(config, stack)
+        return default_stack(config, stack)


 class base_stack(ABC):

     def __init__(self, config, stack):
         self.config = config
         self.stack = stack
@@ -41,15 +40,27 @@ class base_stack(ABC):
         pass


-class package_registry_stack(base_stack):
+class default_stack(base_stack):
+    """Default stack implementation for stacks without specific handling."""
+
+    def ensure_available(self):
+        return True
+
+    def get_url(self):
+        return None
+
+
+class package_registry_stack(base_stack):
     def ensure_available(self):
         self.url = "<no registry url set>"
         # Check if we were given an external registry URL
         url_from_environment = os.environ.get("CERC_NPM_REGISTRY_URL")
         if url_from_environment:
             if self.config.verbose:
-                print(f"Using package registry url from CERC_NPM_REGISTRY_URL: {url_from_environment}")
+                print(
+                    f"Using package registry url from CERC_NPM_REGISTRY_URL: "
+                    f"{url_from_environment}"
+                )
             self.url = url_from_environment
         else:
             # Otherwise we expect to use the local package-registry stack
@@ -62,10 +73,16 @@ class package_registry_stack(base_stack):
                 # TODO: get url from deploy-stack
                 self.url = "http://gitea.local:3000/api/packages/cerc-io/npm/"
             else:
-                # If not, print a message about how to start it and return fail to the caller
-                print("ERROR: The package-registry stack is not running, and no external registry "
-                      "specified with CERC_NPM_REGISTRY_URL")
-                print("ERROR: Start the local package registry with: laconic-so --stack package-registry deploy-system up")
+                # If not, print a message about how to start it and return fail to the
+                # caller
+                print(
+                    "ERROR: The package-registry stack is not running, "
+                    "and no external registry specified with CERC_NPM_REGISTRY_URL"
+                )
+                print(
+                    "ERROR: Start the local package registry with: "
+                    "laconic-so --stack package-registry deploy-system up"
+                )
                 return False
         return True
@@ -76,7 +93,9 @@ class package_registry_stack(base_stack):
 def get_npm_registry_url():
     # If an auth token is not defined, we assume the default should be the cerc registry
     # If an auth token is defined, we assume the local gitea should be used.
-    default_npm_registry_url = "http://gitea.local:3000/api/packages/cerc-io/npm/" if config(
-        "CERC_NPM_AUTH_TOKEN", default=None
-    ) else "https://git.vdb.to/api/packages/cerc-io/npm/"
+    default_npm_registry_url = (
+        "http://gitea.local:3000/api/packages/cerc-io/npm/"
+        if config("CERC_NPM_AUTH_TOKEN", default=None)
+        else "https://git.vdb.to/api/packages/cerc-io/npm/"
+    )
     return config("CERC_NPM_REGISTRY_URL", default=default_npm_registry_url)
```

```diff
@@ -18,7 +18,8 @@
 # env vars:
 # CERC_REPO_BASE_DIR defaults to ~/cerc

-# TODO: display the available list of containers; allow re-build of either all or specific containers
+# TODO: display the available list of containers;
+# allow re-build of either all or specific containers

 import os
 import sys
@@ -34,14 +35,17 @@ from stack_orchestrator.build.publish import publish_image
 from stack_orchestrator.build.build_util import get_containers_in_scope

 # TODO: find a place for this
-# epilog="Config provided either in .env or settings.ini or env vars: CERC_REPO_BASE_DIR (defaults to ~/cerc)"
+# epilog="Config provided either in .env or settings.ini or env vars:
+# CERC_REPO_BASE_DIR (defaults to ~/cerc)"


-def make_container_build_env(dev_root_path: str,
-                             container_build_dir: str,
-                             debug: bool,
-                             force_rebuild: bool,
-                             extra_build_args: str):
+def make_container_build_env(
+    dev_root_path: str,
+    container_build_dir: str,
+    debug: bool,
+    force_rebuild: bool,
+    extra_build_args: str,
+):
     container_build_env = {
         "CERC_NPM_REGISTRY_URL": get_npm_registry_url(),
         "CERC_GO_AUTH_TOKEN": config("CERC_GO_AUTH_TOKEN", default=""),
@@ -50,11 +54,15 @@ def make_container_build_env(dev_root_path: str,
         "CERC_CONTAINER_BASE_DIR": container_build_dir,
         "CERC_HOST_UID": f"{os.getuid()}",
         "CERC_HOST_GID": f"{os.getgid()}",
-        "DOCKER_BUILDKIT": config("DOCKER_BUILDKIT", default="0")
+        "DOCKER_BUILDKIT": config("DOCKER_BUILDKIT", default="0"),
     }
     container_build_env.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
     container_build_env.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
-    container_build_env.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {})
+    container_build_env.update(
+        {"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args}
+        if extra_build_args
+        else {}
+    )
     docker_host_env = os.getenv("DOCKER_HOST")
     if docker_host_env:
         container_build_env.update({"DOCKER_HOST": docker_host_env})
@@ -67,12 +75,18 @@ def process_container(build_context: BuildContext) -> bool:
         print(f"Building: {build_context.container}")

     default_container_tag = f"{build_context.container}:local"
-    build_context.container_build_env.update({"CERC_DEFAULT_CONTAINER_IMAGE_TAG": default_container_tag})
+    build_context.container_build_env.update(
+        {"CERC_DEFAULT_CONTAINER_IMAGE_TAG": default_container_tag}
+    )

     # Check if this is in an external stack
     if stack_is_external(build_context.stack):
-        container_parent_dir = Path(build_context.stack).parent.parent.joinpath("container-build")
-        temp_build_dir = container_parent_dir.joinpath(build_context.container.replace("/", "-"))
+        container_parent_dir = Path(build_context.stack).parent.parent.joinpath(
+            "container-build"
+        )
+        temp_build_dir = container_parent_dir.joinpath(
+            build_context.container.replace("/", "-")
+        )
         temp_build_script_filename = temp_build_dir.joinpath("build.sh")
         # Now check if the container exists in the external stack.
         if not temp_build_script_filename.exists():
@@ -90,21 +104,34 @@ def process_container(build_context: BuildContext) -> bool:
         build_command = build_script_filename.as_posix()
     else:
         if opts.o.verbose:
-            print(f"No script file found: {build_script_filename}, using default build script")
-        repo_dir = build_context.container.split('/')[1]
-        # TODO: make this less of a hack -- should be specified in some metadata somewhere
-        # Check if we have a repo for this container. If not, set the context dir to the container-build subdir
+            print(
+                f"No script file found: {build_script_filename}, "
+                "using default build script"
+            )
+        repo_dir = build_context.container.split("/")[1]
+        # TODO: make this less of a hack -- should be specified in
+        # some metadata somewhere. Check if we have a repo for this
+        # container. If not, set the context dir to container-build subdir
         repo_full_path = os.path.join(build_context.dev_root_path, repo_dir)
-        repo_dir_or_build_dir = repo_full_path if os.path.exists(repo_full_path) else build_dir
-        build_command = os.path.join(build_context.container_build_dir,
-                                     "default-build.sh") + f" {default_container_tag} {repo_dir_or_build_dir}"
+        repo_dir_or_build_dir = (
+            repo_full_path if os.path.exists(repo_full_path) else build_dir
+        )
+        build_command = (
+            os.path.join(build_context.container_build_dir, "default-build.sh")
+            + f" {default_container_tag} {repo_dir_or_build_dir}"
+        )
     if not opts.o.dry_run:
         # No PATH at all causes failures with podman.
         if "PATH" not in build_context.container_build_env:
             build_context.container_build_env["PATH"] = os.environ["PATH"]
         if opts.o.verbose:
-            print(f"Executing: {build_command} with environment: {build_context.container_build_env}")
-        build_result = subprocess.run(build_command, shell=True, env=build_context.container_build_env)
+            print(
+                f"Executing: {build_command} with environment: "
+                f"{build_context.container_build_env}"
+            )
+        build_result = subprocess.run(
+            build_command, shell=True, env=build_context.container_build_env
+        )
         if opts.o.verbose:
             print(f"Return code is: {build_result.returncode}")
         if build_result.returncode != 0:
@@ -117,33 +144,61 @@ def process_container(build_context: BuildContext) -> bool:


 @click.command()
-@click.option('--include', help="only build these containers")
-@click.option('--exclude', help="don't build these containers")
-@click.option("--force-rebuild", is_flag=True, default=False, help="Override dependency checking -- always rebuild")
+@click.option("--include", help="only build these containers")
+@click.option("--exclude", help="don't build these containers")
+@click.option(
+    "--force-rebuild",
+    is_flag=True,
+    default=False,
+    help="Override dependency checking -- always rebuild",
+)
 @click.option("--extra-build-args", help="Supply extra arguments to build")
-@click.option("--publish-images", is_flag=True, default=False, help="Publish the built images in the specified image registry")
-@click.option("--image-registry", help="Specify the image registry for --publish-images")
+@click.option(
+    "--publish-images",
+    is_flag=True,
+    default=False,
+    help="Publish the built images in the specified image registry",
+)
+@click.option(
+    "--image-registry", help="Specify the image registry for --publish-images"
+)
 @click.pass_context
-def command(ctx, include, exclude, force_rebuild, extra_build_args, publish_images, image_registry):
-    '''build the set of containers required for a complete stack'''
+def command(
+    ctx,
+    include,
+    exclude,
+    force_rebuild,
+    extra_build_args,
+    publish_images,
+    image_registry,
+):
+    """build the set of containers required for a complete stack"""

     local_stack = ctx.obj.local_stack
     stack = ctx.obj.stack

-    # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
-    container_build_dir = Path(__file__).absolute().parent.parent.joinpath("data", "container-build")
+    # See: https://stackoverflow.com/questions/25389095/
+    # python-get-path-of-root-project-structure
+    container_build_dir = (
+        Path(__file__).absolute().parent.parent.joinpath("data", "container-build")
+    )

     if local_stack:
-        dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
-        print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
+        dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")]
+        print(
+            f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
+            f"{dev_root_path}"
+        )
     else:
-        dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
+        dev_root_path = os.path.expanduser(
+            config("CERC_REPO_BASE_DIR", default="~/cerc")
+        )

     if not opts.o.quiet:
-        print(f'Dev Root is: {dev_root_path}')
+        print(f"Dev Root is: {dev_root_path}")

     if not os.path.isdir(dev_root_path):
-        print('Dev root directory doesn\'t exist, creating')
+        print("Dev root directory doesn't exist, creating")

     if publish_images:
         if not image_registry:
@@ -151,21 +206,22 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args, publish_imag

     containers_in_scope = get_containers_in_scope(stack)

-    container_build_env = make_container_build_env(dev_root_path,
-                                                   container_build_dir,
-                                                   opts.o.debug,
-                                                   force_rebuild,
-                                                   extra_build_args)
+    container_build_env = make_container_build_env(
+        dev_root_path,
+        container_build_dir,
+        opts.o.debug,
+        force_rebuild,
+        extra_build_args,
+    )

     for container in containers_in_scope:
         if include_exclude_check(container, include, exclude):

             build_context = BuildContext(
                 stack,
                 container,
                 container_build_dir,
                 container_build_env,
-                dev_root_path
+                dev_root_path,
             )
             result = process_container(build_context)
             if result:
@@ -174,10 +230,16 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args, publish_imag
             else:
                 print(f"Error running build for {build_context.container}")
                 if not opts.o.continue_on_error:
-                    error_exit("container build failed and --continue-on-error not set, exiting")
+                    error_exit(
+                        "container build failed and --continue-on-error "
+                        "not set, exiting"
+                    )
                     sys.exit(1)
                 else:
-                    print("****** Container Build Error, continuing because --continue-on-error is set")
+                    print(
+                        "****** Container Build Error, continuing because "
+                        "--continue-on-error is set"
+                    )
         else:
             if opts.o.verbose:
                 print(f"Excluding: {container}")
```
@ -32,14 +32,18 @@ builder_js_image_name = "cerc/builder-js:local"
|
|||||||
|
|
||||||
|
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.option('--include', help="only build these packages")
|
@click.option("--include", help="only build these packages")
|
||||||
@click.option('--exclude', help="don\'t build these packages")
|
@click.option("--exclude", help="don't build these packages")
|
||||||
@click.option("--force-rebuild", is_flag=True, default=False,
|
@click.option(
|
||||||
help="Override existing target package version check -- force rebuild")
|
"--force-rebuild",
|
||||||
|
is_flag=True,
|
||||||
|
default=False,
|
||||||
|
help="Override existing target package version check -- force rebuild",
|
||||||
|
)
|
||||||
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(ctx, include, exclude, force_rebuild, extra_build_args):
|
def command(ctx, include, exclude, force_rebuild, extra_build_args):
|
||||||
'''build the set of npm packages required for a complete stack'''
|
"""build the set of npm packages required for a complete stack"""
|
||||||
|
|
||||||
quiet = ctx.obj.quiet
|
quiet = ctx.obj.quiet
|
||||||
verbose = ctx.obj.verbose
|
verbose = ctx.obj.verbose
|
||||||
@ -65,45 +69,54 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
|
|||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
if local_stack:
|
if local_stack:
|
||||||
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
|
dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")]
|
||||||
print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
|
print(
|
||||||
|
f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
|
||||||
|
f"{dev_root_path}"
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
|
dev_root_path = os.path.expanduser(
|
||||||
|
config("CERC_REPO_BASE_DIR", default="~/cerc")
|
||||||
|
)
|
||||||
|
|
||||||
build_root_path = os.path.join(dev_root_path, "build-trees")
|
build_root_path = os.path.join(dev_root_path, "build-trees")
|
||||||
|
|
||||||
if verbose:
|
if verbose:
|
||||||
print(f'Dev Root is: {dev_root_path}')
|
print(f"Dev Root is: {dev_root_path}")
|
||||||
|
|
||||||
if not os.path.isdir(dev_root_path):
|
if not os.path.isdir(dev_root_path):
|
||||||
print('Dev root directory doesn\'t exist, creating')
|
print("Dev root directory doesn't exist, creating")
|
||||||
os.makedirs(dev_root_path)
|
os.makedirs(dev_root_path)
|
||||||
if not os.path.isdir(dev_root_path):
|
if not os.path.isdir(dev_root_path):
|
||||||
print('Build root directory doesn\'t exist, creating')
|
print("Build root directory doesn't exist, creating")
|
||||||
os.makedirs(build_root_path)
|
os.makedirs(build_root_path)
|
||||||
|
|
||||||
# See: https://stackoverflow.com/a/20885799/1701505
|
# See: https://stackoverflow.com/a/20885799/1701505
|
||||||
from stack_orchestrator import data
|
from stack_orchestrator import data
|
||||||
with importlib.resources.open_text(data, "npm-package-list.txt") as package_list_file:
|
|
||||||
|
with importlib.resources.open_text(
|
||||||
|
data, "npm-package-list.txt"
|
||||||
|
) as package_list_file:
|
||||||
all_packages = package_list_file.read().splitlines()
|
all_packages = package_list_file.read().splitlines()
|
||||||
|
|
||||||
packages_in_scope = []
|
packages_in_scope = []
|
||||||
if stack:
|
if stack:
|
||||||
stack_config = get_parsed_stack_config(stack)
|
stack_config = get_parsed_stack_config(stack)
|
||||||
# TODO: syntax check the input here
|
# TODO: syntax check the input here
|
||||||
packages_in_scope = stack_config['npms']
|
packages_in_scope = stack_config["npms"]
|
||||||
else:
|
else:
|
||||||
packages_in_scope = all_packages
|
packages_in_scope = all_packages
|
||||||
|
|
||||||
if verbose:
|
if verbose:
|
||||||
print(f'Packages: {packages_in_scope}')
|
print(f"Packages: {packages_in_scope}")
|
||||||
|
|
||||||
def build_package(package):
|
def build_package(package):
|
||||||
if not quiet:
|
if not quiet:
|
||||||
print(f"Building npm package: {package}")
|
print(f"Building npm package: {package}")
|
||||||
repo_dir = package
|
repo_dir = package
|
||||||
repo_full_path = os.path.join(dev_root_path, repo_dir)
|
repo_full_path = os.path.join(dev_root_path, repo_dir)
|
||||||
# Copy the repo and build that to avoid propagating JS tooling file changes back into the cloned repo
|
# Copy the repo and build that to avoid propagating
|
||||||
|
# JS tooling file changes back into the cloned repo
|
||||||
repo_copy_path = os.path.join(build_root_path, repo_dir)
|
repo_copy_path = os.path.join(build_root_path, repo_dir)
|
||||||
# First delete any old build tree
|
# First delete any old build tree
|
||||||
if os.path.isdir(repo_copy_path):
|
if os.path.isdir(repo_copy_path):
|
||||||
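Note: the hunk above reads a text file shipped inside the Python package via `importlib.resources` (hence the Stack Overflow link in the comment). The same pattern, pulled out standalone so it is easier to see what the wrapped `with` statement does:

```python
# Read a line-per-entry list file that ships inside the stack_orchestrator
# package; open_text resolves the resource relative to the package whether it
# is installed as a directory or bundled in a zip/shiv archive.
import importlib.resources

from stack_orchestrator import data

with importlib.resources.open_text(data, "npm-package-list.txt") as f:
    all_packages = f.read().splitlines()  # one package name per line
print(all_packages)
```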
@ -116,41 +129,63 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
            print(f"Copying build tree from: {repo_full_path} to: {repo_copy_path}")
        if not dry_run:
            copytree(repo_full_path, repo_copy_path)
-        build_command = ["sh", "-c", f"cd /workspace && build-npm-package-local-dependencies.sh {npm_registry_url}"]
+        build_command = [
+            "sh",
+            "-c",
+            "cd /workspace && "
+            f"build-npm-package-local-dependencies.sh {npm_registry_url}",
+        ]
        if not dry_run:
            if verbose:
                print(f"Executing: {build_command}")
            # Originally we used the PEP 584 merge operator:
-            # envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token} | ({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
-            # but that isn't available in Python 3.8 (default in Ubuntu 20) so for now we use dict.update:
-            envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token,
-                    "LACONIC_HOSTED_CONFIG_FILE": "config-hosted.yml"  # Convention used by our web app packages
-                    }
+            # envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token} |
+            #   ({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
+            # but that isn't available in Python 3.8 (default in Ubuntu 20)
+            # so for now we use dict.update:
+            envs = {
+                "CERC_NPM_AUTH_TOKEN": npm_registry_url_token,
+                # Convention used by our web app packages
+                "LACONIC_HOSTED_CONFIG_FILE": "config-hosted.yml",
+            }
            envs.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
            envs.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
-            envs.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {})
+            envs.update(
+                {"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args}
+                if extra_build_args
+                else {}
+            )
            try:
-                docker.run(builder_js_image_name,
-                           remove=True,
-                           interactive=True,
-                           tty=True,
-                           user=f"{os.getuid()}:{os.getgid()}",
-                           envs=envs,
-                           # TODO: detect this host name in npm_registry_url rather than hard-wiring it
-                           add_hosts=[("gitea.local", "host-gateway")],
-                           volumes=[(repo_copy_path, "/workspace")],
-                           command=build_command
-                           )
-                # Note that although the docs say that build_result should contain
-                # the command output as a string, in reality it is always the empty string.
-                # Since we detect errors via catching exceptions below, we can safely ignore it here.
+                docker.run(
+                    builder_js_image_name,
+                    remove=True,
+                    interactive=True,
+                    tty=True,
+                    user=f"{os.getuid()}:{os.getgid()}",
+                    envs=envs,
+                    # TODO: detect this host name in npm_registry_url
+                    # rather than hard-wiring it
+                    add_hosts=[("gitea.local", "host-gateway")],
+                    volumes=[(repo_copy_path, "/workspace")],
+                    command=build_command,
+                )
+                # Note that although the docs say that build_result should
+                # contain the command output as a string, in reality it is
+                # always the empty string. Since we detect errors via catching
+                # exceptions below, we can safely ignore it here.
            except DockerException as e:
                print(f"Error executing build for {package} in container:\n {e}")
                if not continue_on_error:
-                    print("FATAL Error: build failed and --continue-on-error not set, exiting")
+                    print(
+                        "FATAL Error: build failed and --continue-on-error "
+                        "not set, exiting"
+                    )
                    sys.exit(1)
                else:
-                    print("****** Build Error, continuing because --continue-on-error is set")
+                    print(
+                        "****** Build Error, continuing because "
+                        "--continue-on-error is set"
+                    )

        else:
            print("Skipped")
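Note: the comment in the hunk above explains why the code uses `dict.update` instead of the PEP 584 merge operator. A quick sketch of the two equivalent idioms, for readers on Python 3.9+ who have not hit the 3.8 limitation:

```python
# The two equivalent idioms mentioned in the comment above.
debug = True
base = {"CERC_NPM_AUTH_TOKEN": "secret"}

# PEP 584 merge operator -- Python 3.9+ only:
#   envs = base | ({"CERC_SCRIPT_DEBUG": "true"} if debug else {})

# dict.update -- works on Python 3.8 (the Ubuntu 20.04 default):
envs = dict(base)
envs.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
assert envs == {"CERC_NPM_AUTH_TOKEN": "secret", "CERC_SCRIPT_DEBUG": "true"}
```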
@ -168,6 +203,12 @@ def _ensure_prerequisites():
    # Tell the user how to build it if not
    images = docker.image.list(builder_js_image_name)
    if len(images) == 0:
-        print(f"FATAL: builder image: {builder_js_image_name} is required but was not found")
-        print("Please run this command to create it: laconic-so --stack build-support build-containers")
+        print(
+            f"FATAL: builder image: {builder_js_image_name} is required "
+            "but was not found"
+        )
+        print(
+            "Please run this command to create it: "
+            "laconic-so --stack build-support build-containers"
+        )
        sys.exit(1)

@ -24,6 +24,5 @@ class BuildContext:
    stack: str
    container: str
    container_build_dir: Path
-    container_build_env: Mapping[str,str]
+    container_build_env: Mapping[str, str]
    dev_root_path: str
-

@ -20,21 +20,23 @@ from stack_orchestrator.util import get_parsed_stack_config, warn_exit


def get_containers_in_scope(stack: str):

    containers_in_scope = []
    if stack:
        stack_config = get_parsed_stack_config(stack)
        if "containers" not in stack_config or stack_config["containers"] is None:
            warn_exit(f"stack {stack} does not define any containers")
-        containers_in_scope = stack_config['containers']
+        containers_in_scope = stack_config["containers"]
    else:
        # See: https://stackoverflow.com/a/20885799/1701505
        from stack_orchestrator import data
-        with importlib.resources.open_text(data, "container-image-list.txt") as container_list_file:
+        with importlib.resources.open_text(
+            data, "container-image-list.txt"
+        ) as container_list_file:
            containers_in_scope = container_list_file.read().splitlines()

    if opts.o.verbose:
-        print(f'Containers: {containers_in_scope}')
+        print(f"Containers: {containers_in_scope}")
        if stack:
            print(f"Stack: {stack}")

@ -18,7 +18,8 @@
# env vars:
# CERC_REPO_BASE_DIR defaults to ~/cerc

-# TODO: display the available list of containers; allow re-build of either all or specific containers
+# TODO: display the available list of containers;
+# allow re-build of either all or specific containers

import os
import sys

@ -32,40 +33,55 @@ from stack_orchestrator.build.build_types import BuildContext


@click.command()
-@click.option('--base-container')
+@click.option("--base-container")
-@click.option('--source-repo', help="directory containing the webapp to build", required=True)
-@click.option("--force-rebuild", is_flag=True, default=False, help="Override dependency checking -- always rebuild")
+@click.option(
+    "--source-repo", help="directory containing the webapp to build", required=True
+)
+@click.option(
+    "--force-rebuild",
+    is_flag=True,
+    default=False,
+    help="Override dependency checking -- always rebuild",
+)
@click.option("--extra-build-args", help="Supply extra arguments to build")
@click.option("--tag", help="Container tag (default: cerc/<app_name>:local)")
@click.pass_context
def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, tag):
-    '''build the specified webapp container'''
+    """build the specified webapp container"""
    logger = TimedLogger()

-    quiet = ctx.obj.quiet
    debug = ctx.obj.debug
    verbose = ctx.obj.verbose
    local_stack = ctx.obj.local_stack
    stack = ctx.obj.stack

-    # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
-    container_build_dir = Path(__file__).absolute().parent.parent.joinpath("data", "container-build")
+    # See: https://stackoverflow.com/questions/25389095/
+    # python-get-path-of-root-project-structure
+    container_build_dir = (
+        Path(__file__).absolute().parent.parent.joinpath("data", "container-build")
+    )

    if local_stack:
-        dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
+        dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")]
-        logger.log(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
+        logger.log(
+            f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
+            f"{dev_root_path}"
+        )
    else:
-        dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
+        dev_root_path = os.path.expanduser(
+            config("CERC_REPO_BASE_DIR", default="~/cerc")
+        )

    if verbose:
-        logger.log(f'Dev Root is: {dev_root_path}')
+        logger.log(f"Dev Root is: {dev_root_path}")

    if not base_container:
        base_container = determine_base_container(source_repo)

    # First build the base container.
-    container_build_env = build_containers.make_container_build_env(dev_root_path, container_build_dir, debug,
-                                                                    force_rebuild, extra_build_args)
+    container_build_env = build_containers.make_container_build_env(
+        dev_root_path, container_build_dir, debug, force_rebuild, extra_build_args
+    )

    if verbose:
        logger.log(f"Building base container: {base_container}")

@ -85,12 +101,13 @@ def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, t
    if verbose:
        logger.log(f"Base container {base_container} build finished.")

-    # Now build the target webapp. We use the same build script, but with a different Dockerfile and work dir.
+    # Now build the target webapp. We use the same build script,
+    # but with a different Dockerfile and work dir.
    container_build_env["CERC_WEBAPP_BUILD_RUNNING"] = "true"
    container_build_env["CERC_CONTAINER_BUILD_WORK_DIR"] = os.path.abspath(source_repo)
-    container_build_env["CERC_CONTAINER_BUILD_DOCKERFILE"] = os.path.join(container_build_dir,
-                                                                          base_container.replace("/", "-"),
-                                                                          "Dockerfile.webapp")
+    container_build_env["CERC_CONTAINER_BUILD_DOCKERFILE"] = os.path.join(
+        container_build_dir, base_container.replace("/", "-"), "Dockerfile.webapp"
+    )
    if not tag:
        webapp_name = os.path.abspath(source_repo).split(os.path.sep)[-1]
        tag = f"cerc/{webapp_name}:local"

@ -52,7 +52,8 @@ def _local_tag_for(container: str):

# See: https://docker-docs.uclv.cu/registry/spec/api/
# Emulate this:
-# $ curl -u "my-username:my-token" -X GET "https://<container-registry-hostname>/v2/cerc-io/cerc/test-container/tags/list"
+# $ curl -u "my-username:my-token" -X GET \
+#   "https://<container-registry-hostname>/v2/cerc-io/cerc/test-container/tags/list"
# {"name":"cerc-io/cerc/test-container","tags":["202402232130","202402232208"]}
def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List[str]:
    # registry looks like: git.vdb.to/cerc-io

@ -60,7 +61,9 @@ def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List
    url = f"https://{registry_parts[0]}/v2/{registry_parts[1]}/{container}/tags/list"
    if opts.o.debug:
        print(f"Fetching tags from: {url}")
-    response = requests.get(url, auth=(registry_info.registry_username, registry_info.registry_token))
+    response = requests.get(
+        url, auth=(registry_info.registry_username, registry_info.registry_token)
+    )
    if response.status_code == 200:
        tag_info = response.json()
        if opts.o.debug:

@ -68,7 +71,10 @@ def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List
        tags_array = tag_info["tags"]
        return tags_array
    else:
-        error_exit(f"failed to fetch tags from image registry, status code: {response.status_code}")
+        error_exit(
+            f"failed to fetch tags from image registry, "
+            f"status code: {response.status_code}"
+        )


def _find_latest(candidate_tags: List[str]):

@ -79,9 +85,9 @@ def _find_latest(candidate_tags: List[str]):
    return sorted_candidates[-1]


-def _filter_for_platform(container: str,
-                         registry_info: RegistryInfo,
-                         tag_list: List[str]) -> List[str] :
+def _filter_for_platform(
+    container: str, registry_info: RegistryInfo, tag_list: List[str]
+) -> List[str]:
    filtered_tags = []
    this_machine = platform.machine()
    # Translate between Python and docker platform names
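Note: the comment above refers to the mismatch between Python's `platform.machine()` values and Docker's architecture names. An illustrative sketch of the kind of translation meant; the mapping table here is an assumption for illustration, not the repository's actual code:

```python
# Python reports e.g. "x86_64"/"aarch64"; Docker manifests use "amd64"/"arm64".
import platform

PYTHON_TO_DOCKER_ARCH = {
    "x86_64": "amd64",
    "aarch64": "arm64",
    "arm64": "arm64",  # macOS reports "arm64" directly
}

this_machine = platform.machine()
docker_arch = PYTHON_TO_DOCKER_ARCH.get(this_machine, this_machine)
print(f"python: {this_machine} -> docker: {docker_arch}")
```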
@ -98,7 +104,7 @@ def _filter_for_platform(container: str,
        manifest = manifest_cmd.inspect_verbose(remote_tag)
        if opts.o.debug:
            print(f"manifest: {manifest}")
        image_architecture = manifest["Descriptor"]["platform"]["architecture"]
        if opts.o.debug:
            print(f"image_architecture: {image_architecture}")
        if this_machine == image_architecture:

@ -137,21 +143,44 @@ def _add_local_tag(remote_tag: str, registry: str, local_tag: str):


@click.command()
-@click.option('--include', help="only fetch these containers")
+@click.option("--include", help="only fetch these containers")
-@click.option('--exclude', help="don\'t fetch these containers")
+@click.option("--exclude", help="don't fetch these containers")
-@click.option("--force-local-overwrite", is_flag=True, default=False, help="Overwrite a locally built image, if present")
-@click.option("--image-registry", required=True, help="Specify the image registry to fetch from")
-@click.option("--registry-username", required=True, help="Specify the image registry username")
-@click.option("--registry-token", required=True, help="Specify the image registry access token")
+@click.option(
+    "--force-local-overwrite",
+    is_flag=True,
+    default=False,
+    help="Overwrite a locally built image, if present",
+)
+@click.option(
+    "--image-registry", required=True, help="Specify the image registry to fetch from"
+)
+@click.option(
+    "--registry-username", required=True, help="Specify the image registry username"
+)
+@click.option(
+    "--registry-token", required=True, help="Specify the image registry access token"
+)
@click.pass_context
-def command(ctx, include, exclude, force_local_overwrite, image_registry, registry_username, registry_token):
-    '''EXPERIMENTAL: fetch the images for a stack from remote registry'''
+def command(
+    ctx,
+    include,
+    exclude,
+    force_local_overwrite,
+    image_registry,
+    registry_username,
+    registry_token,
+):
+    """EXPERIMENTAL: fetch the images for a stack from remote registry"""

    registry_info = RegistryInfo(image_registry, registry_username, registry_token)
    docker = DockerClient()
    if not opts.o.quiet:
        print("Logging into container registry:")
-    docker.login(registry_info.registry, registry_info.registry_username, registry_info.registry_token)
+    docker.login(
+        registry_info.registry,
+        registry_info.registry_username,
+        registry_info.registry_token,
+    )
    # Generate list of target containers
    stack = ctx.obj.stack
    containers_in_scope = get_containers_in_scope(stack)

@ -172,19 +201,24 @@ def command(ctx, include, exclude, force_local_overwrite, image_registry, regist
                print(f"Fetching: {image_to_fetch}")
            _fetch_image(image_to_fetch, registry_info)
            # Now check if the target container already exists exists locally already
-            if (_exists_locally(container)):
+            if _exists_locally(container):
                if not opts.o.quiet:
                    print(f"Container image {container} already exists locally")
                # if so, fail unless the user specified force-local-overwrite
-                if (force_local_overwrite):
+                if force_local_overwrite:
                    # In that case remove the existing :local tag
                    if not opts.o.quiet:
-                        print(f"Warning: overwriting local tag from this image: {container} because "
-                              "--force-local-overwrite was specified")
+                        print(
+                            f"Warning: overwriting local tag from this image: "
+                            f"{container} because --force-local-overwrite was specified"
+                        )
                else:
                    if not opts.o.quiet:
-                        print(f"Skipping local tagging for this image: {container} because that would "
-                              "overwrite an existing :local tagged image, use --force-local-overwrite to do so.")
+                        print(
+                            f"Skipping local tagging for this image: {container} "
+                            "because that would overwrite an existing :local tagged "
+                            "image, use --force-local-overwrite to do so."
+                        )
                    continue
            # Tag the fetched image with the :local tag
            _add_local_tag(image_to_fetch, image_registry, local_tag)

@ -192,4 +226,7 @@ def command(ctx, include, exclude, force_local_overwrite, image_registry, regist
            if opts.o.verbose:
                print(f"Excluding: {container}")
    if not all_containers_found:
-        print("Warning: couldn't find usable images for one or more containers, this stack will not deploy")
+        print(
+            "Warning: couldn't find usable images for one or more containers, "
+            "this stack will not deploy"
+        )

@ -34,5 +34,13 @@ volumes_key = "volumes"
security_key = "security"
annotations_key = "annotations"
labels_key = "labels"
+replicas_key = "replicas"
+node_affinities_key = "node-affinities"
+node_tolerations_key = "node-tolerations"
kind_config_filename = "kind-config.yml"
kube_config_filename = "kubeconfig.yml"
+cri_base_filename = "cri-base.json"
+unlimited_memlock_key = "unlimited-memlock"
+runtime_class_key = "runtime-class"
+high_memlock_runtime = "high-memlock"
+high_memlock_spec_filename = "high-memlock-spec.json"

@ -20,7 +20,7 @@ services:
      depends_on:
        generate-jwt:
          condition: service_completed_successfully
      env_file:
        - ../config/fixturenet-blast/${NETWORK:-fixturenet}.config
  blast-geth:
    image: blastio/blast-geth:${NETWORK:-testnet-sepolia}

@ -51,7 +51,7 @@ services:
      --nodiscover
      --maxpeers=0
      --rollup.disabletxpoolgossip=true
    env_file:
      - ../config/fixturenet-blast/${NETWORK:-fixturenet}.config
    depends_on:
      geth-init:

@ -73,7 +73,7 @@ services:
      --rollup.config="/blast/rollup.json"
    depends_on:
      - blast-geth
    env_file:
      - ../config/fixturenet-blast/${NETWORK:-fixturenet}.config

volumes:

@ -14,4 +14,3 @@ services:
      - "9090"
      - "9091"
      - "1317"
-

@ -19,7 +19,7 @@ services:
      depends_on:
        generate-jwt:
          condition: service_completed_successfully
      env_file:
        - ../config/mainnet-blast/${NETWORK:-mainnet}.config
  blast-geth:
    image: blastio/blast-geth:${NETWORK:-mainnet}

@ -53,7 +53,7 @@ services:
      --nodiscover
      --maxpeers=0
      --rollup.disabletxpoolgossip=true
    env_file:
      - ../config/mainnet-blast/${NETWORK:-mainnet}.config
    depends_on:
      geth-init:

@ -76,7 +76,7 @@ services:
      --rollup.config="/blast/rollup.json"
    depends_on:
      - blast-geth
    env_file:
      - ../config/mainnet-blast/${NETWORK:-mainnet}.config

volumes:

@ -17,4 +17,3 @@
      - URL_NEUTRON_TEST_REST=https://rest-palvus.pion-1.ntrn.tech
      - URL_NEUTRON_TEST_RPC=https://rpc-palvus.pion-1.ntrn.tech
      - WALLET_CONNECT_ID=0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x
-

@ -4,9 +4,5 @@ services:
  ping-pub:
    image: cerc/ping-pub:local
    restart: always
-    environment:
-      LACONIC_LACONICD_CHAIN_ID: ${LACONIC_LACONICD_CHAIN_ID:-laconic_9000-1}
-      LACONIC_LACONICD_RPC_URL: ${LACONIC_LACONICD_RPC_URL:-http://localhost:26657}
-      LACONIC_LACONICD_API_URL: ${LACONIC_LACONICD_API_URL:-http://localhost:1317}
    ports:
-      - 5173
+      - "5173:5173"

@ -32,4 +32,4 @@ services:
volumes:
  reth_data:
  lighthouse_data:
  shared_data:

@ -12,7 +12,7 @@ services:
      POSTGRES_INITDB_ARGS: "-E UTF8 --locale=C"
    ports:
      - "5432"

  test-client:
    image: cerc/test-database-client:local

@ -8,6 +8,8 @@ services:
      CERC_TEST_PARAM_2: "CERC_TEST_PARAM_2_VALUE"
      CERC_TEST_PARAM_3: ${CERC_TEST_PARAM_3:-FAILED}
    volumes:
+      - ../config/test/script.sh:/opt/run.sh
+      - ../config/test/settings.env:/opt/settings.env
      - test-data-bind:/data
      - test-data-auto:/data2
      - test-config:/config:ro

@ -1,2 +1,2 @@
GETH_ROLLUP_SEQUENCERHTTP=https://sequencer.s2.testblast.io
OP_NODE_P2P_BOOTNODES=enr:-J-4QM3GLUFfKMSJQuP1UvuKQe8DyovE7Eaiit0l6By4zjTodkR4V8NWXJxNmlg8t8rP-Q-wp3jVmeAOml8cjMj__ROGAYznzb_HgmlkgnY0gmlwhA-cZ_eHb3BzdGFja4X947FQAIlzZWNwMjU2azGhAiuDqvB-AsVSRmnnWr6OHfjgY8YfNclFy9p02flKzXnOg3RjcIJ2YYN1ZHCCdmE,enr:-J-4QDCVpByqQ8nFqCS9aHicqwUfXgzFDslvpEyYz19lvkHLIdtcIGp2d4q5dxHdjRNTO6HXCsnIKxUeuZSPcEbyVQCGAYznzz0RgmlkgnY0gmlwhANiQfuHb3BzdGFja4X947FQAIlzZWNwMjU2azGhAy3AtF2Jh_aPdOohg506Hjmtx-fQ1AKmu71C7PfkWAw9g3RjcIJ2YYN1ZHCCdmE

@ -1411,4 +1411,4 @@
  "uid": "nT9VeZoVk",
  "version": 2,
  "weekStart": ""
}

@ -10,6 +10,7 @@ MONIKER="localtestnet"
KEYRING="test"
KEYALGO="secp256k1"
LOGLEVEL="${LOGLEVEL:-info}"
+DENOM="alnt"


if [ "$1" == "clean" ] || [ ! -d "$HOME/.laconicd/data/blockstore.db" ]; then

@ -33,7 +34,7 @@ if [ "$1" == "clean" ] || [ ! -d "$HOME/.laconicd/data/blockstore.db" ]; then
  laconicd keys add $KEY --keyring-backend $KEYRING --algo $KEYALGO

  # Set moniker and chain-id for Ethermint (Moniker can be anything, chain-id must be an integer)
-  laconicd init $MONIKER --chain-id $CHAINID --default-denom photon
+  laconicd init $MONIKER --chain-id $CHAINID --default-denom $DENOM

  update_genesis() {
    jq "$1" $HOME/.laconicd/config/genesis.json > $HOME/.laconicd/config/tmp_genesis.json &&

@ -88,15 +89,13 @@ if [ "$1" == "clean" ] || [ ! -d "$HOME/.laconicd/data/blockstore.db" ]; then
    sed -i 's/prometheus = false/prometheus = true/g' $HOME/.laconicd/config/config.toml
  fi

-  # Run this to allow requests from any origin
-  sed -i 's/cors_allowed_origins.*$/cors_allowed_origins = ["*"]/' $HOME/.laconicd/config/config.toml
-  sed -i 's/enabled-unsafe-cors.*$/enabled-unsafe-cors = true/' $HOME/.laconicd/config/app.toml

  # Allocate genesis accounts (cosmos formatted addresses)
-  laconicd genesis add-genesis-account $KEY 100000000000000000000000000photon --keyring-backend $KEYRING
+  # 10^30 alnt | 10^12 lnt
+  laconicd genesis add-genesis-account $KEY 1000000000000000000000000000000$DENOM --keyring-backend $KEYRING

  # Sign genesis transaction
-  laconicd genesis gentx $KEY 1000000000000000000000photon --keyring-backend $KEYRING --chain-id $CHAINID
+  # 10^24 alnt | 10^6 lnt
+  laconicd genesis gentx $KEY 1000000000000000000000000$DENOM --keyring-backend $KEYRING --chain-id $CHAINID

  # Collect genesis tx
  laconicd genesis collect-gentxs
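Note: the `10^30 alnt | 10^12 lnt` comments added above encode the new denomination's 18 decimal places (the `"exponent": "18"` set in the explorer chain config later in this changeset). A quick arithmetic check of those comments:

```python
# With 18 decimal places, 10^30 alnt is 10^12 lnt and 10^24 alnt is 10^6 lnt.
ALNT_PER_LNT = 10**18  # exponent 18, per the chain config

genesis_alnt = 10**30  # amount passed to add-genesis-account
gentx_alnt = 10**24    # amount passed to gentx

assert genesis_alnt // ALNT_PER_LNT == 10**12
assert gentx_alnt // ALNT_PER_LNT == 10**6
```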
@ -111,7 +110,7 @@ fi
laconicd start \
  --pruning=nothing \
  --log_level $LOGLEVEL \
-  --minimum-gas-prices=0.0001photon \
+  --minimum-gas-prices=1$DENOM \
  --api.enable \
  --rpc.laddr="tcp://0.0.0.0:26657" \
  --gql-server --gql-playground

@ -6,4 +6,4 @@ services:
  bondId:
  chainId: laconic_9000-1
  gas: 350000
-  fees: 200000photon
+  fees: 2000000alnt

@ -65,7 +65,7 @@ if [ -n "$CERC_L1_ADDRESS" ] && [ -n "$CERC_L1_PRIV_KEY" ]; then
  # Sequencer
  SEQ=$(echo "$wallet3" | awk '/Address:/{print $2}')
  SEQ_KEY=$(echo "$wallet3" | awk '/Private key:/{print $3}')

  echo "Funding accounts."
  wait_for_block 1 300
  cast send --from $ADMIN --rpc-url $CERC_L1_RPC --value 5ether $PROPOSER --private-key $ADMIN_KEY

@ -56,7 +56,7 @@
            "value": "!validator-pubkey"
          }
        }
      }
    ],
    "supply": []
  },

@ -269,4 +269,4 @@
      "claims": null
    }
  }
}

@ -2084,4 +2084,4 @@
  "clientPolicies": {
    "policies": []
  }
}

@ -2388,4 +2388,4 @@
  "clientPolicies": {
    "policies": []
  }
}

@ -29,4 +29,3 @@
  "l1_system_config_address": "0x5531dcff39ec1ec727c4c5d2fc49835368f805a9",
  "protocol_versions_address": "0x0000000000000000000000000000000000000000"
}
-

@ -2388,4 +2388,4 @@
  "clientPolicies": {
    "policies": []
  }
}

@ -12,7 +12,10 @@ from fabric import Connection


def dump_src_db_to_file(db_host, db_port, db_user, db_password, db_name, file_name):
-    command = f"pg_dump -h {db_host} -p {db_port} -U {db_user} -d {db_name} -c --inserts -f {file_name}"
+    command = (
+        f"pg_dump -h {db_host} -p {db_port} -U {db_user} "
+        f"-d {db_name} -c --inserts -f {file_name}"
+    )
    my_env = os.environ.copy()
    my_env["PGPASSWORD"] = db_password
    print(f"Exporting from {db_host}:{db_port}/{db_name} to {file_name}... ", end="")
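Note: the hunk above only builds the `pg_dump` command string and a `PGPASSWORD` environment; the call that actually runs it falls outside the hunk. A minimal sketch of how such a command could be executed; `subprocess.run` here is an assumption, not necessarily what this script uses:

```python
import os
import subprocess

def dump_db(db_host, db_port, db_user, db_password, db_name, file_name):
    command = (
        f"pg_dump -h {db_host} -p {db_port} -U {db_user} "
        f"-d {db_name} -c --inserts -f {file_name}"
    )
    env = os.environ.copy()
    env["PGPASSWORD"] = db_password  # pg_dump reads the password from the env
    subprocess.run(command, shell=True, env=env, check=True)
```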
@ -6,4 +6,4 @@ services:
  bondId:
  chainId: laconic_9000-1
  gas: 250000
-  fees: 200000photon
+  fees: 2000000alnt

@ -1,5 +1,5 @@
#!/bin/sh
-if [[ -n "$CERC_SCRIPT_DEBUG" ]]; then
+if [ -n "$CERC_SCRIPT_DEBUG" ]; then
  set -x
fi

@ -9,7 +9,7 @@ LOGLEVEL="info"
laconicd start \
  --pruning=nothing \
  --log_level $LOGLEVEL \
-  --minimum-gas-prices=0.0001photon \
+  --minimum-gas-prices=1alnt \
  --api.enable \
  --gql-server \
  --gql-playground

@ -1901,4 +1901,4 @@
  "uid": "b54352dd-35f6-4151-97dc-265bab0c67e9",
  "version": 18,
  "weekStart": ""
}

@ -849,7 +849,7 @@ groups:
      annotations:
        summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
      isPaused: false

    # Secured Finance
    - uid: secured_finance_diff_external
      title: secured_finance_watcher_head_tracking

@ -14,7 +14,7 @@ echo ACCOUNT_PRIVATE_KEY=${CERC_PRIVATE_KEY_DEPLOYER} >> .env
if [ -f ${erc20_address_file} ]; then
  echo "${erc20_address_file} already exists, skipping ERC20 contract deployment"
  cat ${erc20_address_file}

  # Keep the container running
  tail -f
fi

stack_orchestrator/data/config/test/script.sh (Normal file)
@ -0,0 +1,3 @@
+#!/bin/sh
+
+echo "Hello"

stack_orchestrator/data/config/test/settings.env (Normal file)
@ -0,0 +1 @@
+ANSWER=42

@ -940,4 +940,3 @@ ALTER TABLE ONLY public.state
--
-- PostgreSQL database dump complete
--
-

@ -18,4 +18,3 @@ root@7c4124bb09e3:/src#
```
-
Now gerbil commands can be run.

@ -23,7 +23,7 @@ local_npm_registry_url=$2
versioned_target_package=$(yarn list --pattern ${target_package} --depth=0 --json --non-interactive --no-progress | jq -r '.data.trees[].name')
# Use yarn info to get URL checksums etc from the new registry
yarn_info_output=$(yarn info --json $versioned_target_package 2>/dev/null)
# First check if the target version actually exists.
# If it doesn't exist there will be no .data.dist.tarball element,
# and jq will output the string "null"
package_tarball=$(echo $yarn_info_output | jq -r .data.dist.tarball)

@ -11,6 +11,8 @@ if len(sys.argv) > 1:
with open(testnet_config_path) as stream:
    data = yaml.safe_load(stream)

-for key, value in data['el_premine'].items():
+for key, value in data["el_premine"].items():
-    acct = w3.eth.account.from_mnemonic(data['mnemonic'], account_path=key, passphrase='')
+    acct = w3.eth.account.from_mnemonic(
+        data["mnemonic"], account_path=key, passphrase=""
+    )
    print("%s,%s,%s" % (key, acct.address, acct.key.hex()))
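Note: the hunk above derives premine accounts from a mnemonic at per-key HD derivation paths. A minimal standalone sketch of the same eth-account API; the mnemonic and path here are illustrative test values, not from the testnet config:

```python
from eth_account import Account

# eth-account requires this opt-in before mnemonic derivation is allowed
Account.enable_unaudited_hdwallet_features()

mnemonic = "test test test test test test test test test test test junk"
acct = Account.from_mnemonic(
    mnemonic, account_path="m/44'/60'/0'/0/0", passphrase=""
)
print(acct.address)  # deterministic address for this mnemonic and path
```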
|
|||||||
@ -4,4 +4,4 @@ out = 'out'
|
|||||||
libs = ['lib']
|
libs = ['lib']
|
||||||
remappings = ['ds-test/=lib/ds-test/src/']
|
remappings = ['ds-test/=lib/ds-test/src/']
|
||||||
|
|
||||||
# See more config options https://github.com/gakonst/foundry/tree/master/config
|
# See more config options https://github.com/gakonst/foundry/tree/master/config
|
||||||
|
|||||||
@ -20,4 +20,4 @@ contract Stateful {
|
|||||||
function inc() public {
|
function inc() public {
|
||||||
x = x + 1;
|
x = x + 1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@ -14,7 +14,7 @@ funds_balance=$(echo ${funds_response} | jq -r ".[0].balance[0].quantity")
|
|||||||
echo "Balance is: ${funds_balance}"
|
echo "Balance is: ${funds_balance}"
|
||||||
|
|
||||||
# Create a bond
|
# Create a bond
|
||||||
bond_create_result=$(${registry_command} bond create --type photon --quantity 1000000000)
|
bond_create_result=$(${registry_command} bond create --type alnt --quantity 1000000000)
|
||||||
bond_id=$(echo ${bond_create_result} | jq -r .bondId)
|
bond_id=$(echo ${bond_create_result} | jq -r .bondId)
|
||||||
echo "Created bond with id: ${bond_id}"
|
echo "Created bond with id: ${bond_id}"
|
||||||
|
|
||||||
|
|||||||
@ -11,4 +11,4 @@ record:
|
|||||||
foo: bar
|
foo: bar
|
||||||
tags:
|
tags:
|
||||||
- a
|
- a
|
||||||
- b
|
- b
|
||||||
|
|||||||
@ -9,4 +9,4 @@ record:
|
|||||||
foo: bar
|
foo: bar
|
||||||
tags:
|
tags:
|
||||||
- a
|
- a
|
||||||
- b
|
- b
|
||||||
|
|||||||
@ -1,4 +1,4 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
# Build cerc/laconicd
|
# Build cerc/laconicd
|
||||||
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
|
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
|
||||||
docker build -t cerc/laconicd:local ${build_command_args} ${CERC_REPO_BASE_DIR}/laconicd
|
docker build -t cerc/laconicd:local ${build_command_args} ${CERC_REPO_BASE_DIR}/laconicd
|
||||||
|
|||||||
@ -26,8 +26,14 @@ fi
|
|||||||
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
|
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
|
||||||
WORK_DIR="${1:-/app}"
|
WORK_DIR="${1:-/app}"
|
||||||
|
|
||||||
|
if [ -f "${WORK_DIR}/build-webapp.sh" ]; then
|
||||||
|
echo "Building webapp with ${WORK_DIR}/build-webapp.sh ..."
|
||||||
cd "${WORK_DIR}" || exit 1
|
cd "${WORK_DIR}" || exit 1
|
||||||
|
|
||||||
|
./build-webapp.sh || exit 1
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
if [ -f "next.config.mjs" ]; then
|
if [ -f "next.config.mjs" ]; then
|
||||||
NEXT_CONFIG_JS="next.config.mjs"
|
NEXT_CONFIG_JS="next.config.mjs"
|
||||||
IMPORT_OR_REQUIRE="import"
|
IMPORT_OR_REQUIRE="import"
|
||||||
|
|||||||
@ -30,36 +30,44 @@ fi
|
|||||||
CERC_WEBAPP_FILES_DIR="${CERC_WEBAPP_FILES_DIR:-/app}"
|
CERC_WEBAPP_FILES_DIR="${CERC_WEBAPP_FILES_DIR:-/app}"
|
||||||
cd "$CERC_WEBAPP_FILES_DIR"
|
cd "$CERC_WEBAPP_FILES_DIR"
|
||||||
|
|
||||||
"$SCRIPT_DIR/apply-runtime-env.sh" "`pwd`" .next .next-r
|
if [ -f "./run-webapp.sh" ]; then
|
||||||
mv .next .next.old
|
echo "Running webapp with run-webapp.sh ..."
|
||||||
mv .next-r/.next .
|
cd "${WORK_DIR}" || exit 1
|
||||||
|
./run-webapp.sh &
|
||||||
|
tpid=$!
|
||||||
|
wait $tpid
|
||||||
|
else
|
||||||
|
"$SCRIPT_DIR/apply-runtime-env.sh" "`pwd`" .next .next-r
|
||||||
|
mv .next .next.old
|
||||||
|
mv .next-r/.next .
|
||||||
|
|
||||||
if [ "$CERC_NEXTJS_SKIP_GENERATE" != "true" ]; then
|
if [ "$CERC_NEXTJS_SKIP_GENERATE" != "true" ]; then
|
||||||
jq -e '.scripts.cerc_generate' package.json >/dev/null
|
jq -e '.scripts.cerc_generate' package.json >/dev/null
|
||||||
if [ $? -eq 0 ]; then
|
if [ $? -eq 0 ]; then
|
||||||
npm run cerc_generate > gen.out 2>&1 &
|
npm run cerc_generate > gen.out 2>&1 &
|
||||||
tail -f gen.out &
|
tail -f gen.out &
|
||||||
tpid=$!
|
tpid=$!
|
||||||
|
|
||||||
count=0
|
count=0
|
||||||
generate_done="false"
|
generate_done="false"
|
||||||
while [ $count -lt $CERC_MAX_GENERATE_TIME ] && [ "$generate_done" == "false" ]; do
|
while [ $count -lt $CERC_MAX_GENERATE_TIME ] && [ "$generate_done" == "false" ]; do
|
||||||
sleep 1
|
sleep 1
|
||||||
count=$((count + 1))
|
count=$((count + 1))
|
||||||
grep 'rendered as static' gen.out > /dev/null
|
grep 'rendered as static' gen.out > /dev/null
|
||||||
if [ $? -eq 0 ]; then
|
if [ $? -eq 0 ]; then
|
||||||
generate_done="true"
|
generate_done="true"
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
if [ $generate_done != "true" ]; then
|
||||||
|
echo "ERROR: 'npm run cerc_generate' not successful within CERC_MAX_GENERATE_TIME" 1>&2
|
||||||
|
exit 1
|
||||||
fi
|
fi
|
||||||
done
|
|
||||||
|
|
||||||
if [ $generate_done != "true" ]; then
|
kill $tpid $(ps -ef | grep node | grep next | grep generate | awk '{print $2}') 2>/dev/null
|
||||||
echo "ERROR: 'npm run cerc_generate' not successful within CERC_MAX_GENERATE_TIME" 1>&2
|
tpid=""
|
||||||
exit 1
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
kill $tpid $(ps -ef | grep node | grep next | grep generate | awk '{print $2}') 2>/dev/null
|
|
||||||
tpid=""
|
|
||||||
fi
|
fi
|
||||||
fi
|
|
||||||
|
|
||||||
$CERC_BUILD_TOOL start . -- -p ${CERC_LISTEN_PORT:-80}
|
$CERC_BUILD_TOOL start . -- -p ${CERC_LISTEN_PORT:-80}
|
||||||
|
fi
|
||||||
|
|||||||
@ -5,4 +5,3 @@ WORKDIR /app
|
|||||||
COPY . .
|
COPY . .
|
||||||
|
|
||||||
RUN yarn
|
RUN yarn
|
||||||
|
|
||||||
|
|||||||
@ -4,5 +4,9 @@ source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
|
|||||||
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
|
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
|
||||||
|
|
||||||
# Two-stage build is to allow us to pick up both the upstream repo's files, and local files here for config
|
# Two-stage build is to allow us to pick up both the upstream repo's files, and local files here for config
|
||||||
docker build -t cerc/ping-pub-base:local ${build_command_args} -f $SCRIPT_DIR/Dockerfile.base $CERC_REPO_BASE_DIR/explorer
|
docker build -t cerc/ping-pub-base:local ${build_command_args} -f $SCRIPT_DIR/Dockerfile.base $CERC_REPO_BASE_DIR/cosmos-explorer
|
||||||
|
if [[ $? -ne 0 ]]; then
|
||||||
|
echo "FATAL: Base container build failed, exiting"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
docker build -t cerc/ping-pub:local ${build_command_args} -f $SCRIPT_DIR/Dockerfile $SCRIPT_DIR
|
docker build -t cerc/ping-pub:local ${build_command_args} -f $SCRIPT_DIR/Dockerfile $SCRIPT_DIR
|
||||||
|
|||||||
@ -7,16 +7,16 @@
|
|||||||
"rpc": [
|
"rpc": [
|
||||||
{"provider": "LX-tendermint-rpc", "address": "LACONIC_LACONICD_RPC_URL"}
|
{"provider": "LX-tendermint-rpc", "address": "LACONIC_LACONICD_RPC_URL"}
|
||||||
],
|
],
|
||||||
"sdk_version": "0.45.1",
|
"sdk_version": "0.50.3",
|
||||||
"coin_type": "118",
|
"coin_type": "118",
|
||||||
"min_tx_fee": "800",
|
"min_tx_fee": "800",
|
||||||
"addr_prefix": "ethm",
|
"addr_prefix": "laconic",
|
||||||
"logo": "/logos/cosmos.svg",
|
"logo": "/logos/cosmos.svg",
|
||||||
"assets": [{
|
"assets": [{
|
||||||
"base": "photon",
|
"base": "alnt",
|
||||||
"symbol": "LNT",
|
"symbol": "LNT",
|
||||||
"exponent": "6",
|
"exponent": "18",
|
||||||
"coingecko_id": "cosmos",
|
"coingecko_id": "cosmos",
|
||||||
"logo": "/logos/cosmos.svg"
|
"logo": "/logos/cosmos.svg"
|
||||||
}]
|
}]
|
||||||
}
|
}
|
||||||
|
|||||||
@ -26,11 +26,6 @@ fi
|
|||||||
# subvert this lunacy.
|
# subvert this lunacy.
|
||||||
explorer_mainnet_config_dir=/app/chains/mainnet
|
explorer_mainnet_config_dir=/app/chains/mainnet
|
||||||
explorer_testnet_config_dir=/app/chains/testnet
|
explorer_testnet_config_dir=/app/chains/testnet
|
||||||
|
|
||||||
# Create required directories
|
|
||||||
mkdir -p $explorer_mainnet_config_dir
|
|
||||||
mkdir -p $explorer_testnet_config_dir
|
|
||||||
|
|
||||||
config_template_file=/config/chains/laconic-chaindata-template.json
|
config_template_file=/config/chains/laconic-chaindata-template.json
|
||||||
chain_config_name=laconic.json
|
chain_config_name=laconic.json
|
||||||
mainnet_config_file=${explorer_mainnet_config_dir}/${chain_config_name}
|
mainnet_config_file=${explorer_mainnet_config_dir}/${chain_config_name}
|
||||||
|
|||||||
@ -1,9 +1,6 @@
|
|||||||
FROM ubuntu:latest
|
FROM alpine:latest
|
||||||
|
|
||||||
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive && export DEBCONF_NOWARNINGS="yes" && \
|
RUN apk add --no-cache nginx
|
||||||
apt-get install -y software-properties-common && \
|
|
||||||
apt-get install -y nginx && \
|
|
||||||
apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
|
|
||||||
|
|
||||||
EXPOSE 80
|
EXPOSE 80
|
||||||
|
|
||||||
|
|||||||
@ -1,4 +1,4 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env sh
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
|
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
|
||||||
@ -8,14 +8,14 @@ fi
|
|||||||
echo "Test container starting"
|
echo "Test container starting"
|
||||||
|
|
||||||
DATA_DEVICE=$(df | grep "/data$" | awk '{ print $1 }')
|
DATA_DEVICE=$(df | grep "/data$" | awk '{ print $1 }')
|
||||||
if [[ -n "$DATA_DEVICE" ]]; then
|
if [ -n "$DATA_DEVICE" ]; then
|
||||||
echo "/data: MOUNTED dev=${DATA_DEVICE}"
|
echo "/data: MOUNTED dev=${DATA_DEVICE}"
|
||||||
else
|
else
|
||||||
echo "/data: not mounted"
|
echo "/data: not mounted"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
DATA2_DEVICE=$(df | grep "/data2$" | awk '{ print $1 }')
|
DATA2_DEVICE=$(df | grep "/data2$" | awk '{ print $1 }')
|
||||||
if [[ -n "$DATA_DEVICE" ]]; then
|
if [ -n "$DATA_DEVICE" ]; then
|
||||||
echo "/data2: MOUNTED dev=${DATA2_DEVICE}"
|
echo "/data2: MOUNTED dev=${DATA2_DEVICE}"
|
||||||
else
|
else
|
||||||
echo "/data2: not mounted"
|
echo "/data2: not mounted"
|
||||||
@ -23,7 +23,7 @@ fi
|
|||||||
|
|
||||||
# Test if the container's filesystem is old (run previously) or new
|
# Test if the container's filesystem is old (run previously) or new
|
||||||
for d in /data /data2; do
|
for d in /data /data2; do
|
||||||
if [[ -f "$d/exists" ]];
|
if [ -f "$d/exists" ];
|
||||||
then
|
then
|
||||||
TIMESTAMP=`cat $d/exists`
|
TIMESTAMP=`cat $d/exists`
|
||||||
echo "$d filesystem is old, created: $TIMESTAMP"
|
echo "$d filesystem is old, created: $TIMESTAMP"
|
||||||
@ -52,7 +52,7 @@ fi
|
|||||||
if [ -d "/config" ]; then
|
if [ -d "/config" ]; then
|
||||||
echo "/config: EXISTS"
|
echo "/config: EXISTS"
|
||||||
for f in /config/*; do
|
for f in /config/*; do
|
||||||
if [[ -f "$f" ]] || [[ -L "$f" ]]; then
|
if [ -f "$f" ] || [ -L "$f" ]; then
|
||||||
echo "$f:"
|
echo "$f:"
|
||||||
cat "$f"
|
cat "$f"
|
||||||
echo ""
|
echo ""
|
||||||
@ -64,4 +64,4 @@ else
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
# Run nginx which will block here forever
|
# Run nginx which will block here forever
|
||||||
/usr/sbin/nginx -g "daemon off;"
|
nginx -g "daemon off;"
|
||||||
|
|||||||
@ -2,4 +2,4 @@
|
|||||||
# Build cerc/test-container
|
# Build cerc/test-container
|
||||||
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
|
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
|
||||||
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
|
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
|
||||||
docker build -t cerc/test-database-client:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} $SCRIPT_DIR
|
docker build -t cerc/test-database-client:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} $SCRIPT_DIR
|
||||||
|
|||||||
@ -8,7 +8,7 @@ CERC_WEBAPP_FILES_DIR="${CERC_WEBAPP_FILES_DIR:-/data}"
|
|||||||
CERC_ENABLE_CORS="${CERC_ENABLE_CORS:-false}"
|
CERC_ENABLE_CORS="${CERC_ENABLE_CORS:-false}"
|
||||||
CERC_SINGLE_PAGE_APP="${CERC_SINGLE_PAGE_APP}"
|
CERC_SINGLE_PAGE_APP="${CERC_SINGLE_PAGE_APP}"
|
||||||
|
|
||||||
if [ -z "${CERC_SINGLE_PAGE_APP}" ]; then
|
if [ -z "${CERC_SINGLE_PAGE_APP}" ]; then
|
||||||
# If there is only one HTML file, assume an SPA.
|
# If there is only one HTML file, assume an SPA.
|
||||||
if [ 1 -eq $(find "${CERC_WEBAPP_FILES_DIR}" -name '*.html' | wc -l) ]; then
|
if [ 1 -eq $(find "${CERC_WEBAPP_FILES_DIR}" -name '*.html' | wc -l) ]; then
|
||||||
CERC_SINGLE_PAGE_APP=true
|
CERC_SINGLE_PAGE_APP=true
|
||||||
|
|||||||
@ -0,0 +1,260 @@
# Caddy Ingress Controller for kind
# Based on: https://github.com/caddyserver/ingress
# Provides automatic HTTPS with Let's Encrypt
apiVersion: v1
kind: Namespace
metadata:
  name: caddy-system
  labels:
    app.kubernetes.io/name: caddy-ingress-controller
    app.kubernetes.io/instance: caddy-ingress
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: caddy-ingress-controller
  namespace: caddy-system
  labels:
    app.kubernetes.io/name: caddy-ingress-controller
    app.kubernetes.io/instance: caddy-ingress
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: caddy-ingress-controller
  labels:
    app.kubernetes.io/name: caddy-ingress-controller
    app.kubernetes.io/instance: caddy-ingress
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - endpoints
      - nodes
      - pods
      - namespaces
      - services
    verbs:
      - list
      - watch
      - get
  - apiGroups:
      - ""
    resources:
      - secrets
    verbs:
      - list
      - watch
      - get
      - create
      - update
      - delete
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses/status
    verbs:
      - update
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingressclasses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - coordination.k8s.io
    resources:
      - leases
    verbs:
      - get
      - create
      - update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: caddy-ingress-controller
  labels:
    app.kubernetes.io/name: caddy-ingress-controller
    app.kubernetes.io/instance: caddy-ingress
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: caddy-ingress-controller
subjects:
  - kind: ServiceAccount
    name: caddy-ingress-controller
    namespace: caddy-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: caddy-ingress-controller-configmap
  namespace: caddy-system
  labels:
    app.kubernetes.io/name: caddy-ingress-controller
    app.kubernetes.io/instance: caddy-ingress
data:
  # Caddy global options
  acmeCA: "https://acme-v02.api.letsencrypt.org/directory"
  email: ""
---
apiVersion: v1
kind: Service
metadata:
  name: caddy-ingress-controller
  namespace: caddy-system
  labels:
    app.kubernetes.io/name: caddy-ingress-controller
    app.kubernetes.io/instance: caddy-ingress
    app.kubernetes.io/component: controller
spec:
  type: NodePort
  ports:
    - name: http
      port: 80
      targetPort: http
      protocol: TCP
    - name: https
      port: 443
      targetPort: https
      protocol: TCP
  selector:
    app.kubernetes.io/name: caddy-ingress-controller
    app.kubernetes.io/instance: caddy-ingress
    app.kubernetes.io/component: controller
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: caddy-ingress-controller
  namespace: caddy-system
  labels:
    app.kubernetes.io/name: caddy-ingress-controller
    app.kubernetes.io/instance: caddy-ingress
    app.kubernetes.io/component: controller
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: caddy-ingress-controller
      app.kubernetes.io/instance: caddy-ingress
      app.kubernetes.io/component: controller
  template:
    metadata:
      labels:
        app.kubernetes.io/name: caddy-ingress-controller
        app.kubernetes.io/instance: caddy-ingress
        app.kubernetes.io/component: controller
    spec:
      serviceAccountName: caddy-ingress-controller
      terminationGracePeriodSeconds: 60
      nodeSelector:
        ingress-ready: "true"
        kubernetes.io/os: linux
      tolerations:
        - effect: NoSchedule
          key: node-role.kubernetes.io/master
          operator: Equal
        - effect: NoSchedule
          key: node-role.kubernetes.io/control-plane
          operator: Equal
      containers:
        - name: caddy-ingress-controller
          image: caddy/ingress:latest
          imagePullPolicy: IfNotPresent
          ports:
            - name: http
              containerPort: 80
              hostPort: 80
              protocol: TCP
            - name: https
              containerPort: 443
              hostPort: 443
              protocol: TCP
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          args:
            - -config-map=caddy-system/caddy-ingress-controller-configmap
            - -class-name=caddy
          resources:
            requests:
              cpu: 100m
              memory: 128Mi
            limits:
              cpu: 1000m
              memory: 512Mi
          readinessProbe:
            httpGet:
              path: /healthz
              port: 9765
            initialDelaySeconds: 3
            periodSeconds: 10
          livenessProbe:
            httpGet:
              path: /healthz
              port: 9765
            initialDelaySeconds: 3
            periodSeconds: 10
          securityContext:
            allowPrivilegeEscalation: true
            capabilities:
              add:
                - NET_BIND_SERVICE
              drop:
                - ALL
            runAsUser: 0
            runAsGroup: 0
          volumeMounts:
            - name: caddy-data
              mountPath: /data
            - name: caddy-config
              mountPath: /config
      volumes:
        - name: caddy-data
          emptyDir: {}
        - name: caddy-config
          emptyDir: {}
---
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  name: caddy
  labels:
    app.kubernetes.io/name: caddy-ingress-controller
    app.kubernetes.io/instance: caddy-ingress
  annotations:
    ingressclass.kubernetes.io/is-default-class: "true"
spec:
  controller: caddy.io/ingress-controller
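
Because the Deployment above schedules only onto nodes labeled ingress-ready=true and binds host ports 80 and 443 via hostPort, a kind cluster needs a matching node label and port mappings before this manifest can serve traffic. A minimal sketch following the standard kind ingress recipe (the cluster name and manifest filename are illustrative):

# Create a kind cluster whose control-plane node carries the
# ingress-ready=true label and maps host ports 80/443 into the node.
kind create cluster --name caddy-test --config=- <<EOF
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
  kubeadmConfigPatches:
  - |
    kind: InitConfiguration
    nodeRegistration:
      kubeletExtraArgs:
        node-labels: "ingress-ready=true"
  extraPortMappings:
  - containerPort: 80
    hostPort: 80
    protocol: TCP
  - containerPort: 443
    hostPort: 443
    protocol: TCP
EOF
# Apply the controller manifest shown above (hypothetical filename):
kubectl apply -f caddy-ingress-controller.yaml

With this mapping, traffic to localhost:80/443 on the host reaches the controller pod directly through its hostPorts; the NodePort Service in the manifest is then secondary.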
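Once the controller is running, a workload is exposed with an ordinary Ingress that references the caddy class (or omits the class entirely, since the IngressClass above is annotated as the cluster default). A sketch with placeholder host and service names; the ConfigMap's empty email field is where an ACME contact address for Let's Encrypt would normally go:

# Hypothetical Ingress routed through the Caddy controller; host and
# backend service names are placeholders.
kubectl apply -f - <<EOF
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: example-webapp
  namespace: default
spec:
  ingressClassName: caddy
  rules:
    - host: app.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: example-webapp
                port:
                  number: 80
EOF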
Some files were not shown because too many files have changed in this diff.