forked from cerc-io/stack-orchestrator
Compare commits
41 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 55b76b9b57 | |||
|
|
d07a3afd27 | ||
|
|
a5b373da26 | ||
|
|
99db75da19 | ||
|
|
d4e935484f | ||
|
|
4f01054781 | ||
|
|
811bbd9db4 | ||
|
|
8d9682eb47 | ||
|
|
638435873c | ||
|
|
97a85359ff | ||
|
|
ffa00767d4 | ||
|
|
86462c940f | ||
|
|
87db167d7f | ||
|
|
dd856af2d3 | ||
|
|
cd3d908d0d | ||
|
|
03f9acf869 | ||
|
|
ba1aad9fa6 | ||
|
|
dc36a6564a | ||
|
|
c5c3fc1618 | ||
|
|
2e384b7179 | ||
|
|
b708836aa9 | ||
|
|
d8da9b6515 | ||
|
|
5a1399f2b2 | ||
|
|
89db6e1e92 | ||
|
|
9bd59f29d9 | ||
| 55d6c5b495 | |||
|
|
f3ef3e9a1f | ||
|
|
1768bd0fe1 | ||
| 8afae1904b | |||
| 7acabb0743 | |||
| ccccd9f957 | |||
| 34f3b719e4 | |||
| 0e814bd4da | |||
| 873a6d472c | |||
| 39df4683ac | |||
| 23ca4c4341 | |||
| f64ef5d128 | |||
| 5f8e809b2d | |||
| 4a7df2de33 | |||
| 0c47da42fe | |||
| e290c62aca |
@ -39,7 +39,7 @@ jobs:
|
|||||||
- name: "Print Python version"
|
- name: "Print Python version"
|
||||||
run: python3 --version
|
run: python3 --version
|
||||||
- name: "Install shiv"
|
- name: "Install shiv"
|
||||||
run: pip install shiv
|
run: pip install shiv==1.0.6
|
||||||
- name: "Generate build version file"
|
- name: "Generate build version file"
|
||||||
run: ./scripts/create_build_tag_file.sh
|
run: ./scripts/create_build_tag_file.sh
|
||||||
- name: "Build local shiv package"
|
- name: "Build local shiv package"
|
||||||
|
|||||||
@ -35,7 +35,7 @@ jobs:
|
|||||||
- name: "Print Python version"
|
- name: "Print Python version"
|
||||||
run: python3 --version
|
run: python3 --version
|
||||||
- name: "Install shiv"
|
- name: "Install shiv"
|
||||||
run: pip install shiv
|
run: pip install shiv==1.0.6
|
||||||
- name: "Build local shiv package"
|
- name: "Build local shiv package"
|
||||||
id: build
|
id: build
|
||||||
run: |
|
run: |
|
||||||
|
|||||||
@ -33,7 +33,7 @@ jobs:
|
|||||||
- name: "Print Python version"
|
- name: "Print Python version"
|
||||||
run: python3 --version
|
run: python3 --version
|
||||||
- name: "Install shiv"
|
- name: "Install shiv"
|
||||||
run: pip install shiv
|
run: pip install shiv==1.0.6
|
||||||
- name: "Generate build version file"
|
- name: "Generate build version file"
|
||||||
run: ./scripts/create_build_tag_file.sh
|
run: ./scripts/create_build_tag_file.sh
|
||||||
- name: "Build local shiv package"
|
- name: "Build local shiv package"
|
||||||
|
|||||||
@ -33,7 +33,7 @@ jobs:
|
|||||||
- name: "Print Python version"
|
- name: "Print Python version"
|
||||||
run: python3 --version
|
run: python3 --version
|
||||||
- name: "Install shiv"
|
- name: "Install shiv"
|
||||||
run: pip install shiv
|
run: pip install shiv==1.0.6
|
||||||
- name: "Generate build version file"
|
- name: "Generate build version file"
|
||||||
run: ./scripts/create_build_tag_file.sh
|
run: ./scripts/create_build_tag_file.sh
|
||||||
- name: "Build local shiv package"
|
- name: "Build local shiv package"
|
||||||
|
|||||||
@ -2,7 +2,8 @@ name: Deploy Test
|
|||||||
|
|
||||||
on:
|
on:
|
||||||
pull_request:
|
pull_request:
|
||||||
branches: '*'
|
branches:
|
||||||
|
- main
|
||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- main
|
- main
|
||||||
@ -33,7 +34,7 @@ jobs:
|
|||||||
- name: "Print Python version"
|
- name: "Print Python version"
|
||||||
run: python3 --version
|
run: python3 --version
|
||||||
- name: "Install shiv"
|
- name: "Install shiv"
|
||||||
run: pip install shiv
|
run: pip install shiv==1.0.6
|
||||||
- name: "Generate build version file"
|
- name: "Generate build version file"
|
||||||
run: ./scripts/create_build_tag_file.sh
|
run: ./scripts/create_build_tag_file.sh
|
||||||
- name: "Build local shiv package"
|
- name: "Build local shiv package"
|
||||||
|
|||||||
@ -33,7 +33,7 @@ jobs:
|
|||||||
- name: "Print Python version"
|
- name: "Print Python version"
|
||||||
run: python3 --version
|
run: python3 --version
|
||||||
- name: "Install shiv"
|
- name: "Install shiv"
|
||||||
run: pip install shiv
|
run: pip install shiv==1.0.6
|
||||||
- name: "Generate build version file"
|
- name: "Generate build version file"
|
||||||
run: ./scripts/create_build_tag_file.sh
|
run: ./scripts/create_build_tag_file.sh
|
||||||
- name: "Build local shiv package"
|
- name: "Build local shiv package"
|
||||||
|
|||||||
@ -2,7 +2,8 @@ name: K8s Deploy Test
|
|||||||
|
|
||||||
on:
|
on:
|
||||||
pull_request:
|
pull_request:
|
||||||
branches: '*'
|
branches:
|
||||||
|
- main
|
||||||
push:
|
push:
|
||||||
branches: '*'
|
branches: '*'
|
||||||
paths:
|
paths:
|
||||||
@ -35,7 +36,7 @@ jobs:
|
|||||||
- name: "Print Python version"
|
- name: "Print Python version"
|
||||||
run: python3 --version
|
run: python3 --version
|
||||||
- name: "Install shiv"
|
- name: "Install shiv"
|
||||||
run: pip install shiv
|
run: pip install shiv==1.0.6
|
||||||
- name: "Generate build version file"
|
- name: "Generate build version file"
|
||||||
run: ./scripts/create_build_tag_file.sh
|
run: ./scripts/create_build_tag_file.sh
|
||||||
- name: "Build local shiv package"
|
- name: "Build local shiv package"
|
||||||
|
|||||||
@ -2,7 +2,8 @@ name: K8s Deployment Control Test
|
|||||||
|
|
||||||
on:
|
on:
|
||||||
pull_request:
|
pull_request:
|
||||||
branches: '*'
|
branches:
|
||||||
|
- main
|
||||||
push:
|
push:
|
||||||
branches: '*'
|
branches: '*'
|
||||||
paths:
|
paths:
|
||||||
@ -35,7 +36,7 @@ jobs:
|
|||||||
- name: "Print Python version"
|
- name: "Print Python version"
|
||||||
run: python3 --version
|
run: python3 --version
|
||||||
- name: "Install shiv"
|
- name: "Install shiv"
|
||||||
run: pip install shiv
|
run: pip install shiv==1.0.6
|
||||||
- name: "Generate build version file"
|
- name: "Generate build version file"
|
||||||
run: ./scripts/create_build_tag_file.sh
|
run: ./scripts/create_build_tag_file.sh
|
||||||
- name: "Build local shiv package"
|
- name: "Build local shiv package"
|
||||||
|
|||||||
@ -2,7 +2,8 @@ name: Webapp Test
|
|||||||
|
|
||||||
on:
|
on:
|
||||||
pull_request:
|
pull_request:
|
||||||
branches: '*'
|
branches:
|
||||||
|
- main
|
||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- main
|
- main
|
||||||
@ -32,7 +33,7 @@ jobs:
|
|||||||
- name: "Print Python version"
|
- name: "Print Python version"
|
||||||
run: python3 --version
|
run: python3 --version
|
||||||
- name: "Install shiv"
|
- name: "Install shiv"
|
||||||
run: pip install shiv
|
run: pip install shiv==1.0.6
|
||||||
- name: "Generate build version file"
|
- name: "Generate build version file"
|
||||||
run: ./scripts/create_build_tag_file.sh
|
run: ./scripts/create_build_tag_file.sh
|
||||||
- name: "Build local shiv package"
|
- name: "Build local shiv package"
|
||||||
|
|||||||
@ -33,7 +33,7 @@ jobs:
|
|||||||
- name: "Print Python version"
|
- name: "Print Python version"
|
||||||
run: python3 --version
|
run: python3 --version
|
||||||
- name: "Install shiv"
|
- name: "Install shiv"
|
||||||
run: pip install shiv
|
run: pip install shiv==1.0.6
|
||||||
- name: "Generate build version file"
|
- name: "Generate build version file"
|
||||||
run: ./scripts/create_build_tag_file.sh
|
run: ./scripts/create_build_tag_file.sh
|
||||||
- name: "Build local shiv package"
|
- name: "Build local shiv package"
|
||||||
|
|||||||
@ -1 +1,3 @@
|
|||||||
Change this file to trigger running the test-container-registry CI job
|
Change this file to trigger running the test-container-registry CI job
|
||||||
|
Triggered: 2026-01-21
|
||||||
|
Triggered: 2026-01-21 19:28:29
|
||||||
|
|||||||
@ -1,2 +1,2 @@
|
|||||||
Change this file to trigger running the test-database CI job
|
Change this file to trigger running the test-database CI job
|
||||||
Trigger test run
|
Trigger test run
|
||||||
|
|||||||
@ -1,2 +1 @@
|
|||||||
Change this file to trigger running the fixturenet-eth-test CI job
|
Change this file to trigger running the fixturenet-eth-test CI job
|
||||||
|
|
||||||
|
|||||||
34
.pre-commit-config.yaml
Normal file
34
.pre-commit-config.yaml
Normal file
@ -0,0 +1,34 @@
|
|||||||
|
repos:
|
||||||
|
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||||
|
rev: v5.0.0
|
||||||
|
hooks:
|
||||||
|
- id: trailing-whitespace
|
||||||
|
- id: end-of-file-fixer
|
||||||
|
- id: check-yaml
|
||||||
|
args: ['--allow-multiple-documents']
|
||||||
|
- id: check-json
|
||||||
|
- id: check-merge-conflict
|
||||||
|
- id: check-added-large-files
|
||||||
|
|
||||||
|
- repo: https://github.com/psf/black
|
||||||
|
rev: 23.12.1
|
||||||
|
hooks:
|
||||||
|
- id: black
|
||||||
|
language_version: python3
|
||||||
|
|
||||||
|
- repo: https://github.com/PyCQA/flake8
|
||||||
|
rev: 7.1.1
|
||||||
|
hooks:
|
||||||
|
- id: flake8
|
||||||
|
args: ['--max-line-length=88', '--extend-ignore=E203,W503,E402']
|
||||||
|
|
||||||
|
- repo: https://github.com/RobertCraigie/pyright-python
|
||||||
|
rev: v1.1.345
|
||||||
|
hooks:
|
||||||
|
- id: pyright
|
||||||
|
|
||||||
|
- repo: https://github.com/adrienverge/yamllint
|
||||||
|
rev: v1.35.1
|
||||||
|
hooks:
|
||||||
|
- id: yamllint
|
||||||
|
args: [-d, relaxed]
|
||||||
151
AI-FRIENDLY-PLAN.md
Normal file
151
AI-FRIENDLY-PLAN.md
Normal file
@ -0,0 +1,151 @@
|
|||||||
|
# Plan: Make Stack-Orchestrator AI-Friendly
|
||||||
|
|
||||||
|
## Goal
|
||||||
|
|
||||||
|
Make the stack-orchestrator repository easier for AI tools (Claude Code, Cursor, Copilot) to understand and use for generating stacks, including adding a `create-stack` command.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Part 1: Documentation & Context Files
|
||||||
|
|
||||||
|
### 1.1 Add CLAUDE.md
|
||||||
|
|
||||||
|
Create a root-level context file for AI assistants.
|
||||||
|
|
||||||
|
**File:** `CLAUDE.md`
|
||||||
|
|
||||||
|
Contents:
|
||||||
|
- Project overview (what stack-orchestrator does)
|
||||||
|
- Stack creation workflow (step-by-step)
|
||||||
|
- File naming conventions
|
||||||
|
- Required vs optional fields in stack.yml
|
||||||
|
- Common patterns and anti-patterns
|
||||||
|
- Links to example stacks (simple, medium, complex)
|
||||||
|
|
||||||
|
### 1.2 Add JSON Schema for stack.yml
|
||||||
|
|
||||||
|
Create formal validation schema.
|
||||||
|
|
||||||
|
**File:** `schemas/stack-schema.json`
|
||||||
|
|
||||||
|
Benefits:
|
||||||
|
- AI tools can validate generated stacks
|
||||||
|
- IDEs provide autocomplete
|
||||||
|
- CI can catch errors early
|
||||||
|
|
||||||
|
### 1.3 Add Template Stack with Comments
|
||||||
|
|
||||||
|
Create an annotated template for reference.
|
||||||
|
|
||||||
|
**File:** `stack_orchestrator/data/stacks/_template/stack.yml`
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# Stack definition template - copy this directory to create a new stack
|
||||||
|
version: "1.2" # Required: 1.0, 1.1, or 1.2
|
||||||
|
name: my-stack # Required: lowercase, hyphens only
|
||||||
|
description: "Human-readable description" # Optional
|
||||||
|
repos: # Git repositories to clone
|
||||||
|
- github.com/org/repo
|
||||||
|
containers: # Container images to build (must have matching container-build/)
|
||||||
|
- cerc/my-container
|
||||||
|
pods: # Deployment units (must have matching docker-compose-{pod}.yml)
|
||||||
|
- my-pod
|
||||||
|
```
|
||||||
|
|
||||||
|
### 1.4 Document Validation Rules
|
||||||
|
|
||||||
|
Create explicit documentation of constraints currently scattered in code.
|
||||||
|
|
||||||
|
**File:** `docs/stack-format.md`
|
||||||
|
|
||||||
|
Contents:
|
||||||
|
- Container names must start with `cerc/`
|
||||||
|
- Pod names must match compose file: `docker-compose-{pod}.yml`
|
||||||
|
- Repository format: `host/org/repo[@ref]`
|
||||||
|
- Stack directory name should match `name` field
|
||||||
|
- Version field options and differences
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Part 2: Add `create-stack` Command
|
||||||
|
|
||||||
|
### 2.1 Command Overview
|
||||||
|
|
||||||
|
```bash
|
||||||
|
laconic-so create-stack --repo github.com/org/my-app [--name my-app] [--type webapp]
|
||||||
|
```
|
||||||
|
|
||||||
|
**Behavior:**
|
||||||
|
1. Parse repo URL to extract app name (if --name not provided)
|
||||||
|
2. Create `stacks/{name}/stack.yml`
|
||||||
|
3. Create `container-build/cerc-{name}/Dockerfile` and `build.sh`
|
||||||
|
4. Create `compose/docker-compose-{name}.yml`
|
||||||
|
5. Update list files (repository-list.txt, container-image-list.txt, pod-list.txt)
|
||||||
|
|
||||||
|
### 2.2 Files to Create
|
||||||
|
|
||||||
|
| File | Purpose |
|
||||||
|
|------|---------|
|
||||||
|
| `stack_orchestrator/create/__init__.py` | Package init |
|
||||||
|
| `stack_orchestrator/create/create_stack.py` | Command implementation |
|
||||||
|
|
||||||
|
### 2.3 Files to Modify
|
||||||
|
|
||||||
|
| File | Change |
|
||||||
|
|------|--------|
|
||||||
|
| `stack_orchestrator/main.py` | Add import and `cli.add_command()` |
|
||||||
|
|
||||||
|
### 2.4 Command Options
|
||||||
|
|
||||||
|
| Option | Required | Description |
|
||||||
|
|--------|----------|-------------|
|
||||||
|
| `--repo` | Yes | Git repository URL (e.g., github.com/org/repo) |
|
||||||
|
| `--name` | No | Stack name (defaults to repo name) |
|
||||||
|
| `--type` | No | Template type: webapp, service, empty (default: webapp) |
|
||||||
|
| `--force` | No | Overwrite existing files |
|
||||||
|
|
||||||
|
### 2.5 Template Types
|
||||||
|
|
||||||
|
| Type | Base Image | Port | Use Case |
|
||||||
|
|------|------------|------|----------|
|
||||||
|
| webapp | node:20-bullseye-slim | 3000 | React/Vue/Next.js apps |
|
||||||
|
| service | python:3.11-slim | 8080 | Python backend services |
|
||||||
|
| empty | none | none | Custom from scratch |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Part 3: Implementation Summary
|
||||||
|
|
||||||
|
### New Files (6)
|
||||||
|
|
||||||
|
1. `CLAUDE.md` - AI assistant context
|
||||||
|
2. `schemas/stack-schema.json` - Validation schema
|
||||||
|
3. `stack_orchestrator/data/stacks/_template/stack.yml` - Annotated template
|
||||||
|
4. `docs/stack-format.md` - Stack format documentation
|
||||||
|
5. `stack_orchestrator/create/__init__.py` - Package init
|
||||||
|
6. `stack_orchestrator/create/create_stack.py` - Command implementation
|
||||||
|
|
||||||
|
### Modified Files (1)
|
||||||
|
|
||||||
|
1. `stack_orchestrator/main.py` - Register create-stack command
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Verification
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. Command appears in help
|
||||||
|
laconic-so --help | grep create-stack
|
||||||
|
|
||||||
|
# 2. Dry run works
|
||||||
|
laconic-so --dry-run create-stack --repo github.com/org/test-app
|
||||||
|
|
||||||
|
# 3. Creates all expected files
|
||||||
|
laconic-so create-stack --repo github.com/org/test-app
|
||||||
|
ls stack_orchestrator/data/stacks/test-app/
|
||||||
|
ls stack_orchestrator/data/container-build/cerc-test-app/
|
||||||
|
ls stack_orchestrator/data/compose/docker-compose-test-app.yml
|
||||||
|
|
||||||
|
# 4. Build works with generated stack
|
||||||
|
laconic-so --stack test-app build-containers
|
||||||
|
```
|
||||||
50
CLAUDE.md
Normal file
50
CLAUDE.md
Normal file
@ -0,0 +1,50 @@
|
|||||||
|
# CLAUDE.md
|
||||||
|
|
||||||
|
This file provides guidance to Claude Code when working with the stack-orchestrator project.
|
||||||
|
|
||||||
|
## Some rules to follow
|
||||||
|
NEVER speculate about the cause of something
|
||||||
|
NEVER assume your hypotheses are true without evidence
|
||||||
|
|
||||||
|
ALWAYS clearly state when something is a hypothesis
|
||||||
|
ALWAYS use evidence from the systems your interacting with to support your claims and hypotheses
|
||||||
|
|
||||||
|
## Key Principles
|
||||||
|
|
||||||
|
### Development Guidelines
|
||||||
|
- **Single responsibility** - Each component has one clear purpose
|
||||||
|
- **Fail fast** - Let errors propagate, don't hide failures
|
||||||
|
- **DRY/KISS** - Minimize duplication and complexity
|
||||||
|
|
||||||
|
## Development Philosophy: Conversational Literate Programming
|
||||||
|
|
||||||
|
### Approach
|
||||||
|
This project follows principles inspired by literate programming, where development happens through explanatory conversation rather than code-first implementation.
|
||||||
|
|
||||||
|
### Core Principles
|
||||||
|
- **Documentation-First**: All changes begin with discussion of intent and reasoning
|
||||||
|
- **Narrative-Driven**: Complex systems are explained through conversational exploration
|
||||||
|
- **Justification Required**: Every coding task must have a corresponding TODO.md item explaining the "why"
|
||||||
|
- **Iterative Understanding**: Architecture and implementation evolve through dialogue
|
||||||
|
|
||||||
|
### Working Method
|
||||||
|
1. **Explore and Understand**: Read existing code to understand current state
|
||||||
|
2. **Discuss Architecture**: Workshop complex design decisions through conversation
|
||||||
|
3. **Document Intent**: Update TODO.md with clear justification before coding
|
||||||
|
4. **Explain Changes**: Each modification includes reasoning and context
|
||||||
|
5. **Maintain Narrative**: Conversations serve as living documentation of design evolution
|
||||||
|
|
||||||
|
### Implementation Guidelines
|
||||||
|
- Treat conversations as primary documentation
|
||||||
|
- Explain architectural decisions before implementing
|
||||||
|
- Use TODO.md as the "literate document" that justifies all work
|
||||||
|
- Maintain clear narrative threads across sessions
|
||||||
|
- Workshop complex ideas before coding
|
||||||
|
|
||||||
|
This approach treats the human-AI collaboration as a form of **conversational literate programming** where understanding emerges through dialogue before code implementation.
|
||||||
|
|
||||||
|
## Insights and Observations
|
||||||
|
|
||||||
|
### Design Principles
|
||||||
|
- **When something times out that doesn't mean it needs a longer timeout it means something that was expected never happened, not that we need to wait longer for it.**
|
||||||
|
- **NEVER change a timeout because you believe something truncated, you don't understand timeouts, don't edit them unless told to explicitly by user.**
|
||||||
2
LICENSE
2
LICENSE
@ -658,4 +658,4 @@
|
|||||||
You should also get your employer (if you work as a programmer) or school,
|
You should also get your employer (if you work as a programmer) or school,
|
||||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||||
For more information on this, and how to apply and follow the GNU AGPL, see
|
For more information on this, and how to apply and follow the GNU AGPL, see
|
||||||
<http://www.gnu.org/licenses/>.
|
<http://www.gnu.org/licenses/>.
|
||||||
|
|||||||
@ -26,7 +26,7 @@ curl -SL https://github.com/docker/compose/releases/download/v2.11.2/docker-comp
|
|||||||
chmod +x ~/.docker/cli-plugins/docker-compose
|
chmod +x ~/.docker/cli-plugins/docker-compose
|
||||||
```
|
```
|
||||||
|
|
||||||
Next decide on a directory where you would like to put the stack-orchestrator program. Typically this would be
|
Next decide on a directory where you would like to put the stack-orchestrator program. Typically this would be
|
||||||
a "user" binary directory such as `~/bin` or perhaps `/usr/local/laconic` or possibly just the current working directory.
|
a "user" binary directory such as `~/bin` or perhaps `/usr/local/laconic` or possibly just the current working directory.
|
||||||
|
|
||||||
Now, having selected that directory, download the latest release from [this page](https://git.vdb.to/cerc-io/stack-orchestrator/tags) into it (we're using `~/bin` below for concreteness but edit to suit if you selected a different directory). Also be sure that the destination directory exists and is writable:
|
Now, having selected that directory, download the latest release from [this page](https://git.vdb.to/cerc-io/stack-orchestrator/tags) into it (we're using `~/bin` below for concreteness but edit to suit if you selected a different directory). Also be sure that the destination directory exists and is writable:
|
||||||
@ -78,5 +78,3 @@ See the [CONTRIBUTING.md](/docs/CONTRIBUTING.md) for developer mode install.
|
|||||||
## Platform Support
|
## Platform Support
|
||||||
|
|
||||||
Native aarm64 is _not_ currently supported. x64 emulation on ARM64 macos should work (not yet tested).
|
Native aarm64 is _not_ currently supported. x64 emulation on ARM64 macos should work (not yet tested).
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
413
STACK-CREATION-GUIDE.md
Normal file
413
STACK-CREATION-GUIDE.md
Normal file
@ -0,0 +1,413 @@
|
|||||||
|
# Implementing `laconic-so create-stack` Command
|
||||||
|
|
||||||
|
A plan for adding a new CLI command to scaffold stack files automatically.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
Add a `create-stack` command that generates all required files for a new stack:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
laconic-so create-stack --name my-stack --type webapp
|
||||||
|
```
|
||||||
|
|
||||||
|
**Output:**
|
||||||
|
```
|
||||||
|
stack_orchestrator/data/
|
||||||
|
├── stacks/my-stack/stack.yml
|
||||||
|
├── container-build/cerc-my-stack/
|
||||||
|
│ ├── Dockerfile
|
||||||
|
│ └── build.sh
|
||||||
|
└── compose/docker-compose-my-stack.yml
|
||||||
|
|
||||||
|
Updated: repository-list.txt, container-image-list.txt, pod-list.txt
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## CLI Architecture Summary
|
||||||
|
|
||||||
|
### Command Registration Pattern
|
||||||
|
|
||||||
|
Commands are Click functions registered in `main.py`:
|
||||||
|
|
||||||
|
```python
|
||||||
|
# main.py (line ~70)
|
||||||
|
from stack_orchestrator.create import create_stack
|
||||||
|
cli.add_command(create_stack.command, "create-stack")
|
||||||
|
```
|
||||||
|
|
||||||
|
### Global Options Access
|
||||||
|
|
||||||
|
```python
|
||||||
|
from stack_orchestrator.opts import opts
|
||||||
|
|
||||||
|
if not opts.o.quiet:
|
||||||
|
print("message")
|
||||||
|
if opts.o.dry_run:
|
||||||
|
print("(would create files)")
|
||||||
|
```
|
||||||
|
|
||||||
|
### Key Utilities
|
||||||
|
|
||||||
|
| Function | Location | Purpose |
|
||||||
|
|----------|----------|---------|
|
||||||
|
| `get_yaml()` | `util.py` | YAML parser (ruamel.yaml) |
|
||||||
|
| `get_stack_path(stack)` | `util.py` | Resolve stack directory path |
|
||||||
|
| `error_exit(msg)` | `util.py` | Print error and exit(1) |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Files to Create
|
||||||
|
|
||||||
|
### 1. Command Module
|
||||||
|
|
||||||
|
**`stack_orchestrator/create/__init__.py`**
|
||||||
|
```python
|
||||||
|
# Empty file to make this a package
|
||||||
|
```
|
||||||
|
|
||||||
|
**`stack_orchestrator/create/create_stack.py`**
|
||||||
|
```python
|
||||||
|
import click
|
||||||
|
import os
|
||||||
|
from pathlib import Path
|
||||||
|
from shutil import copy
|
||||||
|
from stack_orchestrator.opts import opts
|
||||||
|
from stack_orchestrator.util import error_exit, get_yaml
|
||||||
|
|
||||||
|
# Template types
|
||||||
|
STACK_TEMPLATES = {
|
||||||
|
"webapp": {
|
||||||
|
"description": "Web application with Node.js",
|
||||||
|
"base_image": "node:20-bullseye-slim",
|
||||||
|
"port": 3000,
|
||||||
|
},
|
||||||
|
"service": {
|
||||||
|
"description": "Backend service",
|
||||||
|
"base_image": "python:3.11-slim",
|
||||||
|
"port": 8080,
|
||||||
|
},
|
||||||
|
"empty": {
|
||||||
|
"description": "Minimal stack with no defaults",
|
||||||
|
"base_image": None,
|
||||||
|
"port": None,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def get_data_dir() -> Path:
|
||||||
|
"""Get path to stack_orchestrator/data directory"""
|
||||||
|
return Path(__file__).absolute().parent.parent.joinpath("data")
|
||||||
|
|
||||||
|
|
||||||
|
def validate_stack_name(name: str) -> None:
|
||||||
|
"""Validate stack name follows conventions"""
|
||||||
|
import re
|
||||||
|
if not re.match(r'^[a-z0-9][a-z0-9-]*[a-z0-9]$', name) and len(name) > 2:
|
||||||
|
error_exit(f"Invalid stack name '{name}'. Use lowercase alphanumeric with hyphens.")
|
||||||
|
if name.startswith("cerc-"):
|
||||||
|
error_exit("Stack name should not start with 'cerc-' (container names will add this prefix)")
|
||||||
|
|
||||||
|
|
||||||
|
def create_stack_yml(stack_dir: Path, name: str, template: dict, repo_url: str) -> None:
|
||||||
|
"""Create stack.yml file"""
|
||||||
|
config = {
|
||||||
|
"version": "1.2",
|
||||||
|
"name": name,
|
||||||
|
"description": template.get("description", f"Stack: {name}"),
|
||||||
|
"repos": [repo_url] if repo_url else [],
|
||||||
|
"containers": [f"cerc/{name}"],
|
||||||
|
"pods": [name],
|
||||||
|
}
|
||||||
|
|
||||||
|
stack_dir.mkdir(parents=True, exist_ok=True)
|
||||||
|
with open(stack_dir / "stack.yml", "w") as f:
|
||||||
|
get_yaml().dump(config, f)
|
||||||
|
|
||||||
|
|
||||||
|
def create_dockerfile(container_dir: Path, name: str, template: dict) -> None:
|
||||||
|
"""Create Dockerfile"""
|
||||||
|
base_image = template.get("base_image", "node:20-bullseye-slim")
|
||||||
|
port = template.get("port", 3000)
|
||||||
|
|
||||||
|
dockerfile_content = f'''# Build stage
|
||||||
|
FROM {base_image} AS builder
|
||||||
|
|
||||||
|
WORKDIR /app
|
||||||
|
COPY package*.json ./
|
||||||
|
RUN npm ci
|
||||||
|
COPY . .
|
||||||
|
RUN npm run build
|
||||||
|
|
||||||
|
# Production stage
|
||||||
|
FROM {base_image}
|
||||||
|
|
||||||
|
WORKDIR /app
|
||||||
|
COPY package*.json ./
|
||||||
|
RUN npm ci --only=production
|
||||||
|
COPY --from=builder /app/dist ./dist
|
||||||
|
|
||||||
|
EXPOSE {port}
|
||||||
|
CMD ["npm", "run", "start"]
|
||||||
|
'''
|
||||||
|
|
||||||
|
container_dir.mkdir(parents=True, exist_ok=True)
|
||||||
|
with open(container_dir / "Dockerfile", "w") as f:
|
||||||
|
f.write(dockerfile_content)
|
||||||
|
|
||||||
|
|
||||||
|
def create_build_script(container_dir: Path, name: str) -> None:
|
||||||
|
"""Create build.sh script"""
|
||||||
|
build_script = f'''#!/usr/bin/env bash
|
||||||
|
# Build cerc/{name}
|
||||||
|
|
||||||
|
source ${{CERC_CONTAINER_BASE_DIR}}/build-base.sh
|
||||||
|
|
||||||
|
SCRIPT_DIR=$( cd -- "$( dirname -- "${{BASH_SOURCE[0]}}" )" &> /dev/null && pwd )
|
||||||
|
|
||||||
|
docker build -t cerc/{name}:local \\
|
||||||
|
-f ${{SCRIPT_DIR}}/Dockerfile \\
|
||||||
|
${{build_command_args}} \\
|
||||||
|
${{CERC_REPO_BASE_DIR}}/{name}
|
||||||
|
'''
|
||||||
|
|
||||||
|
build_path = container_dir / "build.sh"
|
||||||
|
with open(build_path, "w") as f:
|
||||||
|
f.write(build_script)
|
||||||
|
|
||||||
|
# Make executable
|
||||||
|
os.chmod(build_path, 0o755)
|
||||||
|
|
||||||
|
|
||||||
|
def create_compose_file(compose_dir: Path, name: str, template: dict) -> None:
|
||||||
|
"""Create docker-compose file"""
|
||||||
|
port = template.get("port", 3000)
|
||||||
|
|
||||||
|
compose_content = {
|
||||||
|
"version": "3.8",
|
||||||
|
"services": {
|
||||||
|
name: {
|
||||||
|
"image": f"cerc/{name}:local",
|
||||||
|
"restart": "unless-stopped",
|
||||||
|
"ports": [f"${{HOST_PORT:-{port}}}:{port}"],
|
||||||
|
"environment": {
|
||||||
|
"NODE_ENV": "${NODE_ENV:-production}",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
with open(compose_dir / f"docker-compose-{name}.yml", "w") as f:
|
||||||
|
get_yaml().dump(compose_content, f)
|
||||||
|
|
||||||
|
|
||||||
|
def update_list_file(data_dir: Path, filename: str, entry: str) -> None:
|
||||||
|
"""Add entry to a list file if not already present"""
|
||||||
|
list_path = data_dir / filename
|
||||||
|
|
||||||
|
# Read existing entries
|
||||||
|
existing = set()
|
||||||
|
if list_path.exists():
|
||||||
|
with open(list_path, "r") as f:
|
||||||
|
existing = set(line.strip() for line in f if line.strip())
|
||||||
|
|
||||||
|
# Add new entry
|
||||||
|
if entry not in existing:
|
||||||
|
with open(list_path, "a") as f:
|
||||||
|
f.write(f"{entry}\n")
|
||||||
|
|
||||||
|
|
||||||
|
@click.command()
|
||||||
|
@click.option("--name", required=True, help="Name of the new stack (lowercase, hyphens)")
|
||||||
|
@click.option("--type", "stack_type", default="webapp",
|
||||||
|
type=click.Choice(list(STACK_TEMPLATES.keys())),
|
||||||
|
help="Stack template type")
|
||||||
|
@click.option("--repo", help="Git repository URL (e.g., github.com/org/repo)")
|
||||||
|
@click.option("--force", is_flag=True, help="Overwrite existing files")
|
||||||
|
@click.pass_context
|
||||||
|
def command(ctx, name: str, stack_type: str, repo: str, force: bool):
|
||||||
|
"""Create a new stack with all required files.
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
|
||||||
|
laconic-so create-stack --name my-app --type webapp
|
||||||
|
|
||||||
|
laconic-so create-stack --name my-service --type service --repo github.com/org/repo
|
||||||
|
"""
|
||||||
|
# Validate
|
||||||
|
validate_stack_name(name)
|
||||||
|
|
||||||
|
template = STACK_TEMPLATES[stack_type]
|
||||||
|
data_dir = get_data_dir()
|
||||||
|
|
||||||
|
# Define paths
|
||||||
|
stack_dir = data_dir / "stacks" / name
|
||||||
|
container_dir = data_dir / "container-build" / f"cerc-{name}"
|
||||||
|
compose_dir = data_dir / "compose"
|
||||||
|
|
||||||
|
# Check for existing files
|
||||||
|
if not force:
|
||||||
|
if stack_dir.exists():
|
||||||
|
error_exit(f"Stack already exists: {stack_dir}\nUse --force to overwrite")
|
||||||
|
if container_dir.exists():
|
||||||
|
error_exit(f"Container build dir exists: {container_dir}\nUse --force to overwrite")
|
||||||
|
|
||||||
|
# Dry run check
|
||||||
|
if opts.o.dry_run:
|
||||||
|
print(f"Would create stack '{name}' with template '{stack_type}':")
|
||||||
|
print(f" - {stack_dir}/stack.yml")
|
||||||
|
print(f" - {container_dir}/Dockerfile")
|
||||||
|
print(f" - {container_dir}/build.sh")
|
||||||
|
print(f" - {compose_dir}/docker-compose-{name}.yml")
|
||||||
|
print(f" - Update repository-list.txt")
|
||||||
|
print(f" - Update container-image-list.txt")
|
||||||
|
print(f" - Update pod-list.txt")
|
||||||
|
return
|
||||||
|
|
||||||
|
# Create files
|
||||||
|
if not opts.o.quiet:
|
||||||
|
print(f"Creating stack '{name}' with template '{stack_type}'...")
|
||||||
|
|
||||||
|
create_stack_yml(stack_dir, name, template, repo)
|
||||||
|
if opts.o.verbose:
|
||||||
|
print(f" Created {stack_dir}/stack.yml")
|
||||||
|
|
||||||
|
create_dockerfile(container_dir, name, template)
|
||||||
|
if opts.o.verbose:
|
||||||
|
print(f" Created {container_dir}/Dockerfile")
|
||||||
|
|
||||||
|
create_build_script(container_dir, name)
|
||||||
|
if opts.o.verbose:
|
||||||
|
print(f" Created {container_dir}/build.sh")
|
||||||
|
|
||||||
|
create_compose_file(compose_dir, name, template)
|
||||||
|
if opts.o.verbose:
|
||||||
|
print(f" Created {compose_dir}/docker-compose-{name}.yml")
|
||||||
|
|
||||||
|
# Update list files
|
||||||
|
if repo:
|
||||||
|
update_list_file(data_dir, "repository-list.txt", repo)
|
||||||
|
if opts.o.verbose:
|
||||||
|
print(f" Added {repo} to repository-list.txt")
|
||||||
|
|
||||||
|
update_list_file(data_dir, "container-image-list.txt", f"cerc/{name}")
|
||||||
|
if opts.o.verbose:
|
||||||
|
print(f" Added cerc/{name} to container-image-list.txt")
|
||||||
|
|
||||||
|
update_list_file(data_dir, "pod-list.txt", name)
|
||||||
|
if opts.o.verbose:
|
||||||
|
print(f" Added {name} to pod-list.txt")
|
||||||
|
|
||||||
|
# Summary
|
||||||
|
if not opts.o.quiet:
|
||||||
|
print(f"\nStack '{name}' created successfully!")
|
||||||
|
print(f"\nNext steps:")
|
||||||
|
print(f" 1. Edit {stack_dir}/stack.yml")
|
||||||
|
print(f" 2. Customize {container_dir}/Dockerfile")
|
||||||
|
print(f" 3. Run: laconic-so --stack {name} build-containers")
|
||||||
|
print(f" 4. Run: laconic-so --stack {name} deploy-system up")
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Register Command in main.py
|
||||||
|
|
||||||
|
**Edit `stack_orchestrator/main.py`**
|
||||||
|
|
||||||
|
Add import:
|
||||||
|
```python
|
||||||
|
from stack_orchestrator.create import create_stack
|
||||||
|
```
|
||||||
|
|
||||||
|
Add command registration (after line ~78):
|
||||||
|
```python
|
||||||
|
cli.add_command(create_stack.command, "create-stack")
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Implementation Steps
|
||||||
|
|
||||||
|
### Step 1: Create module structure
|
||||||
|
```bash
|
||||||
|
mkdir -p stack_orchestrator/create
|
||||||
|
touch stack_orchestrator/create/__init__.py
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 2: Create the command file
|
||||||
|
Create `stack_orchestrator/create/create_stack.py` with the code above.
|
||||||
|
|
||||||
|
### Step 3: Register in main.py
|
||||||
|
Add the import and `cli.add_command()` line.
|
||||||
|
|
||||||
|
### Step 4: Test the command
|
||||||
|
```bash
|
||||||
|
# Show help
|
||||||
|
laconic-so create-stack --help
|
||||||
|
|
||||||
|
# Dry run
|
||||||
|
laconic-so --dry-run create-stack --name test-app --type webapp
|
||||||
|
|
||||||
|
# Create a stack
|
||||||
|
laconic-so create-stack --name test-app --type webapp --repo github.com/org/test-app
|
||||||
|
|
||||||
|
# Verify
|
||||||
|
ls -la stack_orchestrator/data/stacks/test-app/
|
||||||
|
cat stack_orchestrator/data/stacks/test-app/stack.yml
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Template Types
|
||||||
|
|
||||||
|
| Type | Base Image | Port | Use Case |
|
||||||
|
|------|------------|------|----------|
|
||||||
|
| `webapp` | node:20-bullseye-slim | 3000 | React/Vue/Next.js apps |
|
||||||
|
| `service` | python:3.11-slim | 8080 | Python backend services |
|
||||||
|
| `empty` | none | none | Custom from scratch |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Future Enhancements
|
||||||
|
|
||||||
|
1. **Interactive mode** - Prompt for values if not provided
|
||||||
|
2. **More templates** - Go, Rust, database stacks
|
||||||
|
3. **Template from existing** - `--from-stack existing-stack`
|
||||||
|
4. **External stack support** - Create in custom directory
|
||||||
|
5. **Validation command** - `laconic-so validate-stack --name my-stack`
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Files Modified
|
||||||
|
|
||||||
|
| File | Change |
|
||||||
|
|------|--------|
|
||||||
|
| `stack_orchestrator/create/__init__.py` | New (empty) |
|
||||||
|
| `stack_orchestrator/create/create_stack.py` | New (command implementation) |
|
||||||
|
| `stack_orchestrator/main.py` | Add import and `cli.add_command()` |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Verification
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. Command appears in help
|
||||||
|
laconic-so --help | grep create-stack
|
||||||
|
|
||||||
|
# 2. Dry run works
|
||||||
|
laconic-so --dry-run create-stack --name verify-test --type webapp
|
||||||
|
|
||||||
|
# 3. Full creation works
|
||||||
|
laconic-so create-stack --name verify-test --type webapp
|
||||||
|
ls stack_orchestrator/data/stacks/verify-test/
|
||||||
|
ls stack_orchestrator/data/container-build/cerc-verify-test/
|
||||||
|
ls stack_orchestrator/data/compose/docker-compose-verify-test.yml
|
||||||
|
|
||||||
|
# 4. Build works
|
||||||
|
laconic-so --stack verify-test build-containers
|
||||||
|
|
||||||
|
# 5. Cleanup
|
||||||
|
rm -rf stack_orchestrator/data/stacks/verify-test
|
||||||
|
rm -rf stack_orchestrator/data/container-build/cerc-verify-test
|
||||||
|
rm stack_orchestrator/data/compose/docker-compose-verify-test.yml
|
||||||
|
```
|
||||||
16
TODO.md
Normal file
16
TODO.md
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
# TODO
|
||||||
|
|
||||||
|
## Features Needed
|
||||||
|
|
||||||
|
### Update Stack Command
|
||||||
|
We need an "update stack" command in stack orchestrator and cleaner documentation regarding how to do continuous deployment with and without payments.
|
||||||
|
|
||||||
|
**Context**: Currently, `deploy init` generates a spec file and `deploy create` creates a deployment directory. The `deployment update` command (added by Thomas Lackey) only syncs env vars and restarts - it doesn't regenerate configurations. There's a gap in the workflow for updating stack configurations after initial deployment.
|
||||||
|
|
||||||
|
## Architecture Refactoring
|
||||||
|
|
||||||
|
### Separate Deployer from Stack Orchestrator CLI
|
||||||
|
The deployer logic should be decoupled from the CLI tool to allow independent development and reuse.
|
||||||
|
|
||||||
|
### Separate Stacks from Stack Orchestrator Repo
|
||||||
|
Stacks should live in their own repositories, not bundled with the orchestrator tool. This allows stacks to evolve independently and be maintained by different teams.
|
||||||
550
docs/docker-compose-deployment.md
Normal file
550
docs/docker-compose-deployment.md
Normal file
@ -0,0 +1,550 @@
|
|||||||
|
# Docker Compose Deployment Guide
|
||||||
|
|
||||||
|
## Introduction
|
||||||
|
|
||||||
|
### What is a Deployer?
|
||||||
|
|
||||||
|
In stack-orchestrator, a **deployer** provides a uniform interface for orchestrating containerized applications. This guide focuses on Docker Compose deployments, which is the default and recommended deployment mode.
|
||||||
|
|
||||||
|
While stack-orchestrator also supports Kubernetes (`k8s`) and Kind (`k8s-kind`) deployments, those are out of scope for this guide. See the [Kubernetes Enhancements](./k8s-deployment-enhancements.md) documentation for advanced deployment options.
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
To deploy stacks using Docker Compose, you need:
|
||||||
|
|
||||||
|
- Docker Engine (20.10+)
|
||||||
|
- Docker Compose plugin (v2.0+)
|
||||||
|
- Python 3.8+
|
||||||
|
- stack-orchestrator installed (`laconic-so`)
|
||||||
|
|
||||||
|
**That's it!** No additional infrastructure is required. If you have Docker installed, you're ready to deploy.
|
||||||
|
|
||||||
|
## Deployment Workflow
|
||||||
|
|
||||||
|
The typical deployment workflow consists of four main steps:
|
||||||
|
|
||||||
|
1. **Setup repositories and build containers** (first time only)
|
||||||
|
2. **Initialize deployment specification**
|
||||||
|
3. **Create deployment directory**
|
||||||
|
4. **Start and manage services**
|
||||||
|
|
||||||
|
## Quick Start Example
|
||||||
|
|
||||||
|
Here's a complete example using the built-in `test` stack:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Step 1: Setup (first time only)
|
||||||
|
laconic-so --stack test setup-repositories
|
||||||
|
laconic-so --stack test build-containers
|
||||||
|
|
||||||
|
# Step 2: Initialize deployment spec
|
||||||
|
laconic-so --stack test deploy init --output test-spec.yml
|
||||||
|
|
||||||
|
# Step 3: Create deployment directory
|
||||||
|
laconic-so --stack test deploy create \
|
||||||
|
--spec-file test-spec.yml \
|
||||||
|
--deployment-dir test-deployment
|
||||||
|
|
||||||
|
# Step 4: Start services
|
||||||
|
laconic-so deployment --dir test-deployment start
|
||||||
|
|
||||||
|
# View running services
|
||||||
|
laconic-so deployment --dir test-deployment ps
|
||||||
|
|
||||||
|
# View logs
|
||||||
|
laconic-so deployment --dir test-deployment logs
|
||||||
|
|
||||||
|
# Stop services (preserves data)
|
||||||
|
laconic-so deployment --dir test-deployment stop
|
||||||
|
```
|
||||||
|
|
||||||
|
## Deployment Workflows
|
||||||
|
|
||||||
|
Stack-orchestrator supports two deployment workflows:
|
||||||
|
|
||||||
|
### 1. Deployment Directory Workflow (Recommended)
|
||||||
|
|
||||||
|
This workflow creates a persistent deployment directory that contains all configuration and data.
|
||||||
|
|
||||||
|
**When to use:**
|
||||||
|
- Production deployments
|
||||||
|
- When you need to preserve configuration
|
||||||
|
- When you want to manage multiple deployments
|
||||||
|
- When you need persistent volume data
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Initialize deployment spec
|
||||||
|
laconic-so --stack fixturenet-eth deploy init --output eth-spec.yml
|
||||||
|
|
||||||
|
# Optionally edit eth-spec.yml to customize configuration
|
||||||
|
|
||||||
|
# Create deployment directory
|
||||||
|
laconic-so --stack fixturenet-eth deploy create \
|
||||||
|
--spec-file eth-spec.yml \
|
||||||
|
--deployment-dir my-eth-deployment
|
||||||
|
|
||||||
|
# Start the deployment
|
||||||
|
laconic-so deployment --dir my-eth-deployment start
|
||||||
|
|
||||||
|
# Manage the deployment
|
||||||
|
laconic-so deployment --dir my-eth-deployment ps
|
||||||
|
laconic-so deployment --dir my-eth-deployment logs
|
||||||
|
laconic-so deployment --dir my-eth-deployment stop
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Quick Deploy Workflow
|
||||||
|
|
||||||
|
This workflow deploys directly without creating a persistent deployment directory.
|
||||||
|
|
||||||
|
**When to use:**
|
||||||
|
- Quick testing
|
||||||
|
- Temporary deployments
|
||||||
|
- Simple stacks that don't require customization
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Start the stack directly
|
||||||
|
laconic-so --stack test deploy up
|
||||||
|
|
||||||
|
# Check service status
|
||||||
|
laconic-so --stack test deploy port test 80
|
||||||
|
|
||||||
|
# View logs
|
||||||
|
laconic-so --stack test deploy logs
|
||||||
|
|
||||||
|
# Stop (preserves volumes)
|
||||||
|
laconic-so --stack test deploy down
|
||||||
|
|
||||||
|
# Stop and remove volumes
|
||||||
|
laconic-so --stack test deploy down --delete-volumes
|
||||||
|
```
|
||||||
|
|
||||||
|
## Real-World Example: Ethereum Fixturenet
|
||||||
|
|
||||||
|
Deploy a local Ethereum testnet with Geth and Lighthouse:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Setup (first time only)
|
||||||
|
laconic-so --stack fixturenet-eth setup-repositories
|
||||||
|
laconic-so --stack fixturenet-eth build-containers
|
||||||
|
|
||||||
|
# Initialize with default configuration
|
||||||
|
laconic-so --stack fixturenet-eth deploy init --output eth-spec.yml
|
||||||
|
|
||||||
|
# Create deployment
|
||||||
|
laconic-so --stack fixturenet-eth deploy create \
|
||||||
|
--spec-file eth-spec.yml \
|
||||||
|
--deployment-dir fixturenet-eth-deployment
|
||||||
|
|
||||||
|
# Start the network
|
||||||
|
laconic-so deployment --dir fixturenet-eth-deployment start
|
||||||
|
|
||||||
|
# Check status
|
||||||
|
laconic-so deployment --dir fixturenet-eth-deployment ps
|
||||||
|
|
||||||
|
# Access logs from specific service
|
||||||
|
laconic-so deployment --dir fixturenet-eth-deployment logs fixturenet-eth-geth-1
|
||||||
|
|
||||||
|
# Stop the network (preserves blockchain data)
|
||||||
|
laconic-so deployment --dir fixturenet-eth-deployment stop
|
||||||
|
|
||||||
|
# Start again - blockchain data is preserved
|
||||||
|
laconic-so deployment --dir fixturenet-eth-deployment start
|
||||||
|
|
||||||
|
# Clean up everything including data
|
||||||
|
laconic-so deployment --dir fixturenet-eth-deployment stop --delete-volumes
|
||||||
|
```
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
### Passing Configuration Parameters
|
||||||
|
|
||||||
|
Configuration can be passed in three ways:
|
||||||
|
|
||||||
|
**1. At init time via `--config` flag:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
laconic-so --stack test deploy init --output spec.yml \
|
||||||
|
--config PARAM1=value1,PARAM2=value2
|
||||||
|
```
|
||||||
|
|
||||||
|
**2. Edit the spec file after init:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Initialize
|
||||||
|
laconic-so --stack test deploy init --output spec.yml
|
||||||
|
|
||||||
|
# Edit spec.yml
|
||||||
|
vim spec.yml
|
||||||
|
```
|
||||||
|
|
||||||
|
Example spec.yml:
|
||||||
|
```yaml
|
||||||
|
stack: test
|
||||||
|
config:
|
||||||
|
PARAM1: value1
|
||||||
|
PARAM2: value2
|
||||||
|
```
|
||||||
|
|
||||||
|
**3. Docker Compose defaults:**
|
||||||
|
|
||||||
|
Environment variables defined in the stack's `docker-compose-*.yml` files are used as defaults. Configuration from the spec file overrides these defaults.
|
||||||
|
|
||||||
|
### Port Mapping
|
||||||
|
|
||||||
|
By default, services are accessible on randomly assigned host ports. To find the mapped port:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Find the host port for container port 80 on service 'webapp'
|
||||||
|
laconic-so deployment --dir my-deployment port webapp 80
|
||||||
|
|
||||||
|
# Output example: 0.0.0.0:32768
|
||||||
|
```
|
||||||
|
|
||||||
|
To configure fixed ports, edit the spec file before creating the deployment:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
network:
|
||||||
|
ports:
|
||||||
|
webapp:
|
||||||
|
- '8080:80' # Maps host port 8080 to container port 80
|
||||||
|
api:
|
||||||
|
- '3000:3000'
|
||||||
|
```
|
||||||
|
|
||||||
|
Then create the deployment:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
laconic-so --stack my-stack deploy create \
|
||||||
|
--spec-file spec.yml \
|
||||||
|
--deployment-dir my-deployment
|
||||||
|
```
|
||||||
|
|
||||||
|
### Volume Persistence
|
||||||
|
|
||||||
|
Volumes are preserved between stop/start cycles by default:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Stop but keep data
|
||||||
|
laconic-so deployment --dir my-deployment stop
|
||||||
|
|
||||||
|
# Start again - data is still there
|
||||||
|
laconic-so deployment --dir my-deployment start
|
||||||
|
```
|
||||||
|
|
||||||
|
To completely remove all data:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Stop and delete all volumes
|
||||||
|
laconic-so deployment --dir my-deployment stop --delete-volumes
|
||||||
|
```
|
||||||
|
|
||||||
|
Volume data is stored in `<deployment-dir>/data/`.
|
||||||
|
|
||||||
|
## Common Operations
|
||||||
|
|
||||||
|
### Viewing Logs
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# All services, continuous follow
|
||||||
|
laconic-so deployment --dir my-deployment logs --follow
|
||||||
|
|
||||||
|
# Last 100 lines from all services
|
||||||
|
laconic-so deployment --dir my-deployment logs --tail 100
|
||||||
|
|
||||||
|
# Specific service only
|
||||||
|
laconic-so deployment --dir my-deployment logs webapp
|
||||||
|
|
||||||
|
# Combine options
|
||||||
|
laconic-so deployment --dir my-deployment logs --tail 50 --follow webapp
|
||||||
|
```
|
||||||
|
|
||||||
|
### Executing Commands in Containers
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Execute a command in a running service
|
||||||
|
laconic-so deployment --dir my-deployment exec webapp ls -la
|
||||||
|
|
||||||
|
# Interactive shell
|
||||||
|
laconic-so deployment --dir my-deployment exec webapp /bin/bash
|
||||||
|
|
||||||
|
# Run command with specific environment variables
|
||||||
|
laconic-so deployment --dir my-deployment exec webapp env VAR=value command
|
||||||
|
```
|
||||||
|
|
||||||
|
### Checking Service Status
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# List all running services
|
||||||
|
laconic-so deployment --dir my-deployment ps
|
||||||
|
|
||||||
|
# Check using Docker directly
|
||||||
|
docker ps
|
||||||
|
```
|
||||||
|
|
||||||
|
### Updating a Running Deployment
|
||||||
|
|
||||||
|
If you need to change configuration after deployment:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. Edit the spec file
|
||||||
|
vim my-deployment/spec.yml
|
||||||
|
|
||||||
|
# 2. Regenerate configuration
|
||||||
|
laconic-so deployment --dir my-deployment update
|
||||||
|
|
||||||
|
# 3. Restart services to apply changes
|
||||||
|
laconic-so deployment --dir my-deployment stop
|
||||||
|
laconic-so deployment --dir my-deployment start
|
||||||
|
```
|
||||||
|
|
||||||
|
## Multi-Service Deployments
|
||||||
|
|
||||||
|
Many stacks deploy multiple services that work together:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Deploy a stack with multiple services
|
||||||
|
laconic-so --stack laconicd-with-console deploy init --output spec.yml
|
||||||
|
laconic-so --stack laconicd-with-console deploy create \
|
||||||
|
--spec-file spec.yml \
|
||||||
|
--deployment-dir laconicd-deployment
|
||||||
|
|
||||||
|
laconic-so deployment --dir laconicd-deployment start
|
||||||
|
|
||||||
|
# View all services
|
||||||
|
laconic-so deployment --dir laconicd-deployment ps
|
||||||
|
|
||||||
|
# View logs from specific services
|
||||||
|
laconic-so deployment --dir laconicd-deployment logs laconicd
|
||||||
|
laconic-so deployment --dir laconicd-deployment logs console
|
||||||
|
```
|
||||||
|
|
||||||
|
## ConfigMaps
|
||||||
|
|
||||||
|
ConfigMaps allow you to mount configuration files into containers:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. Create the config directory in your deployment
|
||||||
|
mkdir -p my-deployment/data/my-config
|
||||||
|
echo "database_url=postgres://localhost" > my-deployment/data/my-config/app.conf
|
||||||
|
|
||||||
|
# 2. Reference in spec file
|
||||||
|
vim my-deployment/spec.yml
|
||||||
|
```
|
||||||
|
|
||||||
|
Add to spec.yml:
|
||||||
|
```yaml
|
||||||
|
configmaps:
|
||||||
|
my-config: ./data/my-config
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 3. Restart to apply
|
||||||
|
laconic-so deployment --dir my-deployment stop
|
||||||
|
laconic-so deployment --dir my-deployment start
|
||||||
|
```
|
||||||
|
|
||||||
|
The files will be mounted in the container at `/config/` (or as specified by the stack).
|
||||||
|
|
||||||
|
## Deployment Directory Structure
|
||||||
|
|
||||||
|
A typical deployment directory contains:
|
||||||
|
|
||||||
|
```
|
||||||
|
my-deployment/
|
||||||
|
├── compose/
|
||||||
|
│ └── docker-compose-*.yml # Generated compose files
|
||||||
|
├── config.env # Environment variables
|
||||||
|
├── deployment.yml # Deployment metadata
|
||||||
|
├── spec.yml # Deployment specification
|
||||||
|
└── data/ # Volume mounts and configs
|
||||||
|
├── service-data/ # Persistent service data
|
||||||
|
└── config-maps/ # ConfigMap files
|
||||||
|
```
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### Common Issues
|
||||||
|
|
||||||
|
**Problem: "Cannot connect to Docker daemon"**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Ensure Docker is running
|
||||||
|
docker ps
|
||||||
|
|
||||||
|
# Start Docker if needed (macOS)
|
||||||
|
open -a Docker
|
||||||
|
|
||||||
|
# Start Docker (Linux)
|
||||||
|
sudo systemctl start docker
|
||||||
|
```
|
||||||
|
|
||||||
|
**Problem: "Port already in use"**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Either stop the conflicting service or use different ports
|
||||||
|
# Edit spec.yml before creating deployment:
|
||||||
|
|
||||||
|
network:
|
||||||
|
ports:
|
||||||
|
webapp:
|
||||||
|
- '8081:80' # Use 8081 instead of 8080
|
||||||
|
```
|
||||||
|
|
||||||
|
**Problem: "Image not found"**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Build containers first
|
||||||
|
laconic-so --stack your-stack build-containers
|
||||||
|
```
|
||||||
|
|
||||||
|
**Problem: Volumes not persisting**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Check if you used --delete-volumes when stopping
|
||||||
|
# Volume data is in: <deployment-dir>/data/
|
||||||
|
|
||||||
|
# Don't use --delete-volumes if you want to keep data:
|
||||||
|
laconic-so deployment --dir my-deployment stop
|
||||||
|
|
||||||
|
# Only use --delete-volumes when you want to reset completely:
|
||||||
|
laconic-so deployment --dir my-deployment stop --delete-volumes
|
||||||
|
```
|
||||||
|
|
||||||
|
**Problem: Services not starting**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Check logs for errors
|
||||||
|
laconic-so deployment --dir my-deployment logs
|
||||||
|
|
||||||
|
# Check Docker container status
|
||||||
|
docker ps -a
|
||||||
|
|
||||||
|
# Try stopping and starting again
|
||||||
|
laconic-so deployment --dir my-deployment stop
|
||||||
|
laconic-so deployment --dir my-deployment start
|
||||||
|
```
|
||||||
|
|
||||||
|
### Inspecting Deployment State
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Check deployment directory structure
|
||||||
|
ls -la my-deployment/
|
||||||
|
|
||||||
|
# Check running containers
|
||||||
|
docker ps
|
||||||
|
|
||||||
|
# Check container details
|
||||||
|
docker inspect <container-name>
|
||||||
|
|
||||||
|
# Check networks
|
||||||
|
docker network ls
|
||||||
|
|
||||||
|
# Check volumes
|
||||||
|
docker volume ls
|
||||||
|
```
|
||||||
|
|
||||||
|
## CLI Commands Reference
|
||||||
|
|
||||||
|
### Stack Operations
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Clone required repositories
|
||||||
|
laconic-so --stack <name> setup-repositories
|
||||||
|
|
||||||
|
# Build container images
|
||||||
|
laconic-so --stack <name> build-containers
|
||||||
|
```
|
||||||
|
|
||||||
|
### Deployment Initialization
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Initialize deployment spec with defaults
|
||||||
|
laconic-so --stack <name> deploy init --output <spec-file>
|
||||||
|
|
||||||
|
# Initialize with configuration
|
||||||
|
laconic-so --stack <name> deploy init --output <spec-file> \
|
||||||
|
--config PARAM1=value1,PARAM2=value2
|
||||||
|
```
|
||||||
|
|
||||||
|
### Deployment Creation
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Create deployment directory from spec
|
||||||
|
laconic-so --stack <name> deploy create \
|
||||||
|
--spec-file <spec-file> \
|
||||||
|
--deployment-dir <dir>
|
||||||
|
```
|
||||||
|
|
||||||
|
### Deployment Management
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Start all services
|
||||||
|
laconic-so deployment --dir <dir> start
|
||||||
|
|
||||||
|
# Stop services (preserves volumes)
|
||||||
|
laconic-so deployment --dir <dir> stop
|
||||||
|
|
||||||
|
# Stop and remove volumes
|
||||||
|
laconic-so deployment --dir <dir> stop --delete-volumes
|
||||||
|
|
||||||
|
# List running services
|
||||||
|
laconic-so deployment --dir <dir> ps
|
||||||
|
|
||||||
|
# View logs
|
||||||
|
laconic-so deployment --dir <dir> logs [--tail N] [--follow] [service]
|
||||||
|
|
||||||
|
# Show mapped port
|
||||||
|
laconic-so deployment --dir <dir> port <service> <private-port>
|
||||||
|
|
||||||
|
# Execute command in service
|
||||||
|
laconic-so deployment --dir <dir> exec <service> <command>
|
||||||
|
|
||||||
|
# Update configuration
|
||||||
|
laconic-so deployment --dir <dir> update
|
||||||
|
```
|
||||||
|
|
||||||
|
### Quick Deploy Commands
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Start stack directly
|
||||||
|
laconic-so --stack <name> deploy up
|
||||||
|
|
||||||
|
# Stop stack
|
||||||
|
laconic-so --stack <name> deploy down [--delete-volumes]
|
||||||
|
|
||||||
|
# View logs
|
||||||
|
laconic-so --stack <name> deploy logs
|
||||||
|
|
||||||
|
# Show port mapping
|
||||||
|
laconic-so --stack <name> deploy port <service> <port>
|
||||||
|
```
|
||||||
|
|
||||||
|
## Related Documentation
|
||||||
|
|
||||||
|
- [CLI Reference](./cli.md) - Complete CLI command documentation
|
||||||
|
- [Adding a New Stack](./adding-a-new-stack.md) - Creating custom stacks
|
||||||
|
- [Specification](./spec.md) - Internal structure and design
|
||||||
|
- [Kubernetes Enhancements](./k8s-deployment-enhancements.md) - Advanced K8s deployment options
|
||||||
|
- [Web App Deployment](./webapp.md) - Deploying web applications
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
For more examples, see the test scripts:
|
||||||
|
- `scripts/quick-deploy-test.sh` - Quick deployment example
|
||||||
|
- `tests/deploy/run-deploy-test.sh` - Comprehensive test showing all features
|
||||||
|
|
||||||
|
## Summary
|
||||||
|
|
||||||
|
- Docker Compose is the default and recommended deployment mode
|
||||||
|
- Two workflows: deployment directory (recommended) or quick deploy
|
||||||
|
- The standard workflow is: setup → build → init → create → start
|
||||||
|
- Configuration is flexible with multiple override layers
|
||||||
|
- Volume persistence is automatic unless explicitly deleted
|
||||||
|
- All deployment state is contained in the deployment directory
|
||||||
|
- For Kubernetes deployments, see separate K8s documentation
|
||||||
|
|
||||||
|
You're now ready to deploy stacks using stack-orchestrator with Docker Compose!
|
||||||
@ -1,9 +1,9 @@
|
|||||||
# Fetching pre-built container images
|
# Fetching pre-built container images
|
||||||
When Stack Orchestrator deploys a stack containing a suite of one or more containers it expects images for those containers to be on the local machine with a tag of the form `<image-name>:local`. Images for these containers can be built from source (and optionally base container images from public registries) with the `build-containers` subcommand.
|
When Stack Orchestrator deploys a stack containing a suite of one or more containers it expects images for those containers to be on the local machine with a tag of the form `<image-name>:local`. Images for these containers can be built from source (and optionally base container images from public registries) with the `build-containers` subcommand.
|
||||||
|
|
||||||
However, the task of building a large number of containers from source may consume considerable time and machine resources. This is where the `fetch-containers` subcommand steps in. It is designed to work exactly like `build-containers` but instead the images, pre-built, are fetched from an image registry then re-tagged for deployment. It can be used in place of `build-containers` for any stack provided the necessary containers, built for the local machine architecture (e.g. arm64 or x86-64) have already been published in an image registry.
|
However, the task of building a large number of containers from source may consume considerable time and machine resources. This is where the `fetch-containers` subcommand steps in. It is designed to work exactly like `build-containers` but instead the images, pre-built, are fetched from an image registry then re-tagged for deployment. It can be used in place of `build-containers` for any stack provided the necessary containers, built for the local machine architecture (e.g. arm64 or x86-64) have already been published in an image registry.
|
||||||
## Usage
|
## Usage
|
||||||
To use `fetch-containers`, provide an image registry path, a username and token/password with read access to the registry, and optionally specify `--force-local-overwrite`. If this argument is not specified and there is already a locally built or previously fetched image for a stack container on the machine, that image will not be overwritten and a warning is issued.
|
To use `fetch-containers`, provide an image registry path, a username and token/password with read access to the registry, and optionally specify `--force-local-overwrite`. If this argument is not specified and there is already a locally built or previously fetched image for a stack container on the machine, that image will not be overwritten and a warning is issued.
|
||||||
```
|
```
|
||||||
$ laconic-so --stack mobymask-v3-demo fetch-containers --image-registry git.vdb.to/cerc-io --registry-username <registry-user> --registry-token <registry-token> --force-local-overwrite
|
$ laconic-so --stack mobymask-v3-demo fetch-containers --image-registry git.vdb.to/cerc-io --registry-username <registry-user> --registry-token <registry-token> --force-local-overwrite
|
||||||
```
|
```
|
||||||
|
|||||||
@ -7,7 +7,7 @@ Deploy a local Gitea server, publish NPM packages to it, then use those packages
|
|||||||
```bash
|
```bash
|
||||||
laconic-so --stack build-support build-containers
|
laconic-so --stack build-support build-containers
|
||||||
laconic-so --stack package-registry setup-repositories
|
laconic-so --stack package-registry setup-repositories
|
||||||
laconic-so --stack package-registry build-containers
|
laconic-so --stack package-registry build-containers
|
||||||
laconic-so --stack package-registry deploy up
|
laconic-so --stack package-registry deploy up
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|||||||
113
docs/helm-chart-generation.md
Normal file
113
docs/helm-chart-generation.md
Normal file
@ -0,0 +1,113 @@
|
|||||||
|
# Helm Chart Generation
|
||||||
|
|
||||||
|
Generate Kubernetes Helm charts from stack compose files using Kompose.
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
Install Kompose:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Linux
|
||||||
|
curl -L https://github.com/kubernetes/kompose/releases/download/v1.34.0/kompose-linux-amd64 -o kompose
|
||||||
|
chmod +x kompose
|
||||||
|
sudo mv kompose /usr/local/bin/
|
||||||
|
|
||||||
|
# macOS
|
||||||
|
brew install kompose
|
||||||
|
|
||||||
|
# Verify
|
||||||
|
kompose version
|
||||||
|
```
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### 1. Create spec file
|
||||||
|
|
||||||
|
```bash
|
||||||
|
laconic-so --stack <stack-name> deploy --deploy-to k8s init \
|
||||||
|
--kube-config ~/.kube/config \
|
||||||
|
--output spec.yml
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Generate Helm chart
|
||||||
|
|
||||||
|
```bash
|
||||||
|
laconic-so --stack <stack-name> deploy create \
|
||||||
|
--spec-file spec.yml \
|
||||||
|
--deployment-dir my-deployment \
|
||||||
|
--helm-chart
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Deploy to Kubernetes
|
||||||
|
|
||||||
|
```bash
|
||||||
|
helm install my-release my-deployment/chart
|
||||||
|
kubectl get pods -n zenith
|
||||||
|
```
|
||||||
|
|
||||||
|
## Output Structure
|
||||||
|
|
||||||
|
```bash
|
||||||
|
my-deployment/
|
||||||
|
├── spec.yml # Reference
|
||||||
|
├── stack.yml # Reference
|
||||||
|
└── chart/ # Helm chart
|
||||||
|
├── Chart.yaml
|
||||||
|
├── README.md
|
||||||
|
└── templates/
|
||||||
|
└── *.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
## Example
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Generate chart for stage1-zenithd
|
||||||
|
laconic-so --stack stage1-zenithd deploy --deploy-to k8s init \
|
||||||
|
--kube-config ~/.kube/config \
|
||||||
|
--output stage1-spec.yml
|
||||||
|
|
||||||
|
laconic-so --stack stage1-zenithd deploy create \
|
||||||
|
--spec-file stage1-spec.yml \
|
||||||
|
--deployment-dir stage1-deployment \
|
||||||
|
--helm-chart
|
||||||
|
|
||||||
|
# Deploy
|
||||||
|
helm install stage1-zenithd stage1-deployment/chart
|
||||||
|
```
|
||||||
|
|
||||||
|
## Production Deployment (TODO)
|
||||||
|
|
||||||
|
### Local Development
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Access services using port-forward
|
||||||
|
kubectl port-forward service/zenithd 26657:26657
|
||||||
|
kubectl port-forward service/nginx-api-proxy 1317:80
|
||||||
|
kubectl port-forward service/cosmos-explorer 4173:4173
|
||||||
|
```
|
||||||
|
|
||||||
|
### Production Access Options
|
||||||
|
|
||||||
|
- Option 1: Ingress + cert-manager (Recommended)
|
||||||
|
- Install ingress-nginx + cert-manager
|
||||||
|
- Point DNS to cluster LoadBalancer IP
|
||||||
|
- Auto-provisions Let's Encrypt TLS certs
|
||||||
|
- Access: `https://api.zenith.example.com`
|
||||||
|
- Option 2: Cloud LoadBalancer
|
||||||
|
- Use cloud provider's LoadBalancer service type
|
||||||
|
- Point DNS to assigned external IP
|
||||||
|
- Manual TLS cert management
|
||||||
|
- Option 3: Bare Metal (MetalLB + Ingress)
|
||||||
|
- MetalLB provides LoadBalancer IPs from local network
|
||||||
|
- Same Ingress setup as cloud
|
||||||
|
- Option 4: NodePort + External Proxy
|
||||||
|
- Expose services on 30000-32767 range
|
||||||
|
- External nginx/Caddy proxies 80/443 → NodePort
|
||||||
|
- Manual cert management
|
||||||
|
|
||||||
|
### Changes Needed
|
||||||
|
|
||||||
|
- Add Ingress template to charts
|
||||||
|
- Add TLS configuration to values.yaml
|
||||||
|
- Document cert-manager setup
|
||||||
|
- Add production deployment guide
|
||||||
@ -24,4 +24,3 @@ node-tolerations:
|
|||||||
value: typeb
|
value: typeb
|
||||||
```
|
```
|
||||||
This example denotes that the stack's pods will tolerate a taint: `nodetype=typeb`
|
This example denotes that the stack's pods will tolerate a taint: `nodetype=typeb`
|
||||||
|
|
||||||
|
|||||||
@ -26,4 +26,3 @@ $ ./scripts/tag_new_release.sh 1 0 17
|
|||||||
$ ./scripts/build_shiv_package.sh
|
$ ./scripts/build_shiv_package.sh
|
||||||
$ ./scripts/publish_shiv_package_github.sh 1 0 17
|
$ ./scripts/publish_shiv_package_github.sh 1 0 17
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|||||||
@ -4,9 +4,9 @@ Note: this page is out of date (but still useful) - it will no longer be useful
|
|||||||
|
|
||||||
## Implementation
|
## Implementation
|
||||||
|
|
||||||
The orchestrator's operation is driven by files shown below.
|
The orchestrator's operation is driven by files shown below.
|
||||||
|
|
||||||
- `repository-list.txt` contains the list of git repositories;
|
- `repository-list.txt` contains the list of git repositories;
|
||||||
- `container-image-list.txt` contains the list of container image names
|
- `container-image-list.txt` contains the list of container image names
|
||||||
- `pod-list.txt` specifies the set of compose components (corresponding to individual docker-compose-xxx.yml files which may in turn specify more than one container).
|
- `pod-list.txt` specifies the set of compose components (corresponding to individual docker-compose-xxx.yml files which may in turn specify more than one container).
|
||||||
- `container-build/` contains the files required to build each container image
|
- `container-build/` contains the files required to build each container image
|
||||||
|
|||||||
@ -7,7 +7,7 @@ compilation and static page generation are separated in the `build-webapp` and `
|
|||||||
|
|
||||||
This offers much more flexibilty than standard Next.js build methods, since any environment variables accessed
|
This offers much more flexibilty than standard Next.js build methods, since any environment variables accessed
|
||||||
via `process.env`, whether for pages or for API, will have values drawn from their runtime deployment environment,
|
via `process.env`, whether for pages or for API, will have values drawn from their runtime deployment environment,
|
||||||
not their build environment.
|
not their build environment.
|
||||||
|
|
||||||
## Building
|
## Building
|
||||||
|
|
||||||
|
|||||||
128
laconic-network-deployment.md
Normal file
128
laconic-network-deployment.md
Normal file
@ -0,0 +1,128 @@
|
|||||||
|
# Deploying to the Laconic Network
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The Laconic network uses a **registry-based deployment model** where everything is published as blockchain records.
|
||||||
|
|
||||||
|
## Key Documentation in stack-orchestrator
|
||||||
|
|
||||||
|
- `docs/laconicd-with-console.md` - Setting up a laconicd network
|
||||||
|
- `docs/webapp.md` - Webapp building/running
|
||||||
|
- `stack_orchestrator/deploy/webapp/` - Implementation (14 modules)
|
||||||
|
|
||||||
|
## Core Concepts
|
||||||
|
|
||||||
|
### LRN (Laconic Resource Name)
|
||||||
|
Format: `lrn://laconic/[namespace]/[name]`
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
- `lrn://laconic/deployers/my-deployer-name`
|
||||||
|
- `lrn://laconic/dns/example.com`
|
||||||
|
- `lrn://laconic/deployments/example.com`
|
||||||
|
|
||||||
|
### Registry Record Types
|
||||||
|
|
||||||
|
| Record Type | Purpose |
|
||||||
|
|-------------|---------|
|
||||||
|
| `ApplicationRecord` | Published app metadata |
|
||||||
|
| `WebappDeployer` | Deployment service offering |
|
||||||
|
| `ApplicationDeploymentRequest` | User's request to deploy |
|
||||||
|
| `ApplicationDeploymentAuction` | Optional bidding for deployers |
|
||||||
|
| `ApplicationDeploymentRecord` | Completed deployment result |
|
||||||
|
|
||||||
|
## Deployment Workflows
|
||||||
|
|
||||||
|
### 1. Direct Deployment
|
||||||
|
|
||||||
|
```
|
||||||
|
User publishes ApplicationDeploymentRequest
|
||||||
|
→ targets specific WebappDeployer (by LRN)
|
||||||
|
→ includes payment TX hash
|
||||||
|
→ Deployer picks up request, builds, deploys, publishes result
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Auction-Based Deployment
|
||||||
|
|
||||||
|
```
|
||||||
|
User publishes ApplicationDeploymentAuction
|
||||||
|
→ Deployers bid (commit/reveal phases)
|
||||||
|
→ Winner selected
|
||||||
|
→ User publishes request targeting winner
|
||||||
|
```
|
||||||
|
|
||||||
|
## Key CLI Commands
|
||||||
|
|
||||||
|
### Publish a Deployer Service
|
||||||
|
```bash
|
||||||
|
laconic-so publish-webapp-deployer --laconic-config config.yml \
|
||||||
|
--api-url https://deployer-api.example.com \
|
||||||
|
--name my-deployer \
|
||||||
|
--payment-address laconic1... \
|
||||||
|
--minimum-payment 1000alnt
|
||||||
|
```
|
||||||
|
|
||||||
|
### Request Deployment (User Side)
|
||||||
|
```bash
|
||||||
|
laconic-so request-webapp-deployment --laconic-config config.yml \
|
||||||
|
--app lrn://laconic/apps/my-app \
|
||||||
|
--deployer lrn://laconic/deployers/xyz \
|
||||||
|
--make-payment auto
|
||||||
|
```
|
||||||
|
|
||||||
|
### Run Deployer Service (Deployer Side)
|
||||||
|
```bash
|
||||||
|
laconic-so deploy-webapp-from-registry --laconic-config config.yml --discover
|
||||||
|
```
|
||||||
|
|
||||||
|
## Laconic Config File
|
||||||
|
|
||||||
|
All tools require a laconic config file (`laconic.toml`):
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[cosmos]
|
||||||
|
address_prefix = "laconic"
|
||||||
|
chain_id = "laconic_9000-1"
|
||||||
|
endpoint = "http://localhost:26657"
|
||||||
|
key = "<account-name>"
|
||||||
|
password = "<account-password>"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Setting Up a Local Laconicd Network
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Clone and build
|
||||||
|
laconic-so --stack fixturenet-laconic-loaded setup-repositories
|
||||||
|
laconic-so --stack fixturenet-laconic-loaded build-containers
|
||||||
|
laconic-so --stack fixturenet-laconic-loaded deploy create
|
||||||
|
laconic-so deployment --dir laconic-loaded-deployment start
|
||||||
|
|
||||||
|
# Check status
|
||||||
|
laconic-so deployment --dir laconic-loaded-deployment exec cli "laconic registry status"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Key Implementation Files
|
||||||
|
|
||||||
|
| File | Purpose |
|
||||||
|
|------|---------|
|
||||||
|
| `publish_webapp_deployer.py` | Register deployment service on network |
|
||||||
|
| `publish_deployment_auction.py` | Create auction for deployers to bid on |
|
||||||
|
| `handle_deployment_auction.py` | Monitor and bid on auctions (deployer-side) |
|
||||||
|
| `request_webapp_deployment.py` | Create deployment request (user-side) |
|
||||||
|
| `deploy_webapp_from_registry.py` | Process requests and deploy (deployer-side) |
|
||||||
|
| `request_webapp_undeployment.py` | Request app removal |
|
||||||
|
| `undeploy_webapp_from_registry.py` | Process removal requests |
|
||||||
|
| `util.py` | LaconicRegistryClient - all registry interactions |
|
||||||
|
|
||||||
|
## Payment System
|
||||||
|
|
||||||
|
- **Token Denom**: `alnt` (Laconic network tokens)
|
||||||
|
- **Payment Options**:
|
||||||
|
- `--make-payment`: Create new payment with amount (or "auto" for deployer's minimum)
|
||||||
|
- `--use-payment`: Reference existing payment TX
|
||||||
|
|
||||||
|
## What's NOT Well-Documented
|
||||||
|
|
||||||
|
1. No end-to-end tutorial for full deployment workflow
|
||||||
|
2. Stack publishing (vs webapp) process unclear
|
||||||
|
3. LRN naming conventions not formally specified
|
||||||
|
4. Payment economics and token mechanics
|
||||||
110
pyproject.toml
Normal file
110
pyproject.toml
Normal file
@ -0,0 +1,110 @@
|
|||||||
|
[build-system]
|
||||||
|
requires = ["setuptools>=61.0", "wheel"]
|
||||||
|
build-backend = "setuptools.build_meta"
|
||||||
|
|
||||||
|
[project]
|
||||||
|
name = "laconic-stack-orchestrator"
|
||||||
|
version = "1.1.0"
|
||||||
|
description = "Orchestrates deployment of the Laconic stack"
|
||||||
|
readme = "README.md"
|
||||||
|
license = {text = "GNU Affero General Public License"}
|
||||||
|
authors = [
|
||||||
|
{name = "Cerc", email = "info@cerc.io"}
|
||||||
|
]
|
||||||
|
requires-python = ">=3.8"
|
||||||
|
classifiers = [
|
||||||
|
"Programming Language :: Python :: 3.8",
|
||||||
|
"Operating System :: OS Independent",
|
||||||
|
]
|
||||||
|
dependencies = [
|
||||||
|
"python-decouple>=3.8",
|
||||||
|
"python-dotenv==1.0.0",
|
||||||
|
"GitPython>=3.1.32",
|
||||||
|
"tqdm>=4.65.0",
|
||||||
|
"python-on-whales>=0.64.0",
|
||||||
|
"click>=8.1.6",
|
||||||
|
"PyYAML>=6.0.1",
|
||||||
|
"ruamel.yaml>=0.17.32",
|
||||||
|
"pydantic==1.10.9",
|
||||||
|
"tomli==2.0.1",
|
||||||
|
"validators==0.22.0",
|
||||||
|
"kubernetes>=28.1.0",
|
||||||
|
"humanfriendly>=10.0",
|
||||||
|
"python-gnupg>=0.5.2",
|
||||||
|
"requests>=2.3.2",
|
||||||
|
]
|
||||||
|
|
||||||
|
[project.optional-dependencies]
|
||||||
|
dev = [
|
||||||
|
"pytest>=7.0.0",
|
||||||
|
"pytest-cov>=4.0.0",
|
||||||
|
"black>=22.0.0",
|
||||||
|
"flake8>=5.0.0",
|
||||||
|
"pyright>=1.1.0",
|
||||||
|
"yamllint>=1.28.0",
|
||||||
|
"pre-commit>=3.0.0",
|
||||||
|
]
|
||||||
|
|
||||||
|
[project.scripts]
|
||||||
|
laconic-so = "stack_orchestrator.main:cli"
|
||||||
|
|
||||||
|
[project.urls]
|
||||||
|
Homepage = "https://git.vdb.to/cerc-io/stack-orchestrator"
|
||||||
|
|
||||||
|
[tool.setuptools.packages.find]
|
||||||
|
where = ["."]
|
||||||
|
|
||||||
|
[tool.setuptools.package-data]
|
||||||
|
"*" = ["data/**"]
|
||||||
|
|
||||||
|
[tool.black]
|
||||||
|
line-length = 88
|
||||||
|
target-version = ['py38']
|
||||||
|
|
||||||
|
[tool.flake8]
|
||||||
|
max-line-length = 88
|
||||||
|
extend-ignore = ["E203", "W503", "E402"]
|
||||||
|
|
||||||
|
[tool.pyright]
|
||||||
|
pythonVersion = "3.9"
|
||||||
|
typeCheckingMode = "basic"
|
||||||
|
reportMissingImports = "none"
|
||||||
|
reportMissingModuleSource = "none"
|
||||||
|
reportUnusedImport = "error"
|
||||||
|
include = ["stack_orchestrator/**/*.py", "tests/**/*.py"]
|
||||||
|
exclude = ["**/build/**", "**/__pycache__/**"]
|
||||||
|
|
||||||
|
[tool.mypy]
|
||||||
|
python_version = "3.8"
|
||||||
|
warn_return_any = true
|
||||||
|
warn_unused_configs = true
|
||||||
|
disallow_untyped_defs = true
|
||||||
|
|
||||||
|
[tool.pytest.ini_options]
|
||||||
|
testpaths = ["tests"]
|
||||||
|
python_files = ["test_*.py"]
|
||||||
|
python_classes = ["Test*"]
|
||||||
|
python_functions = ["test_*"]
|
||||||
|
markers = [
|
||||||
|
"slow: marks tests as slow (deselect with '-m \"not slow\"')",
|
||||||
|
"e2e: marks tests as end-to-end (requires real infrastructure)",
|
||||||
|
]
|
||||||
|
addopts = [
|
||||||
|
"--cov",
|
||||||
|
"--cov-report=term-missing",
|
||||||
|
"--cov-report=html",
|
||||||
|
"--strict-markers",
|
||||||
|
]
|
||||||
|
asyncio_default_fixture_loop_scope = "function"
|
||||||
|
|
||||||
|
[tool.coverage.run]
|
||||||
|
source = ["stack_orchestrator"]
|
||||||
|
disable_warnings = ["couldnt-parse"]
|
||||||
|
|
||||||
|
[tool.coverage.report]
|
||||||
|
exclude_lines = [
|
||||||
|
"pragma: no cover",
|
||||||
|
"def __repr__",
|
||||||
|
"raise AssertionError",
|
||||||
|
"raise NotImplementedError",
|
||||||
|
]
|
||||||
9
pyrightconfig.json
Normal file
9
pyrightconfig.json
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
{
|
||||||
|
"pythonVersion": "3.9",
|
||||||
|
"typeCheckingMode": "basic",
|
||||||
|
"reportMissingImports": "none",
|
||||||
|
"reportMissingModuleSource": "none",
|
||||||
|
"reportUnusedImport": "error",
|
||||||
|
"include": ["stack_orchestrator/**/*.py", "tests/**/*.py"],
|
||||||
|
"exclude": ["**/build/**", "**/__pycache__/**"]
|
||||||
|
}
|
||||||
@ -4,7 +4,7 @@
|
|||||||
# https://github.com/cerc-io/github-release-api
|
# https://github.com/cerc-io/github-release-api
|
||||||
# User must define: CERC_GH_RELEASE_SCRIPTS_DIR
|
# User must define: CERC_GH_RELEASE_SCRIPTS_DIR
|
||||||
# pointing to the location of that cloned repository
|
# pointing to the location of that cloned repository
|
||||||
# e.g.
|
# e.g.
|
||||||
# cd ~/projects
|
# cd ~/projects
|
||||||
# git clone https://github.com/cerc-io/github-release-api
|
# git clone https://github.com/cerc-io/github-release-api
|
||||||
# cd ./stack-orchestrator
|
# cd ./stack-orchestrator
|
||||||
|
|||||||
@ -94,7 +94,7 @@ sudo apt -y install jq
|
|||||||
# laconic-so depends on git
|
# laconic-so depends on git
|
||||||
sudo apt -y install git
|
sudo apt -y install git
|
||||||
# curl used below
|
# curl used below
|
||||||
sudo apt -y install curl
|
sudo apt -y install curl
|
||||||
# docker repo add depends on gnupg and updated ca-certificates
|
# docker repo add depends on gnupg and updated ca-certificates
|
||||||
sudo apt -y install ca-certificates gnupg
|
sudo apt -y install ca-certificates gnupg
|
||||||
|
|
||||||
|
|||||||
@ -3,7 +3,7 @@
|
|||||||
# Uses this script package to tag a new release:
|
# Uses this script package to tag a new release:
|
||||||
# User must define: CERC_GH_RELEASE_SCRIPTS_DIR
|
# User must define: CERC_GH_RELEASE_SCRIPTS_DIR
|
||||||
# pointing to the location of that cloned repository
|
# pointing to the location of that cloned repository
|
||||||
# e.g.
|
# e.g.
|
||||||
# cd ~/projects
|
# cd ~/projects
|
||||||
# git clone https://github.com/cerc-io/github-release-api
|
# git clone https://github.com/cerc-io/github-release-api
|
||||||
# cd ./stack-orchestrator
|
# cd ./stack-orchestrator
|
||||||
|
|||||||
26
setup.py
26
setup.py
@ -1,5 +1,7 @@
|
|||||||
# See https://medium.com/nerd-for-tech/how-to-build-and-distribute-a-cli-tool-with-python-537ae41d9d78
|
# See
|
||||||
|
# https://medium.com/nerd-for-tech/how-to-build-and-distribute-a-cli-tool-with-python-537ae41d9d78
|
||||||
from setuptools import setup, find_packages
|
from setuptools import setup, find_packages
|
||||||
|
|
||||||
with open("README.md", "r", encoding="utf-8") as fh:
|
with open("README.md", "r", encoding="utf-8") as fh:
|
||||||
long_description = fh.read()
|
long_description = fh.read()
|
||||||
with open("requirements.txt", "r", encoding="utf-8") as fh:
|
with open("requirements.txt", "r", encoding="utf-8") as fh:
|
||||||
@ -7,26 +9,26 @@ with open("requirements.txt", "r", encoding="utf-8") as fh:
|
|||||||
with open("stack_orchestrator/data/version.txt", "r", encoding="utf-8") as fh:
|
with open("stack_orchestrator/data/version.txt", "r", encoding="utf-8") as fh:
|
||||||
version = fh.readlines()[-1].strip(" \n")
|
version = fh.readlines()[-1].strip(" \n")
|
||||||
setup(
|
setup(
|
||||||
name='laconic-stack-orchestrator',
|
name="laconic-stack-orchestrator",
|
||||||
version=version,
|
version=version,
|
||||||
author='Cerc',
|
author="Cerc",
|
||||||
author_email='info@cerc.io',
|
author_email="info@cerc.io",
|
||||||
license='GNU Affero General Public License',
|
license="GNU Affero General Public License",
|
||||||
description='Orchestrates deployment of the Laconic stack',
|
description="Orchestrates deployment of the Laconic stack",
|
||||||
long_description=long_description,
|
long_description=long_description,
|
||||||
long_description_content_type="text/markdown",
|
long_description_content_type="text/markdown",
|
||||||
url='https://git.vdb.to/cerc-io/stack-orchestrator',
|
url="https://git.vdb.to/cerc-io/stack-orchestrator",
|
||||||
py_modules=['stack_orchestrator'],
|
py_modules=["stack_orchestrator"],
|
||||||
packages=find_packages(),
|
packages=find_packages(),
|
||||||
install_requires=[requirements],
|
install_requires=[requirements],
|
||||||
python_requires='>=3.7',
|
python_requires=">=3.7",
|
||||||
include_package_data=True,
|
include_package_data=True,
|
||||||
package_data={'': ['data/**']},
|
package_data={"": ["data/**"]},
|
||||||
classifiers=[
|
classifiers=[
|
||||||
"Programming Language :: Python :: 3.8",
|
"Programming Language :: Python :: 3.8",
|
||||||
"Operating System :: OS Independent",
|
"Operating System :: OS Independent",
|
||||||
],
|
],
|
||||||
entry_points={
|
entry_points={
|
||||||
'console_scripts': ['laconic-so=stack_orchestrator.main:cli'],
|
"console_scripts": ["laconic-so=stack_orchestrator.main:cli"],
|
||||||
}
|
},
|
||||||
)
|
)
|
||||||
|
|||||||
@ -23,11 +23,10 @@ def get_stack(config, stack):
|
|||||||
if stack == "package-registry":
|
if stack == "package-registry":
|
||||||
return package_registry_stack(config, stack)
|
return package_registry_stack(config, stack)
|
||||||
else:
|
else:
|
||||||
return base_stack(config, stack)
|
return default_stack(config, stack)
|
||||||
|
|
||||||
|
|
||||||
class base_stack(ABC):
|
class base_stack(ABC):
|
||||||
|
|
||||||
def __init__(self, config, stack):
|
def __init__(self, config, stack):
|
||||||
self.config = config
|
self.config = config
|
||||||
self.stack = stack
|
self.stack = stack
|
||||||
@ -41,15 +40,27 @@ class base_stack(ABC):
|
|||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
class package_registry_stack(base_stack):
|
class default_stack(base_stack):
|
||||||
|
"""Default stack implementation for stacks without specific handling."""
|
||||||
|
|
||||||
|
def ensure_available(self):
|
||||||
|
return True
|
||||||
|
|
||||||
|
def get_url(self):
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
class package_registry_stack(base_stack):
|
||||||
def ensure_available(self):
|
def ensure_available(self):
|
||||||
self.url = "<no registry url set>"
|
self.url = "<no registry url set>"
|
||||||
# Check if we were given an external registry URL
|
# Check if we were given an external registry URL
|
||||||
url_from_environment = os.environ.get("CERC_NPM_REGISTRY_URL")
|
url_from_environment = os.environ.get("CERC_NPM_REGISTRY_URL")
|
||||||
if url_from_environment:
|
if url_from_environment:
|
||||||
if self.config.verbose:
|
if self.config.verbose:
|
||||||
print(f"Using package registry url from CERC_NPM_REGISTRY_URL: {url_from_environment}")
|
print(
|
||||||
|
f"Using package registry url from CERC_NPM_REGISTRY_URL: "
|
||||||
|
f"{url_from_environment}"
|
||||||
|
)
|
||||||
self.url = url_from_environment
|
self.url = url_from_environment
|
||||||
else:
|
else:
|
||||||
# Otherwise we expect to use the local package-registry stack
|
# Otherwise we expect to use the local package-registry stack
|
||||||
@ -62,10 +73,16 @@ class package_registry_stack(base_stack):
|
|||||||
# TODO: get url from deploy-stack
|
# TODO: get url from deploy-stack
|
||||||
self.url = "http://gitea.local:3000/api/packages/cerc-io/npm/"
|
self.url = "http://gitea.local:3000/api/packages/cerc-io/npm/"
|
||||||
else:
|
else:
|
||||||
# If not, print a message about how to start it and return fail to the caller
|
# If not, print a message about how to start it and return fail to the
|
||||||
print("ERROR: The package-registry stack is not running, and no external registry "
|
# caller
|
||||||
"specified with CERC_NPM_REGISTRY_URL")
|
print(
|
||||||
print("ERROR: Start the local package registry with: laconic-so --stack package-registry deploy-system up")
|
"ERROR: The package-registry stack is not running, "
|
||||||
|
"and no external registry specified with CERC_NPM_REGISTRY_URL"
|
||||||
|
)
|
||||||
|
print(
|
||||||
|
"ERROR: Start the local package registry with: "
|
||||||
|
"laconic-so --stack package-registry deploy-system up"
|
||||||
|
)
|
||||||
return False
|
return False
|
||||||
return True
|
return True
|
||||||
|
|
||||||
@ -76,7 +93,9 @@ class package_registry_stack(base_stack):
|
|||||||
def get_npm_registry_url():
|
def get_npm_registry_url():
|
||||||
# If an auth token is not defined, we assume the default should be the cerc registry
|
# If an auth token is not defined, we assume the default should be the cerc registry
|
||||||
# If an auth token is defined, we assume the local gitea should be used.
|
# If an auth token is defined, we assume the local gitea should be used.
|
||||||
default_npm_registry_url = "http://gitea.local:3000/api/packages/cerc-io/npm/" if config(
|
default_npm_registry_url = (
|
||||||
"CERC_NPM_AUTH_TOKEN", default=None
|
"http://gitea.local:3000/api/packages/cerc-io/npm/"
|
||||||
) else "https://git.vdb.to/api/packages/cerc-io/npm/"
|
if config("CERC_NPM_AUTH_TOKEN", default=None)
|
||||||
|
else "https://git.vdb.to/api/packages/cerc-io/npm/"
|
||||||
|
)
|
||||||
return config("CERC_NPM_REGISTRY_URL", default=default_npm_registry_url)
|
return config("CERC_NPM_REGISTRY_URL", default=default_npm_registry_url)
|
||||||
|
|||||||
@ -18,7 +18,8 @@
|
|||||||
# env vars:
|
# env vars:
|
||||||
# CERC_REPO_BASE_DIR defaults to ~/cerc
|
# CERC_REPO_BASE_DIR defaults to ~/cerc
|
||||||
|
|
||||||
# TODO: display the available list of containers; allow re-build of either all or specific containers
|
# TODO: display the available list of containers;
|
||||||
|
# allow re-build of either all or specific containers
|
||||||
|
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
@ -34,14 +35,17 @@ from stack_orchestrator.build.publish import publish_image
|
|||||||
from stack_orchestrator.build.build_util import get_containers_in_scope
|
from stack_orchestrator.build.build_util import get_containers_in_scope
|
||||||
|
|
||||||
# TODO: find a place for this
|
# TODO: find a place for this
|
||||||
# epilog="Config provided either in .env or settings.ini or env vars: CERC_REPO_BASE_DIR (defaults to ~/cerc)"
|
# epilog="Config provided either in .env or settings.ini or env vars:
|
||||||
|
# CERC_REPO_BASE_DIR (defaults to ~/cerc)"
|
||||||
|
|
||||||
|
|
||||||
def make_container_build_env(dev_root_path: str,
|
def make_container_build_env(
|
||||||
container_build_dir: str,
|
dev_root_path: str,
|
||||||
debug: bool,
|
container_build_dir: str,
|
||||||
force_rebuild: bool,
|
debug: bool,
|
||||||
extra_build_args: str):
|
force_rebuild: bool,
|
||||||
|
extra_build_args: str,
|
||||||
|
):
|
||||||
container_build_env = {
|
container_build_env = {
|
||||||
"CERC_NPM_REGISTRY_URL": get_npm_registry_url(),
|
"CERC_NPM_REGISTRY_URL": get_npm_registry_url(),
|
||||||
"CERC_GO_AUTH_TOKEN": config("CERC_GO_AUTH_TOKEN", default=""),
|
"CERC_GO_AUTH_TOKEN": config("CERC_GO_AUTH_TOKEN", default=""),
|
||||||
@ -50,11 +54,15 @@ def make_container_build_env(dev_root_path: str,
|
|||||||
"CERC_CONTAINER_BASE_DIR": container_build_dir,
|
"CERC_CONTAINER_BASE_DIR": container_build_dir,
|
||||||
"CERC_HOST_UID": f"{os.getuid()}",
|
"CERC_HOST_UID": f"{os.getuid()}",
|
||||||
"CERC_HOST_GID": f"{os.getgid()}",
|
"CERC_HOST_GID": f"{os.getgid()}",
|
||||||
"DOCKER_BUILDKIT": config("DOCKER_BUILDKIT", default="0")
|
"DOCKER_BUILDKIT": config("DOCKER_BUILDKIT", default="0"),
|
||||||
}
|
}
|
||||||
container_build_env.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
|
container_build_env.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
|
||||||
container_build_env.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
|
container_build_env.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
|
||||||
container_build_env.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {})
|
container_build_env.update(
|
||||||
|
{"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args}
|
||||||
|
if extra_build_args
|
||||||
|
else {}
|
||||||
|
)
|
||||||
docker_host_env = os.getenv("DOCKER_HOST")
|
docker_host_env = os.getenv("DOCKER_HOST")
|
||||||
if docker_host_env:
|
if docker_host_env:
|
||||||
container_build_env.update({"DOCKER_HOST": docker_host_env})
|
container_build_env.update({"DOCKER_HOST": docker_host_env})
|
||||||
@ -67,12 +75,18 @@ def process_container(build_context: BuildContext) -> bool:
|
|||||||
print(f"Building: {build_context.container}")
|
print(f"Building: {build_context.container}")
|
||||||
|
|
||||||
default_container_tag = f"{build_context.container}:local"
|
default_container_tag = f"{build_context.container}:local"
|
||||||
build_context.container_build_env.update({"CERC_DEFAULT_CONTAINER_IMAGE_TAG": default_container_tag})
|
build_context.container_build_env.update(
|
||||||
|
{"CERC_DEFAULT_CONTAINER_IMAGE_TAG": default_container_tag}
|
||||||
|
)
|
||||||
|
|
||||||
# Check if this is in an external stack
|
# Check if this is in an external stack
|
||||||
if stack_is_external(build_context.stack):
|
if stack_is_external(build_context.stack):
|
||||||
container_parent_dir = Path(build_context.stack).parent.parent.joinpath("container-build")
|
container_parent_dir = Path(build_context.stack).parent.parent.joinpath(
|
||||||
temp_build_dir = container_parent_dir.joinpath(build_context.container.replace("/", "-"))
|
"container-build"
|
||||||
|
)
|
||||||
|
temp_build_dir = container_parent_dir.joinpath(
|
||||||
|
build_context.container.replace("/", "-")
|
||||||
|
)
|
||||||
temp_build_script_filename = temp_build_dir.joinpath("build.sh")
|
temp_build_script_filename = temp_build_dir.joinpath("build.sh")
|
||||||
# Now check if the container exists in the external stack.
|
# Now check if the container exists in the external stack.
|
||||||
if not temp_build_script_filename.exists():
|
if not temp_build_script_filename.exists():
|
||||||
@ -90,21 +104,34 @@ def process_container(build_context: BuildContext) -> bool:
|
|||||||
build_command = build_script_filename.as_posix()
|
build_command = build_script_filename.as_posix()
|
||||||
else:
|
else:
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f"No script file found: {build_script_filename}, using default build script")
|
print(
|
||||||
repo_dir = build_context.container.split('/')[1]
|
f"No script file found: {build_script_filename}, "
|
||||||
# TODO: make this less of a hack -- should be specified in some metadata somewhere
|
"using default build script"
|
||||||
# Check if we have a repo for this container. If not, set the context dir to the container-build subdir
|
)
|
||||||
|
repo_dir = build_context.container.split("/")[1]
|
||||||
|
# TODO: make this less of a hack -- should be specified in
|
||||||
|
# some metadata somewhere. Check if we have a repo for this
|
||||||
|
# container. If not, set the context dir to container-build subdir
|
||||||
repo_full_path = os.path.join(build_context.dev_root_path, repo_dir)
|
repo_full_path = os.path.join(build_context.dev_root_path, repo_dir)
|
||||||
repo_dir_or_build_dir = repo_full_path if os.path.exists(repo_full_path) else build_dir
|
repo_dir_or_build_dir = (
|
||||||
build_command = os.path.join(build_context.container_build_dir,
|
repo_full_path if os.path.exists(repo_full_path) else build_dir
|
||||||
"default-build.sh") + f" {default_container_tag} {repo_dir_or_build_dir}"
|
)
|
||||||
|
build_command = (
|
||||||
|
os.path.join(build_context.container_build_dir, "default-build.sh")
|
||||||
|
+ f" {default_container_tag} {repo_dir_or_build_dir}"
|
||||||
|
)
|
||||||
if not opts.o.dry_run:
|
if not opts.o.dry_run:
|
||||||
# No PATH at all causes failures with podman.
|
# No PATH at all causes failures with podman.
|
||||||
if "PATH" not in build_context.container_build_env:
|
if "PATH" not in build_context.container_build_env:
|
||||||
build_context.container_build_env["PATH"] = os.environ["PATH"]
|
build_context.container_build_env["PATH"] = os.environ["PATH"]
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f"Executing: {build_command} with environment: {build_context.container_build_env}")
|
print(
|
||||||
build_result = subprocess.run(build_command, shell=True, env=build_context.container_build_env)
|
f"Executing: {build_command} with environment: "
|
||||||
|
f"{build_context.container_build_env}"
|
||||||
|
)
|
||||||
|
build_result = subprocess.run(
|
||||||
|
build_command, shell=True, env=build_context.container_build_env
|
||||||
|
)
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f"Return code is: {build_result.returncode}")
|
print(f"Return code is: {build_result.returncode}")
|
||||||
if build_result.returncode != 0:
|
if build_result.returncode != 0:
|
||||||
@ -117,33 +144,61 @@ def process_container(build_context: BuildContext) -> bool:
|
|||||||
|
|
||||||
|
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.option('--include', help="only build these containers")
|
@click.option("--include", help="only build these containers")
|
||||||
@click.option('--exclude', help="don\'t build these containers")
|
@click.option("--exclude", help="don't build these containers")
|
||||||
@click.option("--force-rebuild", is_flag=True, default=False, help="Override dependency checking -- always rebuild")
|
@click.option(
|
||||||
|
"--force-rebuild",
|
||||||
|
is_flag=True,
|
||||||
|
default=False,
|
||||||
|
help="Override dependency checking -- always rebuild",
|
||||||
|
)
|
||||||
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
||||||
@click.option("--publish-images", is_flag=True, default=False, help="Publish the built images in the specified image registry")
|
@click.option(
|
||||||
@click.option("--image-registry", help="Specify the image registry for --publish-images")
|
"--publish-images",
|
||||||
|
is_flag=True,
|
||||||
|
default=False,
|
||||||
|
help="Publish the built images in the specified image registry",
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--image-registry", help="Specify the image registry for --publish-images"
|
||||||
|
)
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(ctx, include, exclude, force_rebuild, extra_build_args, publish_images, image_registry):
|
def command(
|
||||||
'''build the set of containers required for a complete stack'''
|
ctx,
|
||||||
|
include,
|
||||||
|
exclude,
|
||||||
|
force_rebuild,
|
||||||
|
extra_build_args,
|
||||||
|
publish_images,
|
||||||
|
image_registry,
|
||||||
|
):
|
||||||
|
"""build the set of containers required for a complete stack"""
|
||||||
|
|
||||||
local_stack = ctx.obj.local_stack
|
local_stack = ctx.obj.local_stack
|
||||||
stack = ctx.obj.stack
|
stack = ctx.obj.stack
|
||||||
|
|
||||||
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
|
# See: https://stackoverflow.com/questions/25389095/
|
||||||
container_build_dir = Path(__file__).absolute().parent.parent.joinpath("data", "container-build")
|
# python-get-path-of-root-project-structure
|
||||||
|
container_build_dir = (
|
||||||
|
Path(__file__).absolute().parent.parent.joinpath("data", "container-build")
|
||||||
|
)
|
||||||
|
|
||||||
if local_stack:
|
if local_stack:
|
||||||
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
|
dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")]
|
||||||
print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
|
print(
|
||||||
|
f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
|
||||||
|
f"{dev_root_path}"
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
|
dev_root_path = os.path.expanduser(
|
||||||
|
config("CERC_REPO_BASE_DIR", default="~/cerc")
|
||||||
|
)
|
||||||
|
|
||||||
if not opts.o.quiet:
|
if not opts.o.quiet:
|
||||||
print(f'Dev Root is: {dev_root_path}')
|
print(f"Dev Root is: {dev_root_path}")
|
||||||
|
|
||||||
if not os.path.isdir(dev_root_path):
|
if not os.path.isdir(dev_root_path):
|
||||||
print('Dev root directory doesn\'t exist, creating')
|
print("Dev root directory doesn't exist, creating")
|
||||||
|
|
||||||
if publish_images:
|
if publish_images:
|
||||||
if not image_registry:
|
if not image_registry:
|
||||||
@ -151,21 +206,22 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args, publish_imag
|
|||||||
|
|
||||||
containers_in_scope = get_containers_in_scope(stack)
|
containers_in_scope = get_containers_in_scope(stack)
|
||||||
|
|
||||||
container_build_env = make_container_build_env(dev_root_path,
|
container_build_env = make_container_build_env(
|
||||||
container_build_dir,
|
dev_root_path,
|
||||||
opts.o.debug,
|
container_build_dir,
|
||||||
force_rebuild,
|
opts.o.debug,
|
||||||
extra_build_args)
|
force_rebuild,
|
||||||
|
extra_build_args,
|
||||||
|
)
|
||||||
|
|
||||||
for container in containers_in_scope:
|
for container in containers_in_scope:
|
||||||
if include_exclude_check(container, include, exclude):
|
if include_exclude_check(container, include, exclude):
|
||||||
|
|
||||||
build_context = BuildContext(
|
build_context = BuildContext(
|
||||||
stack,
|
stack,
|
||||||
container,
|
container,
|
||||||
container_build_dir,
|
container_build_dir,
|
||||||
container_build_env,
|
container_build_env,
|
||||||
dev_root_path
|
dev_root_path,
|
||||||
)
|
)
|
||||||
result = process_container(build_context)
|
result = process_container(build_context)
|
||||||
if result:
|
if result:
|
||||||
@ -174,10 +230,16 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args, publish_imag
|
|||||||
else:
|
else:
|
||||||
print(f"Error running build for {build_context.container}")
|
print(f"Error running build for {build_context.container}")
|
||||||
if not opts.o.continue_on_error:
|
if not opts.o.continue_on_error:
|
||||||
error_exit("container build failed and --continue-on-error not set, exiting")
|
error_exit(
|
||||||
|
"container build failed and --continue-on-error "
|
||||||
|
"not set, exiting"
|
||||||
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
else:
|
else:
|
||||||
print("****** Container Build Error, continuing because --continue-on-error is set")
|
print(
|
||||||
|
"****** Container Build Error, continuing because "
|
||||||
|
"--continue-on-error is set"
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f"Excluding: {container}")
|
print(f"Excluding: {container}")
|
||||||
|
|||||||
@ -32,14 +32,18 @@ builder_js_image_name = "cerc/builder-js:local"
|
|||||||
|
|
||||||
|
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.option('--include', help="only build these packages")
|
@click.option("--include", help="only build these packages")
|
||||||
@click.option('--exclude', help="don\'t build these packages")
|
@click.option("--exclude", help="don't build these packages")
|
||||||
@click.option("--force-rebuild", is_flag=True, default=False,
|
@click.option(
|
||||||
help="Override existing target package version check -- force rebuild")
|
"--force-rebuild",
|
||||||
|
is_flag=True,
|
||||||
|
default=False,
|
||||||
|
help="Override existing target package version check -- force rebuild",
|
||||||
|
)
|
||||||
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(ctx, include, exclude, force_rebuild, extra_build_args):
|
def command(ctx, include, exclude, force_rebuild, extra_build_args):
|
||||||
'''build the set of npm packages required for a complete stack'''
|
"""build the set of npm packages required for a complete stack"""
|
||||||
|
|
||||||
quiet = ctx.obj.quiet
|
quiet = ctx.obj.quiet
|
||||||
verbose = ctx.obj.verbose
|
verbose = ctx.obj.verbose
|
||||||
@ -65,45 +69,54 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
|
|||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
if local_stack:
|
if local_stack:
|
||||||
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
|
dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")]
|
||||||
print(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
|
print(
|
||||||
|
f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
|
||||||
|
f"{dev_root_path}"
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
|
dev_root_path = os.path.expanduser(
|
||||||
|
config("CERC_REPO_BASE_DIR", default="~/cerc")
|
||||||
|
)
|
||||||
|
|
||||||
build_root_path = os.path.join(dev_root_path, "build-trees")
|
build_root_path = os.path.join(dev_root_path, "build-trees")
|
||||||
|
|
||||||
if verbose:
|
if verbose:
|
||||||
print(f'Dev Root is: {dev_root_path}')
|
print(f"Dev Root is: {dev_root_path}")
|
||||||
|
|
||||||
if not os.path.isdir(dev_root_path):
|
if not os.path.isdir(dev_root_path):
|
||||||
print('Dev root directory doesn\'t exist, creating')
|
print("Dev root directory doesn't exist, creating")
|
||||||
os.makedirs(dev_root_path)
|
os.makedirs(dev_root_path)
|
||||||
if not os.path.isdir(dev_root_path):
|
if not os.path.isdir(dev_root_path):
|
||||||
print('Build root directory doesn\'t exist, creating')
|
print("Build root directory doesn't exist, creating")
|
||||||
os.makedirs(build_root_path)
|
os.makedirs(build_root_path)
|
||||||
|
|
||||||
# See: https://stackoverflow.com/a/20885799/1701505
|
# See: https://stackoverflow.com/a/20885799/1701505
|
||||||
from stack_orchestrator import data
|
from stack_orchestrator import data
|
||||||
with importlib.resources.open_text(data, "npm-package-list.txt") as package_list_file:
|
|
||||||
|
with importlib.resources.open_text(
|
||||||
|
data, "npm-package-list.txt"
|
||||||
|
) as package_list_file:
|
||||||
all_packages = package_list_file.read().splitlines()
|
all_packages = package_list_file.read().splitlines()
|
||||||
|
|
||||||
packages_in_scope = []
|
packages_in_scope = []
|
||||||
if stack:
|
if stack:
|
||||||
stack_config = get_parsed_stack_config(stack)
|
stack_config = get_parsed_stack_config(stack)
|
||||||
# TODO: syntax check the input here
|
# TODO: syntax check the input here
|
||||||
packages_in_scope = stack_config['npms']
|
packages_in_scope = stack_config["npms"]
|
||||||
else:
|
else:
|
||||||
packages_in_scope = all_packages
|
packages_in_scope = all_packages
|
||||||
|
|
||||||
if verbose:
|
if verbose:
|
||||||
print(f'Packages: {packages_in_scope}')
|
print(f"Packages: {packages_in_scope}")
|
||||||
|
|
||||||
def build_package(package):
|
def build_package(package):
|
||||||
if not quiet:
|
if not quiet:
|
||||||
print(f"Building npm package: {package}")
|
print(f"Building npm package: {package}")
|
||||||
repo_dir = package
|
repo_dir = package
|
||||||
repo_full_path = os.path.join(dev_root_path, repo_dir)
|
repo_full_path = os.path.join(dev_root_path, repo_dir)
|
||||||
# Copy the repo and build that to avoid propagating JS tooling file changes back into the cloned repo
|
# Copy the repo and build that to avoid propagating
|
||||||
|
# JS tooling file changes back into the cloned repo
|
||||||
repo_copy_path = os.path.join(build_root_path, repo_dir)
|
repo_copy_path = os.path.join(build_root_path, repo_dir)
|
||||||
# First delete any old build tree
|
# First delete any old build tree
|
||||||
if os.path.isdir(repo_copy_path):
|
if os.path.isdir(repo_copy_path):
|
||||||
@ -116,41 +129,63 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
|
|||||||
print(f"Copying build tree from: {repo_full_path} to: {repo_copy_path}")
|
print(f"Copying build tree from: {repo_full_path} to: {repo_copy_path}")
|
||||||
if not dry_run:
|
if not dry_run:
|
||||||
copytree(repo_full_path, repo_copy_path)
|
copytree(repo_full_path, repo_copy_path)
|
||||||
build_command = ["sh", "-c", f"cd /workspace && build-npm-package-local-dependencies.sh {npm_registry_url}"]
|
build_command = [
|
||||||
|
"sh",
|
||||||
|
"-c",
|
||||||
|
"cd /workspace && "
|
||||||
|
f"build-npm-package-local-dependencies.sh {npm_registry_url}",
|
||||||
|
]
|
||||||
if not dry_run:
|
if not dry_run:
|
||||||
if verbose:
|
if verbose:
|
||||||
print(f"Executing: {build_command}")
|
print(f"Executing: {build_command}")
|
||||||
# Originally we used the PEP 584 merge operator:
|
# Originally we used the PEP 584 merge operator:
|
||||||
# envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token} | ({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
|
# envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token} |
|
||||||
# but that isn't available in Python 3.8 (default in Ubuntu 20) so for now we use dict.update:
|
# ({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
|
||||||
envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token,
|
# but that isn't available in Python 3.8 (default in Ubuntu 20)
|
||||||
"LACONIC_HOSTED_CONFIG_FILE": "config-hosted.yml" # Convention used by our web app packages
|
# so for now we use dict.update:
|
||||||
}
|
envs = {
|
||||||
|
"CERC_NPM_AUTH_TOKEN": npm_registry_url_token,
|
||||||
|
# Convention used by our web app packages
|
||||||
|
"LACONIC_HOSTED_CONFIG_FILE": "config-hosted.yml",
|
||||||
|
}
|
||||||
envs.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
|
envs.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
|
||||||
envs.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
|
envs.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
|
||||||
envs.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {})
|
envs.update(
|
||||||
|
{"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args}
|
||||||
|
if extra_build_args
|
||||||
|
else {}
|
||||||
|
)
|
||||||
try:
|
try:
|
||||||
docker.run(builder_js_image_name,
|
docker.run(
|
||||||
remove=True,
|
builder_js_image_name,
|
||||||
interactive=True,
|
remove=True,
|
||||||
tty=True,
|
interactive=True,
|
||||||
user=f"{os.getuid()}:{os.getgid()}",
|
tty=True,
|
||||||
envs=envs,
|
user=f"{os.getuid()}:{os.getgid()}",
|
||||||
# TODO: detect this host name in npm_registry_url rather than hard-wiring it
|
envs=envs,
|
||||||
add_hosts=[("gitea.local", "host-gateway")],
|
# TODO: detect this host name in npm_registry_url
|
||||||
volumes=[(repo_copy_path, "/workspace")],
|
# rather than hard-wiring it
|
||||||
command=build_command
|
add_hosts=[("gitea.local", "host-gateway")],
|
||||||
)
|
volumes=[(repo_copy_path, "/workspace")],
|
||||||
# Note that although the docs say that build_result should contain
|
command=build_command,
|
||||||
# the command output as a string, in reality it is always the empty string.
|
)
|
||||||
# Since we detect errors via catching exceptions below, we can safely ignore it here.
|
# Note that although the docs say that build_result should
|
||||||
|
# contain the command output as a string, in reality it is
|
||||||
|
# always the empty string. Since we detect errors via catching
|
||||||
|
# exceptions below, we can safely ignore it here.
|
||||||
except DockerException as e:
|
except DockerException as e:
|
||||||
print(f"Error executing build for {package} in container:\n {e}")
|
print(f"Error executing build for {package} in container:\n {e}")
|
||||||
if not continue_on_error:
|
if not continue_on_error:
|
||||||
print("FATAL Error: build failed and --continue-on-error not set, exiting")
|
print(
|
||||||
|
"FATAL Error: build failed and --continue-on-error "
|
||||||
|
"not set, exiting"
|
||||||
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
else:
|
else:
|
||||||
print("****** Build Error, continuing because --continue-on-error is set")
|
print(
|
||||||
|
"****** Build Error, continuing because "
|
||||||
|
"--continue-on-error is set"
|
||||||
|
)
|
||||||
|
|
||||||
else:
|
else:
|
||||||
print("Skipped")
|
print("Skipped")
|
||||||
@ -168,6 +203,12 @@ def _ensure_prerequisites():
|
|||||||
# Tell the user how to build it if not
|
# Tell the user how to build it if not
|
||||||
images = docker.image.list(builder_js_image_name)
|
images = docker.image.list(builder_js_image_name)
|
||||||
if len(images) == 0:
|
if len(images) == 0:
|
||||||
print(f"FATAL: builder image: {builder_js_image_name} is required but was not found")
|
print(
|
||||||
print("Please run this command to create it: laconic-so --stack build-support build-containers")
|
f"FATAL: builder image: {builder_js_image_name} is required "
|
||||||
|
"but was not found"
|
||||||
|
)
|
||||||
|
print(
|
||||||
|
"Please run this command to create it: "
|
||||||
|
"laconic-so --stack build-support build-containers"
|
||||||
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|||||||
@ -24,6 +24,5 @@ class BuildContext:
|
|||||||
stack: str
|
stack: str
|
||||||
container: str
|
container: str
|
||||||
container_build_dir: Path
|
container_build_dir: Path
|
||||||
container_build_env: Mapping[str,str]
|
container_build_env: Mapping[str, str]
|
||||||
dev_root_path: str
|
dev_root_path: str
|
||||||
|
|
||||||
|
|||||||
@ -20,21 +20,23 @@ from stack_orchestrator.util import get_parsed_stack_config, warn_exit
|
|||||||
|
|
||||||
|
|
||||||
def get_containers_in_scope(stack: str):
|
def get_containers_in_scope(stack: str):
|
||||||
|
|
||||||
containers_in_scope = []
|
containers_in_scope = []
|
||||||
if stack:
|
if stack:
|
||||||
stack_config = get_parsed_stack_config(stack)
|
stack_config = get_parsed_stack_config(stack)
|
||||||
if "containers" not in stack_config or stack_config["containers"] is None:
|
if "containers" not in stack_config or stack_config["containers"] is None:
|
||||||
warn_exit(f"stack {stack} does not define any containers")
|
warn_exit(f"stack {stack} does not define any containers")
|
||||||
containers_in_scope = stack_config['containers']
|
containers_in_scope = stack_config["containers"]
|
||||||
else:
|
else:
|
||||||
# See: https://stackoverflow.com/a/20885799/1701505
|
# See: https://stackoverflow.com/a/20885799/1701505
|
||||||
from stack_orchestrator import data
|
from stack_orchestrator import data
|
||||||
with importlib.resources.open_text(data, "container-image-list.txt") as container_list_file:
|
|
||||||
|
with importlib.resources.open_text(
|
||||||
|
data, "container-image-list.txt"
|
||||||
|
) as container_list_file:
|
||||||
containers_in_scope = container_list_file.read().splitlines()
|
containers_in_scope = container_list_file.read().splitlines()
|
||||||
|
|
||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f'Containers: {containers_in_scope}')
|
print(f"Containers: {containers_in_scope}")
|
||||||
if stack:
|
if stack:
|
||||||
print(f"Stack: {stack}")
|
print(f"Stack: {stack}")
|
||||||
|
|
||||||
|
|||||||
@ -18,7 +18,8 @@
|
|||||||
# env vars:
|
# env vars:
|
||||||
# CERC_REPO_BASE_DIR defaults to ~/cerc
|
# CERC_REPO_BASE_DIR defaults to ~/cerc
|
||||||
|
|
||||||
# TODO: display the available list of containers; allow re-build of either all or specific containers
|
# TODO: display the available list of containers;
|
||||||
|
# allow re-build of either all or specific containers
|
||||||
|
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
@ -30,49 +31,57 @@ from stack_orchestrator.build import build_containers
|
|||||||
from stack_orchestrator.deploy.webapp.util import determine_base_container, TimedLogger
|
from stack_orchestrator.deploy.webapp.util import determine_base_container, TimedLogger
|
||||||
from stack_orchestrator.build.build_types import BuildContext
|
from stack_orchestrator.build.build_types import BuildContext
|
||||||
|
|
||||||
def create_env_file(env_vars, repo_root):
|
|
||||||
env_file_path = os.path.join(repo_root, '.env')
|
|
||||||
with open(env_file_path, 'w') as env_file:
|
|
||||||
for key, value in env_vars.items():
|
|
||||||
env_file.write(f"{key}={value}\n")
|
|
||||||
return env_file_path
|
|
||||||
|
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.option('--base-container')
|
@click.option("--base-container")
|
||||||
@click.option('--source-repo', help="directory containing the webapp to build", required=True)
|
@click.option(
|
||||||
@click.option("--force-rebuild", is_flag=True, default=False, help="Override dependency checking -- always rebuild")
|
"--source-repo", help="directory containing the webapp to build", required=True
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--force-rebuild",
|
||||||
|
is_flag=True,
|
||||||
|
default=False,
|
||||||
|
help="Override dependency checking -- always rebuild",
|
||||||
|
)
|
||||||
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
@click.option("--extra-build-args", help="Supply extra arguments to build")
|
||||||
@click.option("--tag", help="Container tag (default: cerc/<app_name>:local)")
|
@click.option("--tag", help="Container tag (default: cerc/<app_name>:local)")
|
||||||
@click.option("--env", help="Environment variables for webapp (format: KEY1=VALUE1,KEY2=VALUE2)", default="")
|
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, tag, env):
|
def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, tag):
|
||||||
'''build the specified webapp container'''
|
"""build the specified webapp container"""
|
||||||
logger = TimedLogger()
|
logger = TimedLogger()
|
||||||
|
|
||||||
quiet = ctx.obj.quiet
|
|
||||||
debug = ctx.obj.debug
|
debug = ctx.obj.debug
|
||||||
verbose = ctx.obj.verbose
|
verbose = ctx.obj.verbose
|
||||||
local_stack = ctx.obj.local_stack
|
local_stack = ctx.obj.local_stack
|
||||||
stack = ctx.obj.stack
|
stack = ctx.obj.stack
|
||||||
|
|
||||||
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
|
# See: https://stackoverflow.com/questions/25389095/
|
||||||
container_build_dir = Path(__file__).absolute().parent.parent.joinpath("data", "container-build")
|
# python-get-path-of-root-project-structure
|
||||||
|
container_build_dir = (
|
||||||
|
Path(__file__).absolute().parent.parent.joinpath("data", "container-build")
|
||||||
|
)
|
||||||
|
|
||||||
if local_stack:
|
if local_stack:
|
||||||
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
|
dev_root_path = os.getcwd()[0 : os.getcwd().rindex("stack-orchestrator")]
|
||||||
logger.log(f'Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: {dev_root_path}')
|
logger.log(
|
||||||
|
f"Local stack dev_root_path (CERC_REPO_BASE_DIR) overridden to: "
|
||||||
|
f"{dev_root_path}"
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
|
dev_root_path = os.path.expanduser(
|
||||||
|
config("CERC_REPO_BASE_DIR", default="~/cerc")
|
||||||
|
)
|
||||||
|
|
||||||
if verbose:
|
if verbose:
|
||||||
logger.log(f'Dev Root is: {dev_root_path}')
|
logger.log(f"Dev Root is: {dev_root_path}")
|
||||||
|
|
||||||
if not base_container:
|
if not base_container:
|
||||||
base_container = determine_base_container(source_repo)
|
base_container = determine_base_container(source_repo)
|
||||||
|
|
||||||
# First build the base container.
|
# First build the base container.
|
||||||
container_build_env = build_containers.make_container_build_env(dev_root_path, container_build_dir, debug,
|
container_build_env = build_containers.make_container_build_env(
|
||||||
force_rebuild, extra_build_args)
|
dev_root_path, container_build_dir, debug, force_rebuild, extra_build_args
|
||||||
|
)
|
||||||
|
|
||||||
if verbose:
|
if verbose:
|
||||||
logger.log(f"Building base container: {base_container}")
|
logger.log(f"Building base container: {base_container}")
|
||||||
@ -92,31 +101,13 @@ def command(ctx, base_container, source_repo, force_rebuild, extra_build_args, t
|
|||||||
if verbose:
|
if verbose:
|
||||||
logger.log(f"Base container {base_container} build finished.")
|
logger.log(f"Base container {base_container} build finished.")
|
||||||
|
|
||||||
# Now build the target webapp. We use the same build script, but with a different Dockerfile and work dir.
|
# Now build the target webapp. We use the same build script,
|
||||||
|
# but with a different Dockerfile and work dir.
|
||||||
container_build_env["CERC_WEBAPP_BUILD_RUNNING"] = "true"
|
container_build_env["CERC_WEBAPP_BUILD_RUNNING"] = "true"
|
||||||
container_build_env["CERC_CONTAINER_BUILD_WORK_DIR"] = os.path.abspath(source_repo)
|
container_build_env["CERC_CONTAINER_BUILD_WORK_DIR"] = os.path.abspath(source_repo)
|
||||||
|
container_build_env["CERC_CONTAINER_BUILD_DOCKERFILE"] = os.path.join(
|
||||||
# Check if Dockerfile exists in the repository
|
container_build_dir, base_container.replace("/", "-"), "Dockerfile.webapp"
|
||||||
repo_dockerfile = os.path.join(container_build_env["CERC_CONTAINER_BUILD_WORK_DIR"], "Dockerfile")
|
)
|
||||||
default_dockerfile = os.path.join(container_build_dir,
|
|
||||||
base_container.replace("/", "-"),
|
|
||||||
"Dockerfile.webapp")
|
|
||||||
|
|
||||||
if os.path.isfile(repo_dockerfile):
|
|
||||||
env_vars = {}
|
|
||||||
if env:
|
|
||||||
for pair in env.split(','):
|
|
||||||
key, value = pair.split('=')
|
|
||||||
env_vars[key.strip()] = value.strip()
|
|
||||||
|
|
||||||
container_build_env["CERC_CONTAINER_BUILD_DOCKERFILE"] = repo_dockerfile
|
|
||||||
|
|
||||||
# Create .env file with environment variables
|
|
||||||
env_file_path = create_env_file(env_vars, container_build_env["CERC_CONTAINER_BUILD_WORK_DIR"])
|
|
||||||
container_build_env["CERC_CONTAINER_BUILD_ENV_FILE"] = env_file_path
|
|
||||||
else:
|
|
||||||
container_build_env["CERC_CONTAINER_BUILD_DOCKERFILE"] = default_dockerfile
|
|
||||||
|
|
||||||
if not tag:
|
if not tag:
|
||||||
webapp_name = os.path.abspath(source_repo).split(os.path.sep)[-1]
|
webapp_name = os.path.abspath(source_repo).split(os.path.sep)[-1]
|
||||||
tag = f"cerc/{webapp_name}:local"
|
tag = f"cerc/{webapp_name}:local"
|
||||||
|
|||||||
@ -52,7 +52,8 @@ def _local_tag_for(container: str):
|
|||||||
|
|
||||||
# See: https://docker-docs.uclv.cu/registry/spec/api/
|
# See: https://docker-docs.uclv.cu/registry/spec/api/
|
||||||
# Emulate this:
|
# Emulate this:
|
||||||
# $ curl -u "my-username:my-token" -X GET "https://<container-registry-hostname>/v2/cerc-io/cerc/test-container/tags/list"
|
# $ curl -u "my-username:my-token" -X GET \
|
||||||
|
# "https://<container-registry-hostname>/v2/cerc-io/cerc/test-container/tags/list"
|
||||||
# {"name":"cerc-io/cerc/test-container","tags":["202402232130","202402232208"]}
|
# {"name":"cerc-io/cerc/test-container","tags":["202402232130","202402232208"]}
|
||||||
def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List[str]:
|
def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List[str]:
|
||||||
# registry looks like: git.vdb.to/cerc-io
|
# registry looks like: git.vdb.to/cerc-io
|
||||||
@ -60,7 +61,9 @@ def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List
|
|||||||
url = f"https://{registry_parts[0]}/v2/{registry_parts[1]}/{container}/tags/list"
|
url = f"https://{registry_parts[0]}/v2/{registry_parts[1]}/{container}/tags/list"
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"Fetching tags from: {url}")
|
print(f"Fetching tags from: {url}")
|
||||||
response = requests.get(url, auth=(registry_info.registry_username, registry_info.registry_token))
|
response = requests.get(
|
||||||
|
url, auth=(registry_info.registry_username, registry_info.registry_token)
|
||||||
|
)
|
||||||
if response.status_code == 200:
|
if response.status_code == 200:
|
||||||
tag_info = response.json()
|
tag_info = response.json()
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
@ -68,7 +71,10 @@ def _get_tags_for_container(container: str, registry_info: RegistryInfo) -> List
|
|||||||
tags_array = tag_info["tags"]
|
tags_array = tag_info["tags"]
|
||||||
return tags_array
|
return tags_array
|
||||||
else:
|
else:
|
||||||
error_exit(f"failed to fetch tags from image registry, status code: {response.status_code}")
|
error_exit(
|
||||||
|
f"failed to fetch tags from image registry, "
|
||||||
|
f"status code: {response.status_code}"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def _find_latest(candidate_tags: List[str]):
|
def _find_latest(candidate_tags: List[str]):
|
||||||
@ -79,9 +85,9 @@ def _find_latest(candidate_tags: List[str]):
|
|||||||
return sorted_candidates[-1]
|
return sorted_candidates[-1]
|
||||||
|
|
||||||
|
|
||||||
def _filter_for_platform(container: str,
|
def _filter_for_platform(
|
||||||
registry_info: RegistryInfo,
|
container: str, registry_info: RegistryInfo, tag_list: List[str]
|
||||||
tag_list: List[str]) -> List[str] :
|
) -> List[str]:
|
||||||
filtered_tags = []
|
filtered_tags = []
|
||||||
this_machine = platform.machine()
|
this_machine = platform.machine()
|
||||||
# Translate between Python and docker platform names
|
# Translate between Python and docker platform names
|
||||||
@ -98,7 +104,7 @@ def _filter_for_platform(container: str,
|
|||||||
manifest = manifest_cmd.inspect_verbose(remote_tag)
|
manifest = manifest_cmd.inspect_verbose(remote_tag)
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"manifest: {manifest}")
|
print(f"manifest: {manifest}")
|
||||||
image_architecture = manifest["Descriptor"]["platform"]["architecture"]
|
image_architecture = manifest["Descriptor"]["platform"]["architecture"]
|
||||||
if opts.o.debug:
|
if opts.o.debug:
|
||||||
print(f"image_architecture: {image_architecture}")
|
print(f"image_architecture: {image_architecture}")
|
||||||
if this_machine == image_architecture:
|
if this_machine == image_architecture:
|
||||||
@ -137,21 +143,44 @@ def _add_local_tag(remote_tag: str, registry: str, local_tag: str):
|
|||||||
|
|
||||||
|
|
||||||
@click.command()
|
@click.command()
|
||||||
@click.option('--include', help="only fetch these containers")
|
@click.option("--include", help="only fetch these containers")
|
||||||
@click.option('--exclude', help="don\'t fetch these containers")
|
@click.option("--exclude", help="don't fetch these containers")
|
||||||
@click.option("--force-local-overwrite", is_flag=True, default=False, help="Overwrite a locally built image, if present")
|
@click.option(
|
||||||
@click.option("--image-registry", required=True, help="Specify the image registry to fetch from")
|
"--force-local-overwrite",
|
||||||
@click.option("--registry-username", required=True, help="Specify the image registry username")
|
is_flag=True,
|
||||||
@click.option("--registry-token", required=True, help="Specify the image registry access token")
|
default=False,
|
||||||
|
help="Overwrite a locally built image, if present",
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--image-registry", required=True, help="Specify the image registry to fetch from"
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--registry-username", required=True, help="Specify the image registry username"
|
||||||
|
)
|
||||||
|
@click.option(
|
||||||
|
"--registry-token", required=True, help="Specify the image registry access token"
|
||||||
|
)
|
||||||
@click.pass_context
|
@click.pass_context
|
||||||
def command(ctx, include, exclude, force_local_overwrite, image_registry, registry_username, registry_token):
|
def command(
|
||||||
'''EXPERIMENTAL: fetch the images for a stack from remote registry'''
|
ctx,
|
||||||
|
include,
|
||||||
|
exclude,
|
||||||
|
force_local_overwrite,
|
||||||
|
image_registry,
|
||||||
|
registry_username,
|
||||||
|
registry_token,
|
||||||
|
):
|
||||||
|
"""EXPERIMENTAL: fetch the images for a stack from remote registry"""
|
||||||
|
|
||||||
registry_info = RegistryInfo(image_registry, registry_username, registry_token)
|
registry_info = RegistryInfo(image_registry, registry_username, registry_token)
|
||||||
docker = DockerClient()
|
docker = DockerClient()
|
||||||
if not opts.o.quiet:
|
if not opts.o.quiet:
|
||||||
print("Logging into container registry:")
|
print("Logging into container registry:")
|
||||||
docker.login(registry_info.registry, registry_info.registry_username, registry_info.registry_token)
|
docker.login(
|
||||||
|
registry_info.registry,
|
||||||
|
registry_info.registry_username,
|
||||||
|
registry_info.registry_token,
|
||||||
|
)
|
||||||
# Generate list of target containers
|
# Generate list of target containers
|
||||||
stack = ctx.obj.stack
|
stack = ctx.obj.stack
|
||||||
containers_in_scope = get_containers_in_scope(stack)
|
containers_in_scope = get_containers_in_scope(stack)
|
||||||
@ -172,19 +201,24 @@ def command(ctx, include, exclude, force_local_overwrite, image_registry, regist
|
|||||||
print(f"Fetching: {image_to_fetch}")
|
print(f"Fetching: {image_to_fetch}")
|
||||||
_fetch_image(image_to_fetch, registry_info)
|
_fetch_image(image_to_fetch, registry_info)
|
||||||
# Now check if the target container already exists exists locally already
|
# Now check if the target container already exists exists locally already
|
||||||
if (_exists_locally(container)):
|
if _exists_locally(container):
|
||||||
if not opts.o.quiet:
|
if not opts.o.quiet:
|
||||||
print(f"Container image {container} already exists locally")
|
print(f"Container image {container} already exists locally")
|
||||||
# if so, fail unless the user specified force-local-overwrite
|
# if so, fail unless the user specified force-local-overwrite
|
||||||
if (force_local_overwrite):
|
if force_local_overwrite:
|
||||||
# In that case remove the existing :local tag
|
# In that case remove the existing :local tag
|
||||||
if not opts.o.quiet:
|
if not opts.o.quiet:
|
||||||
print(f"Warning: overwriting local tag from this image: {container} because "
|
print(
|
||||||
"--force-local-overwrite was specified")
|
f"Warning: overwriting local tag from this image: "
|
||||||
|
f"{container} because --force-local-overwrite was specified"
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
if not opts.o.quiet:
|
if not opts.o.quiet:
|
||||||
print(f"Skipping local tagging for this image: {container} because that would "
|
print(
|
||||||
"overwrite an existing :local tagged image, use --force-local-overwrite to do so.")
|
f"Skipping local tagging for this image: {container} "
|
||||||
|
"because that would overwrite an existing :local tagged "
|
||||||
|
"image, use --force-local-overwrite to do so."
|
||||||
|
)
|
||||||
continue
|
continue
|
||||||
# Tag the fetched image with the :local tag
|
# Tag the fetched image with the :local tag
|
||||||
_add_local_tag(image_to_fetch, image_registry, local_tag)
|
_add_local_tag(image_to_fetch, image_registry, local_tag)
|
||||||
@ -192,4 +226,7 @@ def command(ctx, include, exclude, force_local_overwrite, image_registry, regist
|
|||||||
if opts.o.verbose:
|
if opts.o.verbose:
|
||||||
print(f"Excluding: {container}")
|
print(f"Excluding: {container}")
|
||||||
if not all_containers_found:
|
if not all_containers_found:
|
||||||
print("Warning: couldn't find usable images for one or more containers, this stack will not deploy")
|
print(
|
||||||
|
"Warning: couldn't find usable images for one or more containers, "
|
||||||
|
"this stack will not deploy"
|
||||||
|
)
|
||||||
|
|||||||
@ -39,3 +39,8 @@ node_affinities_key = "node-affinities"
|
|||||||
node_tolerations_key = "node-tolerations"
|
node_tolerations_key = "node-tolerations"
|
||||||
kind_config_filename = "kind-config.yml"
|
kind_config_filename = "kind-config.yml"
|
||||||
kube_config_filename = "kubeconfig.yml"
|
kube_config_filename = "kubeconfig.yml"
|
||||||
|
cri_base_filename = "cri-base.json"
|
||||||
|
unlimited_memlock_key = "unlimited-memlock"
|
||||||
|
runtime_class_key = "runtime-class"
|
||||||
|
high_memlock_runtime = "high-memlock"
|
||||||
|
high_memlock_spec_filename = "high-memlock-spec.json"
|
||||||
|
|||||||
@ -20,7 +20,7 @@ services:
|
|||||||
depends_on:
|
depends_on:
|
||||||
generate-jwt:
|
generate-jwt:
|
||||||
condition: service_completed_successfully
|
condition: service_completed_successfully
|
||||||
env_file:
|
env_file:
|
||||||
- ../config/fixturenet-blast/${NETWORK:-fixturenet}.config
|
- ../config/fixturenet-blast/${NETWORK:-fixturenet}.config
|
||||||
blast-geth:
|
blast-geth:
|
||||||
image: blastio/blast-geth:${NETWORK:-testnet-sepolia}
|
image: blastio/blast-geth:${NETWORK:-testnet-sepolia}
|
||||||
@ -51,7 +51,7 @@ services:
|
|||||||
--nodiscover
|
--nodiscover
|
||||||
--maxpeers=0
|
--maxpeers=0
|
||||||
--rollup.disabletxpoolgossip=true
|
--rollup.disabletxpoolgossip=true
|
||||||
env_file:
|
env_file:
|
||||||
- ../config/fixturenet-blast/${NETWORK:-fixturenet}.config
|
- ../config/fixturenet-blast/${NETWORK:-fixturenet}.config
|
||||||
depends_on:
|
depends_on:
|
||||||
geth-init:
|
geth-init:
|
||||||
@ -73,7 +73,7 @@ services:
|
|||||||
--rollup.config="/blast/rollup.json"
|
--rollup.config="/blast/rollup.json"
|
||||||
depends_on:
|
depends_on:
|
||||||
- blast-geth
|
- blast-geth
|
||||||
env_file:
|
env_file:
|
||||||
- ../config/fixturenet-blast/${NETWORK:-fixturenet}.config
|
- ../config/fixturenet-blast/${NETWORK:-fixturenet}.config
|
||||||
|
|
||||||
volumes:
|
volumes:
|
||||||
|
|||||||
@ -14,4 +14,3 @@ services:
|
|||||||
- "9090"
|
- "9090"
|
||||||
- "9091"
|
- "9091"
|
||||||
- "1317"
|
- "1317"
|
||||||
|
|
||||||
|
|||||||
@ -19,7 +19,7 @@ services:
|
|||||||
depends_on:
|
depends_on:
|
||||||
generate-jwt:
|
generate-jwt:
|
||||||
condition: service_completed_successfully
|
condition: service_completed_successfully
|
||||||
env_file:
|
env_file:
|
||||||
- ../config/mainnet-blast/${NETWORK:-mainnet}.config
|
- ../config/mainnet-blast/${NETWORK:-mainnet}.config
|
||||||
blast-geth:
|
blast-geth:
|
||||||
image: blastio/blast-geth:${NETWORK:-mainnet}
|
image: blastio/blast-geth:${NETWORK:-mainnet}
|
||||||
@ -53,7 +53,7 @@ services:
|
|||||||
--nodiscover
|
--nodiscover
|
||||||
--maxpeers=0
|
--maxpeers=0
|
||||||
--rollup.disabletxpoolgossip=true
|
--rollup.disabletxpoolgossip=true
|
||||||
env_file:
|
env_file:
|
||||||
- ../config/mainnet-blast/${NETWORK:-mainnet}.config
|
- ../config/mainnet-blast/${NETWORK:-mainnet}.config
|
||||||
depends_on:
|
depends_on:
|
||||||
geth-init:
|
geth-init:
|
||||||
@ -76,7 +76,7 @@ services:
|
|||||||
--rollup.config="/blast/rollup.json"
|
--rollup.config="/blast/rollup.json"
|
||||||
depends_on:
|
depends_on:
|
||||||
- blast-geth
|
- blast-geth
|
||||||
env_file:
|
env_file:
|
||||||
- ../config/mainnet-blast/${NETWORK:-mainnet}.config
|
- ../config/mainnet-blast/${NETWORK:-mainnet}.config
|
||||||
|
|
||||||
volumes:
|
volumes:
|
||||||
|
|||||||
@ -17,4 +17,3 @@ services:
|
|||||||
- URL_NEUTRON_TEST_REST=https://rest-palvus.pion-1.ntrn.tech
|
- URL_NEUTRON_TEST_REST=https://rest-palvus.pion-1.ntrn.tech
|
||||||
- URL_NEUTRON_TEST_RPC=https://rpc-palvus.pion-1.ntrn.tech
|
- URL_NEUTRON_TEST_RPC=https://rpc-palvus.pion-1.ntrn.tech
|
||||||
- WALLET_CONNECT_ID=0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x
|
- WALLET_CONNECT_ID=0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x0x
|
||||||
|
|
||||||
|
|||||||
@ -32,4 +32,4 @@ services:
|
|||||||
volumes:
|
volumes:
|
||||||
reth_data:
|
reth_data:
|
||||||
lighthouse_data:
|
lighthouse_data:
|
||||||
shared_data:
|
shared_data:
|
||||||
|
|||||||
@ -12,7 +12,7 @@ services:
|
|||||||
POSTGRES_INITDB_ARGS: "-E UTF8 --locale=C"
|
POSTGRES_INITDB_ARGS: "-E UTF8 --locale=C"
|
||||||
ports:
|
ports:
|
||||||
- "5432"
|
- "5432"
|
||||||
|
|
||||||
test-client:
|
test-client:
|
||||||
image: cerc/test-database-client:local
|
image: cerc/test-database-client:local
|
||||||
|
|
||||||
|
|||||||
@ -1,2 +1,2 @@
|
|||||||
GETH_ROLLUP_SEQUENCERHTTP=https://sequencer.s2.testblast.io
|
GETH_ROLLUP_SEQUENCERHTTP=https://sequencer.s2.testblast.io
|
||||||
OP_NODE_P2P_BOOTNODES=enr:-J-4QM3GLUFfKMSJQuP1UvuKQe8DyovE7Eaiit0l6By4zjTodkR4V8NWXJxNmlg8t8rP-Q-wp3jVmeAOml8cjMj__ROGAYznzb_HgmlkgnY0gmlwhA-cZ_eHb3BzdGFja4X947FQAIlzZWNwMjU2azGhAiuDqvB-AsVSRmnnWr6OHfjgY8YfNclFy9p02flKzXnOg3RjcIJ2YYN1ZHCCdmE,enr:-J-4QDCVpByqQ8nFqCS9aHicqwUfXgzFDslvpEyYz19lvkHLIdtcIGp2d4q5dxHdjRNTO6HXCsnIKxUeuZSPcEbyVQCGAYznzz0RgmlkgnY0gmlwhANiQfuHb3BzdGFja4X947FQAIlzZWNwMjU2azGhAy3AtF2Jh_aPdOohg506Hjmtx-fQ1AKmu71C7PfkWAw9g3RjcIJ2YYN1ZHCCdmE
|
OP_NODE_P2P_BOOTNODES=enr:-J-4QM3GLUFfKMSJQuP1UvuKQe8DyovE7Eaiit0l6By4zjTodkR4V8NWXJxNmlg8t8rP-Q-wp3jVmeAOml8cjMj__ROGAYznzb_HgmlkgnY0gmlwhA-cZ_eHb3BzdGFja4X947FQAIlzZWNwMjU2azGhAiuDqvB-AsVSRmnnWr6OHfjgY8YfNclFy9p02flKzXnOg3RjcIJ2YYN1ZHCCdmE,enr:-J-4QDCVpByqQ8nFqCS9aHicqwUfXgzFDslvpEyYz19lvkHLIdtcIGp2d4q5dxHdjRNTO6HXCsnIKxUeuZSPcEbyVQCGAYznzz0RgmlkgnY0gmlwhANiQfuHb3BzdGFja4X947FQAIlzZWNwMjU2azGhAy3AtF2Jh_aPdOohg506Hjmtx-fQ1AKmu71C7PfkWAw9g3RjcIJ2YYN1ZHCCdmE
|
||||||
|
|||||||
@ -1411,4 +1411,4 @@
|
|||||||
"uid": "nT9VeZoVk",
|
"uid": "nT9VeZoVk",
|
||||||
"version": 2,
|
"version": 2,
|
||||||
"weekStart": ""
|
"weekStart": ""
|
||||||
}
|
}
|
||||||
|
|||||||
@ -65,7 +65,7 @@ if [ -n "$CERC_L1_ADDRESS" ] && [ -n "$CERC_L1_PRIV_KEY" ]; then
|
|||||||
# Sequencer
|
# Sequencer
|
||||||
SEQ=$(echo "$wallet3" | awk '/Address:/{print $2}')
|
SEQ=$(echo "$wallet3" | awk '/Address:/{print $2}')
|
||||||
SEQ_KEY=$(echo "$wallet3" | awk '/Private key:/{print $3}')
|
SEQ_KEY=$(echo "$wallet3" | awk '/Private key:/{print $3}')
|
||||||
|
|
||||||
echo "Funding accounts."
|
echo "Funding accounts."
|
||||||
wait_for_block 1 300
|
wait_for_block 1 300
|
||||||
cast send --from $ADMIN --rpc-url $CERC_L1_RPC --value 5ether $PROPOSER --private-key $ADMIN_KEY
|
cast send --from $ADMIN --rpc-url $CERC_L1_RPC --value 5ether $PROPOSER --private-key $ADMIN_KEY
|
||||||
|
|||||||
@ -56,7 +56,7 @@
|
|||||||
"value": "!validator-pubkey"
|
"value": "!validator-pubkey"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"supply": []
|
"supply": []
|
||||||
},
|
},
|
||||||
@ -269,4 +269,4 @@
|
|||||||
"claims": null
|
"claims": null
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@ -2084,4 +2084,4 @@
|
|||||||
"clientPolicies": {
|
"clientPolicies": {
|
||||||
"policies": []
|
"policies": []
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@ -2388,4 +2388,4 @@
|
|||||||
"clientPolicies": {
|
"clientPolicies": {
|
||||||
"policies": []
|
"policies": []
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@ -29,4 +29,3 @@
|
|||||||
"l1_system_config_address": "0x5531dcff39ec1ec727c4c5d2fc49835368f805a9",
|
"l1_system_config_address": "0x5531dcff39ec1ec727c4c5d2fc49835368f805a9",
|
||||||
"protocol_versions_address": "0x0000000000000000000000000000000000000000"
|
"protocol_versions_address": "0x0000000000000000000000000000000000000000"
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2388,4 +2388,4 @@
|
|||||||
"clientPolicies": {
|
"clientPolicies": {
|
||||||
"policies": []
|
"policies": []
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@ -12,7 +12,10 @@ from fabric import Connection
|
|||||||
|
|
||||||
|
|
||||||
def dump_src_db_to_file(db_host, db_port, db_user, db_password, db_name, file_name):
|
def dump_src_db_to_file(db_host, db_port, db_user, db_password, db_name, file_name):
|
||||||
command = f"pg_dump -h {db_host} -p {db_port} -U {db_user} -d {db_name} -c --inserts -f {file_name}"
|
command = (
|
||||||
|
f"pg_dump -h {db_host} -p {db_port} -U {db_user} "
|
||||||
|
f"-d {db_name} -c --inserts -f {file_name}"
|
||||||
|
)
|
||||||
my_env = os.environ.copy()
|
my_env = os.environ.copy()
|
||||||
my_env["PGPASSWORD"] = db_password
|
my_env["PGPASSWORD"] = db_password
|
||||||
print(f"Exporting from {db_host}:{db_port}/{db_name} to {file_name}... ", end="")
|
print(f"Exporting from {db_host}:{db_port}/{db_name} to {file_name}... ", end="")
|
||||||
|
|||||||
@ -1901,4 +1901,4 @@
|
|||||||
"uid": "b54352dd-35f6-4151-97dc-265bab0c67e9",
|
"uid": "b54352dd-35f6-4151-97dc-265bab0c67e9",
|
||||||
"version": 18,
|
"version": 18,
|
||||||
"weekStart": ""
|
"weekStart": ""
|
||||||
}
|
}
|
||||||
|
|||||||
@ -849,7 +849,7 @@ groups:
|
|||||||
annotations:
|
annotations:
|
||||||
summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
|
summary: Watcher {{ index $labels "instance" }} of group {{ index $labels "job" }} is falling behind external head by {{ index $values "diff" }}
|
||||||
isPaused: false
|
isPaused: false
|
||||||
|
|
||||||
# Secured Finance
|
# Secured Finance
|
||||||
- uid: secured_finance_diff_external
|
- uid: secured_finance_diff_external
|
||||||
title: secured_finance_watcher_head_tracking
|
title: secured_finance_watcher_head_tracking
|
||||||
|
|||||||
@ -14,7 +14,7 @@ echo ACCOUNT_PRIVATE_KEY=${CERC_PRIVATE_KEY_DEPLOYER} >> .env
|
|||||||
if [ -f ${erc20_address_file} ]; then
|
if [ -f ${erc20_address_file} ]; then
|
||||||
echo "${erc20_address_file} already exists, skipping ERC20 contract deployment"
|
echo "${erc20_address_file} already exists, skipping ERC20 contract deployment"
|
||||||
cat ${erc20_address_file}
|
cat ${erc20_address_file}
|
||||||
|
|
||||||
# Keep the container running
|
# Keep the container running
|
||||||
tail -f
|
tail -f
|
||||||
fi
|
fi
|
||||||
|
|||||||
@ -940,4 +940,3 @@ ALTER TABLE ONLY public.state
|
|||||||
--
|
--
|
||||||
-- PostgreSQL database dump complete
|
-- PostgreSQL database dump complete
|
||||||
--
|
--
|
||||||
|
|
||||||
|
|||||||
@ -18,4 +18,3 @@ root@7c4124bb09e3:/src#
|
|||||||
```
|
```
|
||||||
|
|
||||||
Now gerbil commands can be run.
|
Now gerbil commands can be run.
|
||||||
|
|
||||||
|
|||||||
@ -23,7 +23,7 @@ local_npm_registry_url=$2
|
|||||||
versioned_target_package=$(yarn list --pattern ${target_package} --depth=0 --json --non-interactive --no-progress | jq -r '.data.trees[].name')
|
versioned_target_package=$(yarn list --pattern ${target_package} --depth=0 --json --non-interactive --no-progress | jq -r '.data.trees[].name')
|
||||||
# Use yarn info to get URL checksums etc from the new registry
|
# Use yarn info to get URL checksums etc from the new registry
|
||||||
yarn_info_output=$(yarn info --json $versioned_target_package 2>/dev/null)
|
yarn_info_output=$(yarn info --json $versioned_target_package 2>/dev/null)
|
||||||
# First check if the target version actually exists.
|
# First check if the target version actually exists.
|
||||||
# If it doesn't exist there will be no .data.dist.tarball element,
|
# If it doesn't exist there will be no .data.dist.tarball element,
|
||||||
# and jq will output the string "null"
|
# and jq will output the string "null"
|
||||||
package_tarball=$(echo $yarn_info_output | jq -r .data.dist.tarball)
|
package_tarball=$(echo $yarn_info_output | jq -r .data.dist.tarball)
|
||||||
|
|||||||
@ -11,6 +11,8 @@ if len(sys.argv) > 1:
|
|||||||
with open(testnet_config_path) as stream:
|
with open(testnet_config_path) as stream:
|
||||||
data = yaml.safe_load(stream)
|
data = yaml.safe_load(stream)
|
||||||
|
|
||||||
for key, value in data['el_premine'].items():
|
for key, value in data["el_premine"].items():
|
||||||
acct = w3.eth.account.from_mnemonic(data['mnemonic'], account_path=key, passphrase='')
|
acct = w3.eth.account.from_mnemonic(
|
||||||
|
data["mnemonic"], account_path=key, passphrase=""
|
||||||
|
)
|
||||||
print("%s,%s,%s" % (key, acct.address, acct.key.hex()))
|
print("%s,%s,%s" % (key, acct.address, acct.key.hex()))
|
||||||
|
|||||||
@ -4,4 +4,4 @@ out = 'out'
|
|||||||
libs = ['lib']
|
libs = ['lib']
|
||||||
remappings = ['ds-test/=lib/ds-test/src/']
|
remappings = ['ds-test/=lib/ds-test/src/']
|
||||||
|
|
||||||
# See more config options https://github.com/gakonst/foundry/tree/master/config
|
# See more config options https://github.com/gakonst/foundry/tree/master/config
|
||||||
|
|||||||
@ -20,4 +20,4 @@ contract Stateful {
|
|||||||
function inc() public {
|
function inc() public {
|
||||||
x = x + 1;
|
x = x + 1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@ -11,4 +11,4 @@ record:
|
|||||||
foo: bar
|
foo: bar
|
||||||
tags:
|
tags:
|
||||||
- a
|
- a
|
||||||
- b
|
- b
|
||||||
|
|||||||
@ -9,4 +9,4 @@ record:
|
|||||||
foo: bar
|
foo: bar
|
||||||
tags:
|
tags:
|
||||||
- a
|
- a
|
||||||
- b
|
- b
|
||||||
|
|||||||
@ -1,4 +1,4 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
# Build cerc/laconicd
|
# Build cerc/laconicd
|
||||||
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
|
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
|
||||||
docker build -t cerc/laconicd:local ${build_command_args} ${CERC_REPO_BASE_DIR}/laconicd
|
docker build -t cerc/laconicd:local ${build_command_args} ${CERC_REPO_BASE_DIR}/laconicd
|
||||||
|
|||||||
@ -26,8 +26,14 @@ fi
|
|||||||
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
|
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
|
||||||
WORK_DIR="${1:-/app}"
|
WORK_DIR="${1:-/app}"
|
||||||
|
|
||||||
|
if [ -f "${WORK_DIR}/build-webapp.sh" ]; then
|
||||||
|
echo "Building webapp with ${WORK_DIR}/build-webapp.sh ..."
|
||||||
cd "${WORK_DIR}" || exit 1
|
cd "${WORK_DIR}" || exit 1
|
||||||
|
|
||||||
|
./build-webapp.sh || exit 1
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
if [ -f "next.config.mjs" ]; then
|
if [ -f "next.config.mjs" ]; then
|
||||||
NEXT_CONFIG_JS="next.config.mjs"
|
NEXT_CONFIG_JS="next.config.mjs"
|
||||||
IMPORT_OR_REQUIRE="import"
|
IMPORT_OR_REQUIRE="import"
|
||||||
|
|||||||
@ -30,36 +30,44 @@ fi
|
|||||||
CERC_WEBAPP_FILES_DIR="${CERC_WEBAPP_FILES_DIR:-/app}"
|
CERC_WEBAPP_FILES_DIR="${CERC_WEBAPP_FILES_DIR:-/app}"
|
||||||
cd "$CERC_WEBAPP_FILES_DIR"
|
cd "$CERC_WEBAPP_FILES_DIR"
|
||||||
|
|
||||||
"$SCRIPT_DIR/apply-runtime-env.sh" "`pwd`" .next .next-r
|
if [ -f "./run-webapp.sh" ]; then
|
||||||
mv .next .next.old
|
echo "Running webapp with run-webapp.sh ..."
|
||||||
mv .next-r/.next .
|
cd "${WORK_DIR}" || exit 1
|
||||||
|
./run-webapp.sh &
|
||||||
|
tpid=$!
|
||||||
|
wait $tpid
|
||||||
|
else
|
||||||
|
"$SCRIPT_DIR/apply-runtime-env.sh" "`pwd`" .next .next-r
|
||||||
|
mv .next .next.old
|
||||||
|
mv .next-r/.next .
|
||||||
|
|
||||||
if [ "$CERC_NEXTJS_SKIP_GENERATE" != "true" ]; then
|
if [ "$CERC_NEXTJS_SKIP_GENERATE" != "true" ]; then
|
||||||
jq -e '.scripts.cerc_generate' package.json >/dev/null
|
jq -e '.scripts.cerc_generate' package.json >/dev/null
|
||||||
if [ $? -eq 0 ]; then
|
if [ $? -eq 0 ]; then
|
||||||
npm run cerc_generate > gen.out 2>&1 &
|
npm run cerc_generate > gen.out 2>&1 &
|
||||||
tail -f gen.out &
|
tail -f gen.out &
|
||||||
tpid=$!
|
tpid=$!
|
||||||
|
|
||||||
count=0
|
count=0
|
||||||
generate_done="false"
|
generate_done="false"
|
||||||
while [ $count -lt $CERC_MAX_GENERATE_TIME ] && [ "$generate_done" == "false" ]; do
|
while [ $count -lt $CERC_MAX_GENERATE_TIME ] && [ "$generate_done" == "false" ]; do
|
||||||
sleep 1
|
sleep 1
|
||||||
count=$((count + 1))
|
count=$((count + 1))
|
||||||
grep 'rendered as static' gen.out > /dev/null
|
grep 'rendered as static' gen.out > /dev/null
|
||||||
if [ $? -eq 0 ]; then
|
if [ $? -eq 0 ]; then
|
||||||
generate_done="true"
|
generate_done="true"
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
if [ $generate_done != "true" ]; then
|
||||||
|
echo "ERROR: 'npm run cerc_generate' not successful within CERC_MAX_GENERATE_TIME" 1>&2
|
||||||
|
exit 1
|
||||||
fi
|
fi
|
||||||
done
|
|
||||||
|
|
||||||
if [ $generate_done != "true" ]; then
|
kill $tpid $(ps -ef | grep node | grep next | grep generate | awk '{print $2}') 2>/dev/null
|
||||||
echo "ERROR: 'npm run cerc_generate' not successful within CERC_MAX_GENERATE_TIME" 1>&2
|
tpid=""
|
||||||
exit 1
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
kill $tpid $(ps -ef | grep node | grep next | grep generate | awk '{print $2}') 2>/dev/null
|
|
||||||
tpid=""
|
|
||||||
fi
|
fi
|
||||||
fi
|
|
||||||
|
|
||||||
$CERC_BUILD_TOOL start . -- -p ${CERC_LISTEN_PORT:-80}
|
$CERC_BUILD_TOOL start . -- -p ${CERC_LISTEN_PORT:-80}
|
||||||
|
fi
|
||||||
|
|||||||
@ -5,4 +5,3 @@ WORKDIR /app
|
|||||||
COPY . .
|
COPY . .
|
||||||
|
|
||||||
RUN yarn
|
RUN yarn
|
||||||
|
|
||||||
|
|||||||
@ -22,7 +22,7 @@ fi
|
|||||||
# infers the directory from which to load chain configuration files
|
# infers the directory from which to load chain configuration files
|
||||||
# by the presence or absense of the substring "testnet" in the host name
|
# by the presence or absense of the substring "testnet" in the host name
|
||||||
# (browser side -- the host name of the host in the address bar of the browser)
|
# (browser side -- the host name of the host in the address bar of the browser)
|
||||||
# Accordingly we configure our network in both directories in order to
|
# Accordingly we configure our network in both directories in order to
|
||||||
# subvert this lunacy.
|
# subvert this lunacy.
|
||||||
explorer_mainnet_config_dir=/app/chains/mainnet
|
explorer_mainnet_config_dir=/app/chains/mainnet
|
||||||
explorer_testnet_config_dir=/app/chains/testnet
|
explorer_testnet_config_dir=/app/chains/testnet
|
||||||
|
|||||||
@ -2,4 +2,4 @@
|
|||||||
# Build cerc/test-container
|
# Build cerc/test-container
|
||||||
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
|
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
|
||||||
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
|
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
|
||||||
docker build -t cerc/test-database-client:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} $SCRIPT_DIR
|
docker build -t cerc/test-database-client:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} $SCRIPT_DIR
|
||||||
|
|||||||
@ -8,7 +8,7 @@ CERC_WEBAPP_FILES_DIR="${CERC_WEBAPP_FILES_DIR:-/data}"
|
|||||||
CERC_ENABLE_CORS="${CERC_ENABLE_CORS:-false}"
|
CERC_ENABLE_CORS="${CERC_ENABLE_CORS:-false}"
|
||||||
CERC_SINGLE_PAGE_APP="${CERC_SINGLE_PAGE_APP}"
|
CERC_SINGLE_PAGE_APP="${CERC_SINGLE_PAGE_APP}"
|
||||||
|
|
||||||
if [ -z "${CERC_SINGLE_PAGE_APP}" ]; then
|
if [ -z "${CERC_SINGLE_PAGE_APP}" ]; then
|
||||||
# If there is only one HTML file, assume an SPA.
|
# If there is only one HTML file, assume an SPA.
|
||||||
if [ 1 -eq $(find "${CERC_WEBAPP_FILES_DIR}" -name '*.html' | wc -l) ]; then
|
if [ 1 -eq $(find "${CERC_WEBAPP_FILES_DIR}" -name '*.html' | wc -l) ]; then
|
||||||
CERC_SINGLE_PAGE_APP=true
|
CERC_SINGLE_PAGE_APP=true
|
||||||
|
|||||||
@ -0,0 +1,260 @@
|
|||||||
|
# Caddy Ingress Controller for kind
|
||||||
|
# Based on: https://github.com/caddyserver/ingress
|
||||||
|
# Provides automatic HTTPS with Let's Encrypt
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Namespace
|
||||||
|
metadata:
|
||||||
|
name: caddy-system
|
||||||
|
labels:
|
||||||
|
app.kubernetes.io/name: caddy-ingress-controller
|
||||||
|
app.kubernetes.io/instance: caddy-ingress
|
||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: ServiceAccount
|
||||||
|
metadata:
|
||||||
|
name: caddy-ingress-controller
|
||||||
|
namespace: caddy-system
|
||||||
|
labels:
|
||||||
|
app.kubernetes.io/name: caddy-ingress-controller
|
||||||
|
app.kubernetes.io/instance: caddy-ingress
|
||||||
|
---
|
||||||
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
|
kind: ClusterRole
|
||||||
|
metadata:
|
||||||
|
name: caddy-ingress-controller
|
||||||
|
labels:
|
||||||
|
app.kubernetes.io/name: caddy-ingress-controller
|
||||||
|
app.kubernetes.io/instance: caddy-ingress
|
||||||
|
rules:
|
||||||
|
- apiGroups:
|
||||||
|
- ""
|
||||||
|
resources:
|
||||||
|
- configmaps
|
||||||
|
- endpoints
|
||||||
|
- nodes
|
||||||
|
- pods
|
||||||
|
- namespaces
|
||||||
|
- services
|
||||||
|
verbs:
|
||||||
|
- list
|
||||||
|
- watch
|
||||||
|
- get
|
||||||
|
- apiGroups:
|
||||||
|
- ""
|
||||||
|
resources:
|
||||||
|
- secrets
|
||||||
|
verbs:
|
||||||
|
- list
|
||||||
|
- watch
|
||||||
|
- get
|
||||||
|
- create
|
||||||
|
- update
|
||||||
|
- delete
|
||||||
|
- apiGroups:
|
||||||
|
- ""
|
||||||
|
resources:
|
||||||
|
- nodes
|
||||||
|
verbs:
|
||||||
|
- get
|
||||||
|
- apiGroups:
|
||||||
|
- ""
|
||||||
|
resources:
|
||||||
|
- events
|
||||||
|
verbs:
|
||||||
|
- create
|
||||||
|
- patch
|
||||||
|
- apiGroups:
|
||||||
|
- networking.k8s.io
|
||||||
|
resources:
|
||||||
|
- ingresses
|
||||||
|
verbs:
|
||||||
|
- get
|
||||||
|
- list
|
||||||
|
- watch
|
||||||
|
- apiGroups:
|
||||||
|
- networking.k8s.io
|
||||||
|
resources:
|
||||||
|
- ingresses/status
|
||||||
|
verbs:
|
||||||
|
- update
|
||||||
|
- apiGroups:
|
||||||
|
- networking.k8s.io
|
||||||
|
resources:
|
||||||
|
- ingressclasses
|
||||||
|
verbs:
|
||||||
|
- get
|
||||||
|
- list
|
||||||
|
- watch
|
||||||
|
- apiGroups:
|
||||||
|
- coordination.k8s.io
|
||||||
|
resources:
|
||||||
|
- leases
|
||||||
|
verbs:
|
||||||
|
- get
|
||||||
|
- create
|
||||||
|
- update
|
||||||
|
---
|
||||||
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
|
kind: ClusterRoleBinding
|
||||||
|
metadata:
|
||||||
|
name: caddy-ingress-controller
|
||||||
|
labels:
|
||||||
|
app.kubernetes.io/name: caddy-ingress-controller
|
||||||
|
app.kubernetes.io/instance: caddy-ingress
|
||||||
|
roleRef:
|
||||||
|
apiGroup: rbac.authorization.k8s.io
|
||||||
|
kind: ClusterRole
|
||||||
|
name: caddy-ingress-controller
|
||||||
|
subjects:
|
||||||
|
- kind: ServiceAccount
|
||||||
|
name: caddy-ingress-controller
|
||||||
|
namespace: caddy-system
|
||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: ConfigMap
|
||||||
|
metadata:
|
||||||
|
name: caddy-ingress-controller-configmap
|
||||||
|
namespace: caddy-system
|
||||||
|
labels:
|
||||||
|
app.kubernetes.io/name: caddy-ingress-controller
|
||||||
|
app.kubernetes.io/instance: caddy-ingress
|
||||||
|
data:
|
||||||
|
# Caddy global options
|
||||||
|
acmeCA: "https://acme-v02.api.letsencrypt.org/directory"
|
||||||
|
email: ""
|
||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Service
|
||||||
|
metadata:
|
||||||
|
name: caddy-ingress-controller
|
||||||
|
namespace: caddy-system
|
||||||
|
labels:
|
||||||
|
app.kubernetes.io/name: caddy-ingress-controller
|
||||||
|
app.kubernetes.io/instance: caddy-ingress
|
||||||
|
app.kubernetes.io/component: controller
|
||||||
|
spec:
|
||||||
|
type: NodePort
|
||||||
|
ports:
|
||||||
|
- name: http
|
||||||
|
port: 80
|
||||||
|
targetPort: http
|
||||||
|
protocol: TCP
|
||||||
|
- name: https
|
||||||
|
port: 443
|
||||||
|
targetPort: https
|
||||||
|
protocol: TCP
|
||||||
|
selector:
|
||||||
|
app.kubernetes.io/name: caddy-ingress-controller
|
||||||
|
app.kubernetes.io/instance: caddy-ingress
|
||||||
|
app.kubernetes.io/component: controller
|
||||||
|
---
|
||||||
|
apiVersion: apps/v1
|
||||||
|
kind: Deployment
|
||||||
|
metadata:
|
||||||
|
name: caddy-ingress-controller
|
||||||
|
namespace: caddy-system
|
||||||
|
labels:
|
||||||
|
app.kubernetes.io/name: caddy-ingress-controller
|
||||||
|
app.kubernetes.io/instance: caddy-ingress
|
||||||
|
app.kubernetes.io/component: controller
|
||||||
|
spec:
|
||||||
|
replicas: 1
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
app.kubernetes.io/name: caddy-ingress-controller
|
||||||
|
app.kubernetes.io/instance: caddy-ingress
|
||||||
|
app.kubernetes.io/component: controller
|
||||||
|
template:
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
app.kubernetes.io/name: caddy-ingress-controller
|
||||||
|
app.kubernetes.io/instance: caddy-ingress
|
||||||
|
app.kubernetes.io/component: controller
|
||||||
|
spec:
|
||||||
|
serviceAccountName: caddy-ingress-controller
|
||||||
|
terminationGracePeriodSeconds: 60
|
||||||
|
nodeSelector:
|
||||||
|
ingress-ready: "true"
|
||||||
|
kubernetes.io/os: linux
|
||||||
|
tolerations:
|
||||||
|
- effect: NoSchedule
|
||||||
|
key: node-role.kubernetes.io/master
|
||||||
|
operator: Equal
|
||||||
|
- effect: NoSchedule
|
||||||
|
key: node-role.kubernetes.io/control-plane
|
||||||
|
operator: Equal
|
||||||
|
containers:
|
||||||
|
- name: caddy-ingress-controller
|
||||||
|
image: caddy/ingress:latest
|
||||||
|
imagePullPolicy: IfNotPresent
|
||||||
|
ports:
|
||||||
|
- name: http
|
||||||
|
containerPort: 80
|
||||||
|
hostPort: 80
|
||||||
|
protocol: TCP
|
||||||
|
- name: https
|
||||||
|
containerPort: 443
|
||||||
|
hostPort: 443
|
||||||
|
protocol: TCP
|
||||||
|
env:
|
||||||
|
- name: POD_NAME
|
||||||
|
valueFrom:
|
||||||
|
fieldRef:
|
||||||
|
fieldPath: metadata.name
|
||||||
|
- name: POD_NAMESPACE
|
||||||
|
valueFrom:
|
||||||
|
fieldRef:
|
||||||
|
fieldPath: metadata.namespace
|
||||||
|
args:
|
||||||
|
- -config-map=caddy-system/caddy-ingress-controller-configmap
|
||||||
|
- -class-name=caddy
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
cpu: 100m
|
||||||
|
memory: 128Mi
|
||||||
|
limits:
|
||||||
|
cpu: 1000m
|
||||||
|
memory: 512Mi
|
||||||
|
readinessProbe:
|
||||||
|
httpGet:
|
||||||
|
path: /healthz
|
||||||
|
port: 9765
|
||||||
|
initialDelaySeconds: 3
|
||||||
|
periodSeconds: 10
|
||||||
|
livenessProbe:
|
||||||
|
httpGet:
|
||||||
|
path: /healthz
|
||||||
|
port: 9765
|
||||||
|
initialDelaySeconds: 3
|
||||||
|
periodSeconds: 10
|
||||||
|
securityContext:
|
||||||
|
allowPrivilegeEscalation: true
|
||||||
|
capabilities:
|
||||||
|
add:
|
||||||
|
- NET_BIND_SERVICE
|
||||||
|
drop:
|
||||||
|
- ALL
|
||||||
|
runAsUser: 0
|
||||||
|
runAsGroup: 0
|
||||||
|
volumeMounts:
|
||||||
|
- name: caddy-data
|
||||||
|
mountPath: /data
|
||||||
|
- name: caddy-config
|
||||||
|
mountPath: /config
|
||||||
|
volumes:
|
||||||
|
- name: caddy-data
|
||||||
|
emptyDir: {}
|
||||||
|
- name: caddy-config
|
||||||
|
emptyDir: {}
|
||||||
|
---
|
||||||
|
apiVersion: networking.k8s.io/v1
|
||||||
|
kind: IngressClass
|
||||||
|
metadata:
|
||||||
|
name: caddy
|
||||||
|
labels:
|
||||||
|
app.kubernetes.io/name: caddy-ingress-controller
|
||||||
|
app.kubernetes.io/instance: caddy-ingress
|
||||||
|
annotations:
|
||||||
|
ingressclass.kubernetes.io/is-default-class: "true"
|
||||||
|
spec:
|
||||||
|
controller: caddy.io/ingress-controller
|
||||||
@ -6,7 +6,7 @@ JS/TS/NPM builds need an npm registry to store intermediate package artifacts.
|
|||||||
This can be supplied by the user (e.g. using a hosted registry or even npmjs.com), or a local registry using gitea can be deployed by stack orchestrator.
|
This can be supplied by the user (e.g. using a hosted registry or even npmjs.com), or a local registry using gitea can be deployed by stack orchestrator.
|
||||||
To use a user-supplied registry set these environment variables:
|
To use a user-supplied registry set these environment variables:
|
||||||
|
|
||||||
`CERC_NPM_REGISTRY_URL` and
|
`CERC_NPM_REGISTRY_URL` and
|
||||||
`CERC_NPM_AUTH_TOKEN`
|
`CERC_NPM_AUTH_TOKEN`
|
||||||
|
|
||||||
Leave `CERC_NPM_REGISTRY_URL` un-set to use the local gitea registry.
|
Leave `CERC_NPM_REGISTRY_URL` un-set to use the local gitea registry.
|
||||||
@ -22,7 +22,7 @@ $ laconic-so --stack build-support build-containers
|
|||||||
|
|
||||||
```
|
```
|
||||||
$ laconic-so --stack package-registry setup-repositories
|
$ laconic-so --stack package-registry setup-repositories
|
||||||
$ laconic-so --stack package-registry build-containers
|
$ laconic-so --stack package-registry build-containers
|
||||||
$ laconic-so --stack package-registry deploy up
|
$ laconic-so --stack package-registry deploy up
|
||||||
[+] Running 3/3
|
[+] Running 3/3
|
||||||
⠿ Network laconic-aecc4a21d3a502b14522db97d427e850_gitea Created 0.0s
|
⠿ Network laconic-aecc4a21d3a502b14522db97d427e850_gitea Created 0.0s
|
||||||
|
|||||||
@ -14,4 +14,3 @@ containers:
|
|||||||
pods:
|
pods:
|
||||||
- fixturenet-blast
|
- fixturenet-blast
|
||||||
- foundry
|
- foundry
|
||||||
|
|
||||||
@ -3,4 +3,3 @@
|
|||||||
A "loaded" version of fixturenet-eth, with all the bells and whistles enabled.
|
A "loaded" version of fixturenet-eth, with all the bells and whistles enabled.
|
||||||
|
|
||||||
TODO: write me
|
TODO: write me
|
||||||
|
|
||||||
|
|||||||
@ -12,7 +12,7 @@ $ chmod +x ./laconic-so
|
|||||||
$ export PATH=$PATH:$(pwd) # Or move laconic-so to ~/bin or your favorite on-path directory
|
$ export PATH=$PATH:$(pwd) # Or move laconic-so to ~/bin or your favorite on-path directory
|
||||||
```
|
```
|
||||||
## 2. Prepare the local build environment
|
## 2. Prepare the local build environment
|
||||||
Note that this step needs only to be done once on a new machine.
|
Note that this step needs only to be done once on a new machine.
|
||||||
Detailed instructions can be found [here](../build-support/README.md). For the impatient run these commands:
|
Detailed instructions can be found [here](../build-support/README.md). For the impatient run these commands:
|
||||||
```
|
```
|
||||||
$ laconic-so --stack build-support build-containers --exclude cerc/builder-gerbil
|
$ laconic-so --stack build-support build-containers --exclude cerc/builder-gerbil
|
||||||
|
|||||||
@ -52,7 +52,7 @@ laconic-so --stack fixturenet-optimism deploy init --map-ports-to-host any-fixed
|
|||||||
It is usually necessary to expose certain container ports on one or more the host's addresses to allow incoming connections.
|
It is usually necessary to expose certain container ports on one or more the host's addresses to allow incoming connections.
|
||||||
Any ports defined in the Docker compose file are exposed by default with random port assignments, bound to "any" interface (IP address 0.0.0.0), but the port mappings can be customized by editing the "spec" file generated by `laconic-so deploy init`.
|
Any ports defined in the Docker compose file are exposed by default with random port assignments, bound to "any" interface (IP address 0.0.0.0), but the port mappings can be customized by editing the "spec" file generated by `laconic-so deploy init`.
|
||||||
|
|
||||||
In addition, a stack-wide port mapping "recipe" can be applied at the time the
|
In addition, a stack-wide port mapping "recipe" can be applied at the time the
|
||||||
`laconic-so deploy init` command is run, by supplying the desired recipe with the `--map-ports-to-host` option. The following recipes are supported:
|
`laconic-so deploy init` command is run, by supplying the desired recipe with the `--map-ports-to-host` option. The following recipes are supported:
|
||||||
| Recipe | Host Port Mapping |
|
| Recipe | Host Port Mapping |
|
||||||
|--------|-------------------|
|
|--------|-------------------|
|
||||||
@ -62,11 +62,11 @@ In addition, a stack-wide port mapping "recipe" can be applied at the time the
|
|||||||
| localhost-fixed-random | Bind to 127.0.0.1 using a random port number selected at the time the command is run (not checked for already in use)|
|
| localhost-fixed-random | Bind to 127.0.0.1 using a random port number selected at the time the command is run (not checked for already in use)|
|
||||||
| any-fixed-random | Bind to 0.0.0.0 using a random port number selected at the time the command is run (not checked for already in use) |
|
| any-fixed-random | Bind to 0.0.0.0 using a random port number selected at the time the command is run (not checked for already in use) |
|
||||||
|
|
||||||
For example, you may wish to use `any-fixed-random` to generate the initial mappings and then edit the spec file to set the `fixturenet-eth-geth-1` RPC to port 8545 and the `op-geth` RPC to port 9545 on the host.
|
For example, you may wish to use `any-fixed-random` to generate the initial mappings and then edit the spec file to set the `fixturenet-eth-geth-1` RPC to port 8545 and the `op-geth` RPC to port 9545 on the host.
|
||||||
|
|
||||||
Or, you may wish to use `any-same` for the initial mappings -- in which case you'll have to edit the spec to file to ensure the various geth instances aren't all trying to publish to host ports 8545/8546 at once.
|
Or, you may wish to use `any-same` for the initial mappings -- in which case you'll have to edit the spec to file to ensure the various geth instances aren't all trying to publish to host ports 8545/8546 at once.
|
||||||
|
|
||||||
### Data volumes
|
### Data volumes
|
||||||
Container data volumes are bind-mounted to specified paths in the host filesystem.
|
Container data volumes are bind-mounted to specified paths in the host filesystem.
|
||||||
The default setup (generated by `laconic-so deploy init`) places the volumes in the `./data` subdirectory of the deployment directory. The default mappings can be customized by editing the "spec" file generated by `laconic-so deploy init`.
|
The default setup (generated by `laconic-so deploy init`) places the volumes in the `./data` subdirectory of the deployment directory. The default mappings can be customized by editing the "spec" file generated by `laconic-so deploy init`.
|
||||||
|
|
||||||
@ -101,7 +101,7 @@ docker logs -f <CONTAINER_ID>
|
|||||||
|
|
||||||
## Example: bridge some ETH from L1 to L2
|
## Example: bridge some ETH from L1 to L2
|
||||||
|
|
||||||
Send some ETH from the desired account to the `L1StandardBridgeProxy` contract on L1 to test bridging to L2.
|
Send some ETH from the desired account to the `L1StandardBridgeProxy` contract on L1 to test bridging to L2.
|
||||||
|
|
||||||
We can use the testing account `0xe6CE22afe802CAf5fF7d3845cec8c736ecc8d61F` which is pre-funded and unlocked, and the `cerc/foundry:local` container to make use of the `cast` cli.
|
We can use the testing account `0xe6CE22afe802CAf5fF7d3845cec8c736ecc8d61F` which is pre-funded and unlocked, and the `cerc/foundry:local` container to make use of the `cast` cli.
|
||||||
|
|
||||||
|
|||||||
@ -14,26 +14,25 @@
|
|||||||
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
||||||
|
|
||||||
from stack_orchestrator.deploy.deployment_context import DeploymentContext
|
from stack_orchestrator.deploy.deployment_context import DeploymentContext
|
||||||
from ruamel.yaml import YAML
|
|
||||||
|
|
||||||
|
|
||||||
def create(context: DeploymentContext, extra_args):
|
def create(context: DeploymentContext, extra_args):
|
||||||
# Slightly modify the base fixturenet-eth compose file to replace the startup script for fixturenet-eth-geth-1
|
# Slightly modify the base fixturenet-eth compose file to replace the
|
||||||
# We need to start geth with the flag to allow non eip-155 compliant transactions in order to publish the
|
# startup script for fixturenet-eth-geth-1
|
||||||
# deterministic-deployment-proxy contract, which itself is a prereq for Optimism contract deployment
|
# We need to start geth with the flag to allow non eip-155 compliant
|
||||||
fixturenet_eth_compose_file = context.deployment_dir.joinpath('compose', 'docker-compose-fixturenet-eth.yml')
|
# transactions in order to publish the
|
||||||
|
# deterministic-deployment-proxy contract, which itself is a prereq for
|
||||||
|
# Optimism contract deployment
|
||||||
|
fixturenet_eth_compose_file = context.deployment_dir.joinpath(
|
||||||
|
"compose", "docker-compose-fixturenet-eth.yml"
|
||||||
|
)
|
||||||
|
|
||||||
with open(fixturenet_eth_compose_file, 'r') as yaml_file:
|
new_script = "../config/fixturenet-optimism/run-geth.sh:/opt/testnet/run.sh"
|
||||||
yaml = YAML()
|
|
||||||
yaml_data = yaml.load(yaml_file)
|
|
||||||
|
|
||||||
new_script = '../config/fixturenet-optimism/run-geth.sh:/opt/testnet/run.sh'
|
def add_geth_volume(yaml_data):
|
||||||
|
if new_script not in yaml_data["services"]["fixturenet-eth-geth-1"]["volumes"]:
|
||||||
|
yaml_data["services"]["fixturenet-eth-geth-1"]["volumes"].append(new_script)
|
||||||
|
|
||||||
if new_script not in yaml_data['services']['fixturenet-eth-geth-1']['volumes']:
|
context.modify_yaml(fixturenet_eth_compose_file, add_geth_volume)
|
||||||
yaml_data['services']['fixturenet-eth-geth-1']['volumes'].append(new_script)
|
|
||||||
|
|
||||||
with open(fixturenet_eth_compose_file, 'w') as yaml_file:
|
|
||||||
yaml = YAML()
|
|
||||||
yaml.dump(yaml_data, yaml_file)
|
|
||||||
|
|
||||||
return None
|
return None
|
||||||
|
|||||||
@ -38,7 +38,7 @@ laconic-so --stack fixturenet-optimism deploy init --map-ports-to-host any-fixed
|
|||||||
It is usually necessary to expose certain container ports on one or more the host's addresses to allow incoming connections.
|
It is usually necessary to expose certain container ports on one or more the host's addresses to allow incoming connections.
|
||||||
Any ports defined in the Docker compose file are exposed by default with random port assignments, bound to "any" interface (IP address 0.0.0.0), but the port mappings can be customized by editing the "spec" file generated by `laconic-so deploy init`.
|
Any ports defined in the Docker compose file are exposed by default with random port assignments, bound to "any" interface (IP address 0.0.0.0), but the port mappings can be customized by editing the "spec" file generated by `laconic-so deploy init`.
|
||||||
|
|
||||||
In addition, a stack-wide port mapping "recipe" can be applied at the time the
|
In addition, a stack-wide port mapping "recipe" can be applied at the time the
|
||||||
`laconic-so deploy init` command is run, by supplying the desired recipe with the `--map-ports-to-host` option. The following recipes are supported:
|
`laconic-so deploy init` command is run, by supplying the desired recipe with the `--map-ports-to-host` option. The following recipes are supported:
|
||||||
| Recipe | Host Port Mapping |
|
| Recipe | Host Port Mapping |
|
||||||
|--------|-------------------|
|
|--------|-------------------|
|
||||||
@ -48,9 +48,9 @@ In addition, a stack-wide port mapping "recipe" can be applied at the time the
|
|||||||
| localhost-fixed-random | Bind to 127.0.0.1 using a random port number selected at the time the command is run (not checked for already in use)|
|
| localhost-fixed-random | Bind to 127.0.0.1 using a random port number selected at the time the command is run (not checked for already in use)|
|
||||||
| any-fixed-random | Bind to 0.0.0.0 using a random port number selected at the time the command is run (not checked for already in use) |
|
| any-fixed-random | Bind to 0.0.0.0 using a random port number selected at the time the command is run (not checked for already in use) |
|
||||||
|
|
||||||
For example, you may wish to use `any-fixed-random` to generate the initial mappings and then edit the spec file to set the `op-geth` RPC to an easy to remember port like 8545 or 9545 on the host.
|
For example, you may wish to use `any-fixed-random` to generate the initial mappings and then edit the spec file to set the `op-geth` RPC to an easy to remember port like 8545 or 9545 on the host.
|
||||||
|
|
||||||
### Data volumes
|
### Data volumes
|
||||||
Container data volumes are bind-mounted to specified paths in the host filesystem.
|
Container data volumes are bind-mounted to specified paths in the host filesystem.
|
||||||
The default setup (generated by `laconic-so deploy init`) places the volumes in the `./data` subdirectory of the deployment directory. The default mappings can be customized by editing the "spec" file generated by `laconic-so deploy init`.
|
The default setup (generated by `laconic-so deploy init`) places the volumes in the `./data` subdirectory of the deployment directory. The default mappings can be customized by editing the "spec" file generated by `laconic-so deploy init`.
|
||||||
|
|
||||||
|
|||||||
@ -128,7 +128,7 @@ Stack components:
|
|||||||
removed
|
removed
|
||||||
topics
|
topics
|
||||||
transactionHash
|
transactionHash
|
||||||
transactionIndex
|
transactionIndex
|
||||||
}
|
}
|
||||||
|
|
||||||
getEthBlock(
|
getEthBlock(
|
||||||
@ -211,14 +211,14 @@ Stack components:
|
|||||||
hash
|
hash
|
||||||
}
|
}
|
||||||
log {
|
log {
|
||||||
id
|
id
|
||||||
}
|
}
|
||||||
block {
|
block {
|
||||||
number
|
number
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
metadata {
|
metadata {
|
||||||
pageEndsAtTimestamp
|
pageEndsAtTimestamp
|
||||||
isLastPage
|
isLastPage
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -227,7 +227,7 @@ Stack components:
|
|||||||
* Open watcher Ponder app endpoint http://localhost:42069
|
* Open watcher Ponder app endpoint http://localhost:42069
|
||||||
|
|
||||||
* Try GQL query to see transfer events
|
* Try GQL query to see transfer events
|
||||||
|
|
||||||
```graphql
|
```graphql
|
||||||
{
|
{
|
||||||
transferEvents (orderBy: "timestamp", orderDirection: "desc") {
|
transferEvents (orderBy: "timestamp", orderDirection: "desc") {
|
||||||
@ -251,9 +251,9 @@ Stack components:
|
|||||||
```bash
|
```bash
|
||||||
export TOKEN_ADDRESS=$(docker exec payments-ponder-er20-contracts-1 jq -r '.address' ./deployment/erc20-address.json)
|
export TOKEN_ADDRESS=$(docker exec payments-ponder-er20-contracts-1 jq -r '.address' ./deployment/erc20-address.json)
|
||||||
```
|
```
|
||||||
|
|
||||||
* Transfer token
|
* Transfer token
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
docker exec -it payments-ponder-er20-contracts-1 bash -c "yarn token:transfer:docker --token ${TOKEN_ADDRESS} --to 0xe22AD83A0dE117bA0d03d5E94Eb4E0d80a69C62a --amount 5000"
|
docker exec -it payments-ponder-er20-contracts-1 bash -c "yarn token:transfer:docker --token ${TOKEN_ADDRESS} --to 0xe22AD83A0dE117bA0d03d5E94Eb4E0d80a69C62a --amount 5000"
|
||||||
```
|
```
|
||||||
|
|||||||
@ -48,7 +48,7 @@ or see the full logs:
|
|||||||
$ laconic-so --stack fixturenet-pocket deploy logs pocket
|
$ laconic-so --stack fixturenet-pocket deploy logs pocket
|
||||||
```
|
```
|
||||||
## 5. Send a relay request to Pocket node
|
## 5. Send a relay request to Pocket node
|
||||||
The Pocket node serves relay requests at `http://localhost:8081/v1/client/sim`
|
The Pocket node serves relay requests at `http://localhost:8081/v1/client/sim`
|
||||||
|
|
||||||
Example request:
|
Example request:
|
||||||
```
|
```
|
||||||
|
|||||||
@ -154,12 +154,12 @@ http://127.0.0.1:<HOST_PORT>/subgraphs/name/sushiswap/v3-lotus/graphql
|
|||||||
deployment
|
deployment
|
||||||
hasIndexingErrors
|
hasIndexingErrors
|
||||||
}
|
}
|
||||||
|
|
||||||
factories {
|
factories {
|
||||||
poolCount
|
poolCount
|
||||||
id
|
id
|
||||||
}
|
}
|
||||||
|
|
||||||
pools {
|
pools {
|
||||||
id
|
id
|
||||||
token0 {
|
token0 {
|
||||||
|
|||||||
@ -7,7 +7,7 @@ We will use the [ethereum-gravatar](https://github.com/graphprotocol/graph-tooli
|
|||||||
- Clone the repo
|
- Clone the repo
|
||||||
```bash
|
```bash
|
||||||
git clone git@github.com:graphprotocol/graph-tooling.git
|
git clone git@github.com:graphprotocol/graph-tooling.git
|
||||||
|
|
||||||
cd graph-tooling
|
cd graph-tooling
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -54,11 +54,11 @@ The following steps should be similar for every subgraph
|
|||||||
- Create and deploy the subgraph
|
- Create and deploy the subgraph
|
||||||
```bash
|
```bash
|
||||||
pnpm graph create example --node <GRAPH_NODE_DEPLOY_ENDPOINT>
|
pnpm graph create example --node <GRAPH_NODE_DEPLOY_ENDPOINT>
|
||||||
|
|
||||||
pnpm graph deploy example --ipfs <GRAPH_NODE_IPFS_ENDPOINT> --node <GRAPH_NODE_DEPLOY_ENDPOINT>
|
pnpm graph deploy example --ipfs <GRAPH_NODE_IPFS_ENDPOINT> --node <GRAPH_NODE_DEPLOY_ENDPOINT>
|
||||||
```
|
```
|
||||||
- `GRAPH_NODE_DEPLOY_ENDPOINT` and `GRAPH_NODE_IPFS_ENDPOINT` will be available after graph-node has been deployed
|
- `GRAPH_NODE_DEPLOY_ENDPOINT` and `GRAPH_NODE_IPFS_ENDPOINT` will be available after graph-node has been deployed
|
||||||
- More details can be seen in [Create a deployment](./README.md#create-a-deployment) section
|
- More details can be seen in [Create a deployment](./README.md#create-a-deployment) section
|
||||||
|
|
||||||
- The subgraph GQL endpoint will be seen after deploy command runs successfully
|
- The subgraph GQL endpoint will be seen after deploy command runs successfully
|
||||||
|
|
||||||
|
|||||||
@ -1,7 +1,7 @@
|
|||||||
version: "1.0"
|
version: "1.0"
|
||||||
name: kubo
|
name: kubo
|
||||||
description: "Run kubo (IPFS)"
|
description: "Run kubo (IPFS)"
|
||||||
repos:
|
repos:
|
||||||
containers:
|
containers:
|
||||||
pods:
|
pods:
|
||||||
- kubo
|
- kubo
|
||||||
|
|||||||
@ -2,7 +2,7 @@
|
|||||||
|
|
||||||
```
|
```
|
||||||
laconic-so --stack laconic-dot-com setup-repositories
|
laconic-so --stack laconic-dot-com setup-repositories
|
||||||
laconic-so --stack laconic-dot-com build-containers
|
laconic-so --stack laconic-dot-com build-containers
|
||||||
laconic-so --stack laconic-dot-com deploy init --output laconic-website-spec.yml --map-ports-to-host localhost-same
|
laconic-so --stack laconic-dot-com deploy init --output laconic-website-spec.yml --map-ports-to-host localhost-same
|
||||||
laconic-so --stack laconic-dot-com deploy create --spec-file laconic-website-spec.yml --deployment-dir lx-website
|
laconic-so --stack laconic-dot-com deploy create --spec-file laconic-website-spec.yml --deployment-dir lx-website
|
||||||
laconic-so deployment --dir lx-website start
|
laconic-so deployment --dir lx-website start
|
||||||
|
|||||||
@ -2,6 +2,6 @@
|
|||||||
|
|
||||||
```
|
```
|
||||||
laconic-so --stack lasso setup-repositories
|
laconic-so --stack lasso setup-repositories
|
||||||
laconic-so --stack lasso build-containers
|
laconic-so --stack lasso build-containers
|
||||||
laconic-so --stack lasso deploy up
|
laconic-so --stack lasso deploy up
|
||||||
```
|
```
|
||||||
|
|||||||
@ -22,18 +22,24 @@ import yaml
|
|||||||
def create(context, extra_args):
|
def create(context, extra_args):
|
||||||
# Our goal here is just to copy the json files for blast
|
# Our goal here is just to copy the json files for blast
|
||||||
yml_path = context.deployment_dir.joinpath("spec.yml")
|
yml_path = context.deployment_dir.joinpath("spec.yml")
|
||||||
with open(yml_path, 'r') as file:
|
with open(yml_path, "r") as file:
|
||||||
data = yaml.safe_load(file)
|
data = yaml.safe_load(file)
|
||||||
|
|
||||||
mount_point = data['volumes']['blast-data']
|
mount_point = data["volumes"]["blast-data"]
|
||||||
if mount_point[0] == "/":
|
if mount_point[0] == "/":
|
||||||
deploy_dir = Path(mount_point)
|
deploy_dir = Path(mount_point)
|
||||||
else:
|
else:
|
||||||
deploy_dir = context.deployment_dir.joinpath(mount_point)
|
deploy_dir = context.deployment_dir.joinpath(mount_point)
|
||||||
|
|
||||||
command_context = extra_args[2]
|
command_context = extra_args[2]
|
||||||
compose_file = [f for f in command_context.cluster_context.compose_files if "mainnet-blast" in f][0]
|
compose_file = [
|
||||||
source_config_file = Path(compose_file).parent.parent.joinpath("config", "mainnet-blast", "genesis.json")
|
f for f in command_context.cluster_context.compose_files if "mainnet-blast" in f
|
||||||
|
][0]
|
||||||
|
source_config_file = Path(compose_file).parent.parent.joinpath(
|
||||||
|
"config", "mainnet-blast", "genesis.json"
|
||||||
|
)
|
||||||
copy(source_config_file, deploy_dir)
|
copy(source_config_file, deploy_dir)
|
||||||
source_config_file = Path(compose_file).parent.parent.joinpath("config", "mainnet-blast", "rollup.json")
|
source_config_file = Path(compose_file).parent.parent.joinpath(
|
||||||
|
"config", "mainnet-blast", "rollup.json"
|
||||||
|
)
|
||||||
copy(source_config_file, deploy_dir)
|
copy(source_config_file, deploy_dir)
|
||||||
|
|||||||
@ -92,7 +92,7 @@ volumes:
|
|||||||
mainnet_eth_plugeth_geth_1_data: ./data/mainnet_eth_plugeth_geth_1_data
|
mainnet_eth_plugeth_geth_1_data: ./data/mainnet_eth_plugeth_geth_1_data
|
||||||
mainnet_eth_plugeth_lighthouse_1_data: ./data/mainnet_eth_plugeth_lighthouse_1_data
|
mainnet_eth_plugeth_lighthouse_1_data: ./data/mainnet_eth_plugeth_lighthouse_1_data
|
||||||
```
|
```
|
||||||
In addition, a stack-wide port mapping "recipe" can be applied at the time the
|
In addition, a stack-wide port mapping "recipe" can be applied at the time the
|
||||||
`laconic-so deploy init` command is run, by supplying the desired recipe with the `--map-ports-to-host` option. The following recipes are supported:
|
`laconic-so deploy init` command is run, by supplying the desired recipe with the `--map-ports-to-host` option. The following recipes are supported:
|
||||||
| Recipe | Host Port Mapping |
|
| Recipe | Host Port Mapping |
|
||||||
|--------|-------------------|
|
|--------|-------------------|
|
||||||
|
|||||||
@ -27,6 +27,8 @@ def setup(ctx):
|
|||||||
def create(ctx, extra_args):
|
def create(ctx, extra_args):
|
||||||
# Generate the JWT secret and save to its config file
|
# Generate the JWT secret and save to its config file
|
||||||
secret = token_hex(32)
|
secret = token_hex(32)
|
||||||
jwt_file_path = ctx.deployment_dir.joinpath("data", "mainnet_eth_plugeth_config_data", "jwtsecret")
|
jwt_file_path = ctx.deployment_dir.joinpath(
|
||||||
with open(jwt_file_path, 'w+') as jwt_file:
|
"data", "mainnet_eth_plugeth_config_data", "jwtsecret"
|
||||||
|
)
|
||||||
|
with open(jwt_file_path, "w+") as jwt_file:
|
||||||
jwt_file.write(secret)
|
jwt_file.write(secret)
|
||||||
|
|||||||
@ -92,7 +92,7 @@ volumes:
|
|||||||
mainnet_eth_geth_1_data: ./data/mainnet_eth_geth_1_data
|
mainnet_eth_geth_1_data: ./data/mainnet_eth_geth_1_data
|
||||||
mainnet_eth_lighthouse_1_data: ./data/mainnet_eth_lighthouse_1_data
|
mainnet_eth_lighthouse_1_data: ./data/mainnet_eth_lighthouse_1_data
|
||||||
```
|
```
|
||||||
In addition, a stack-wide port mapping "recipe" can be applied at the time the
|
In addition, a stack-wide port mapping "recipe" can be applied at the time the
|
||||||
`laconic-so deploy init` command is run, by supplying the desired recipe with the `--map-ports-to-host` option. The following recipes are supported:
|
`laconic-so deploy init` command is run, by supplying the desired recipe with the `--map-ports-to-host` option. The following recipes are supported:
|
||||||
| Recipe | Host Port Mapping |
|
| Recipe | Host Port Mapping |
|
||||||
|--------|-------------------|
|
|--------|-------------------|
|
||||||
|
|||||||
@ -27,6 +27,8 @@ def setup(ctx):
|
|||||||
def create(ctx, extra_args):
|
def create(ctx, extra_args):
|
||||||
# Generate the JWT secret and save to its config file
|
# Generate the JWT secret and save to its config file
|
||||||
secret = token_hex(32)
|
secret = token_hex(32)
|
||||||
jwt_file_path = ctx.deployment_dir.joinpath("data", "mainnet_eth_config_data", "jwtsecret")
|
jwt_file_path = ctx.deployment_dir.joinpath(
|
||||||
with open(jwt_file_path, 'w+') as jwt_file:
|
"data", "mainnet_eth_config_data", "jwtsecret"
|
||||||
|
)
|
||||||
|
with open(jwt_file_path, "w+") as jwt_file:
|
||||||
jwt_file.write(secret)
|
jwt_file.write(secret)
|
||||||
|
|||||||
@ -36,9 +36,9 @@ laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | 'mainnet-109331-no-histor
|
|||||||
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.034] Maximum peer count total=50
|
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.034] Maximum peer count total=50
|
||||||
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.034] Smartcard socket not found, disabling err="stat /run/pcscd/pcscd.comm: no such file or directory"
|
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.034] Smartcard socket not found, disabling err="stat /run/pcscd/pcscd.comm: no such file or directory"
|
||||||
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.034] Genesis file is a known preset name="Mainnet-109331 without history"
|
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.034] Genesis file is a known preset name="Mainnet-109331 without history"
|
||||||
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.052] Applying genesis state
|
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.052] Applying genesis state
|
||||||
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.052] - Reading epochs unit 0
|
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.052] - Reading epochs unit 0
|
||||||
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.054] - Reading blocks unit 0
|
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.054] - Reading blocks unit 0
|
||||||
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.530] Applied genesis state name=main id=250 genesis=0x4a53c5445584b3bfc20dbfb2ec18ae20037c716f3ba2d9e1da768a9deca17cb4
|
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.530] Applied genesis state name=main id=250 genesis=0x4a53c5445584b3bfc20dbfb2ec18ae20037c716f3ba2d9e1da768a9deca17cb4
|
||||||
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.531] Regenerated local transaction journal transactions=0 accounts=0
|
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.531] Regenerated local transaction journal transactions=0 accounts=0
|
||||||
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.532] Starting peer-to-peer node instance=go-opera/v1.1.2-rc.5-50cd051d-1677276206/linux-amd64/go1.19.10
|
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.532] Starting peer-to-peer node instance=go-opera/v1.1.2-rc.5-50cd051d-1677276206/linux-amd64/go1.19.10
|
||||||
@ -47,7 +47,7 @@ laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.537]
|
|||||||
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.537] IPC endpoint opened url=/root/.opera/opera.ipc
|
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.537] IPC endpoint opened url=/root/.opera/opera.ipc
|
||||||
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.538] HTTP server started endpoint=[::]:18545 prefix= cors=* vhosts=localhost
|
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.538] HTTP server started endpoint=[::]:18545 prefix= cors=* vhosts=localhost
|
||||||
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.538] WebSocket enabled url=ws://[::]:18546
|
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.538] WebSocket enabled url=ws://[::]:18546
|
||||||
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.538] Rebuilding state snapshot
|
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.538] Rebuilding state snapshot
|
||||||
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.538] EVM snapshot module=gossip-store at=000000..000000 generating=true
|
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.538] EVM snapshot module=gossip-store at=000000..000000 generating=true
|
||||||
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.538] Resuming state snapshot generation accounts=0 slots=0 storage=0.00B elapsed="189.74µs"
|
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.538] Resuming state snapshot generation accounts=0 slots=0 storage=0.00B elapsed="189.74µs"
|
||||||
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.538] Generated state snapshot accounts=0 slots=0 storage=0.00B elapsed="265.061µs"
|
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.538] Generated state snapshot accounts=0 slots=0 storage=0.00B elapsed="265.061µs"
|
||||||
|
|||||||
@ -1,2 +1 @@
|
|||||||
# Laconic Mainnet Deployment (experimental)
|
# Laconic Mainnet Deployment (experimental)
|
||||||
|
|
||||||
|
|||||||
@ -14,7 +14,10 @@
|
|||||||
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
|
||||||
|
|
||||||
from stack_orchestrator.util import get_yaml
|
from stack_orchestrator.util import get_yaml
|
||||||
from stack_orchestrator.deploy.deploy_types import DeployCommandContext, LaconicStackSetupCommand
|
from stack_orchestrator.deploy.deploy_types import (
|
||||||
|
DeployCommandContext,
|
||||||
|
LaconicStackSetupCommand,
|
||||||
|
)
|
||||||
from stack_orchestrator.deploy.deployment_context import DeploymentContext
|
from stack_orchestrator.deploy.deployment_context import DeploymentContext
|
||||||
from stack_orchestrator.deploy.stack_state import State
|
from stack_orchestrator.deploy.stack_state import State
|
||||||
from stack_orchestrator.deploy.deploy_util import VolumeMapping, run_container_command
|
from stack_orchestrator.deploy.deploy_util import VolumeMapping, run_container_command
|
||||||
@ -75,7 +78,12 @@ def _copy_gentx_files(network_dir: Path, gentx_file_list: str):
|
|||||||
gentx_files = _comma_delimited_to_list(gentx_file_list)
|
gentx_files = _comma_delimited_to_list(gentx_file_list)
|
||||||
for gentx_file in gentx_files:
|
for gentx_file in gentx_files:
|
||||||
gentx_file_path = Path(gentx_file)
|
gentx_file_path = Path(gentx_file)
|
||||||
copyfile(gentx_file_path, os.path.join(network_dir, "config", "gentx", os.path.basename(gentx_file_path)))
|
copyfile(
|
||||||
|
gentx_file_path,
|
||||||
|
os.path.join(
|
||||||
|
network_dir, "config", "gentx", os.path.basename(gentx_file_path)
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def _remove_persistent_peers(network_dir: Path):
|
def _remove_persistent_peers(network_dir: Path):
|
||||||
@ -86,8 +94,13 @@ def _remove_persistent_peers(network_dir: Path):
|
|||||||
with open(config_file_path, "r") as input_file:
|
with open(config_file_path, "r") as input_file:
|
||||||
config_file_content = input_file.read()
|
config_file_content = input_file.read()
|
||||||
persistent_peers_pattern = '^persistent_peers = "(.+?)"'
|
persistent_peers_pattern = '^persistent_peers = "(.+?)"'
|
||||||
replace_with = "persistent_peers = \"\""
|
replace_with = 'persistent_peers = ""'
|
||||||
config_file_content = re.sub(persistent_peers_pattern, replace_with, config_file_content, flags=re.MULTILINE)
|
config_file_content = re.sub(
|
||||||
|
persistent_peers_pattern,
|
||||||
|
replace_with,
|
||||||
|
config_file_content,
|
||||||
|
flags=re.MULTILINE,
|
||||||
|
)
|
||||||
with open(config_file_path, "w") as output_file:
|
with open(config_file_path, "w") as output_file:
|
||||||
output_file.write(config_file_content)
|
output_file.write(config_file_content)
|
||||||
|
|
||||||
@ -100,8 +113,13 @@ def _insert_persistent_peers(config_dir: Path, new_persistent_peers: str):
|
|||||||
with open(config_file_path, "r") as input_file:
|
with open(config_file_path, "r") as input_file:
|
||||||
config_file_content = input_file.read()
|
config_file_content = input_file.read()
|
||||||
persistent_peers_pattern = r'^persistent_peers = ""'
|
persistent_peers_pattern = r'^persistent_peers = ""'
|
||||||
replace_with = f"persistent_peers = \"{new_persistent_peers}\""
|
replace_with = f'persistent_peers = "{new_persistent_peers}"'
|
||||||
config_file_content = re.sub(persistent_peers_pattern, replace_with, config_file_content, flags=re.MULTILINE)
|
config_file_content = re.sub(
|
||||||
|
persistent_peers_pattern,
|
||||||
|
replace_with,
|
||||||
|
config_file_content,
|
||||||
|
flags=re.MULTILINE,
|
||||||
|
)
|
||||||
with open(config_file_path, "w") as output_file:
|
with open(config_file_path, "w") as output_file:
|
||||||
output_file.write(config_file_content)
|
output_file.write(config_file_content)
|
||||||
|
|
||||||
@ -113,9 +131,11 @@ def _enable_cors(config_dir: Path):
|
|||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
with open(config_file_path, "r") as input_file:
|
with open(config_file_path, "r") as input_file:
|
||||||
config_file_content = input_file.read()
|
config_file_content = input_file.read()
|
||||||
cors_pattern = r'^cors_allowed_origins = \[]'
|
cors_pattern = r"^cors_allowed_origins = \[]"
|
||||||
replace_with = 'cors_allowed_origins = ["*"]'
|
replace_with = 'cors_allowed_origins = ["*"]'
|
||||||
config_file_content = re.sub(cors_pattern, replace_with, config_file_content, flags=re.MULTILINE)
|
config_file_content = re.sub(
|
||||||
|
cors_pattern, replace_with, config_file_content, flags=re.MULTILINE
|
||||||
|
)
|
||||||
with open(config_file_path, "w") as output_file:
|
with open(config_file_path, "w") as output_file:
|
||||||
output_file.write(config_file_content)
|
output_file.write(config_file_content)
|
||||||
app_file_path = config_dir.joinpath("app.toml")
|
app_file_path = config_dir.joinpath("app.toml")
|
||||||
@ -124,9 +144,11 @@ def _enable_cors(config_dir: Path):
|
|||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
with open(app_file_path, "r") as input_file:
|
with open(app_file_path, "r") as input_file:
|
||||||
app_file_content = input_file.read()
|
app_file_content = input_file.read()
|
||||||
cors_pattern = r'^enabled-unsafe-cors = false'
|
cors_pattern = r"^enabled-unsafe-cors = false"
|
||||||
replace_with = "enabled-unsafe-cors = true"
|
replace_with = "enabled-unsafe-cors = true"
|
||||||
app_file_content = re.sub(cors_pattern, replace_with, app_file_content, flags=re.MULTILINE)
|
app_file_content = re.sub(
|
||||||
|
cors_pattern, replace_with, app_file_content, flags=re.MULTILINE
|
||||||
|
)
|
||||||
with open(app_file_path, "w") as output_file:
|
with open(app_file_path, "w") as output_file:
|
||||||
output_file.write(app_file_content)
|
output_file.write(app_file_content)
|
||||||
|
|
||||||
@ -141,7 +163,9 @@ def _set_listen_address(config_dir: Path):
|
|||||||
existing_pattern = r'^laddr = "tcp://127.0.0.1:26657"'
|
existing_pattern = r'^laddr = "tcp://127.0.0.1:26657"'
|
||||||
replace_with = 'laddr = "tcp://0.0.0.0:26657"'
|
replace_with = 'laddr = "tcp://0.0.0.0:26657"'
|
||||||
print(f"Replacing in: {config_file_path}")
|
print(f"Replacing in: {config_file_path}")
|
||||||
config_file_content = re.sub(existing_pattern, replace_with, config_file_content, flags=re.MULTILINE)
|
config_file_content = re.sub(
|
||||||
|
existing_pattern, replace_with, config_file_content, flags=re.MULTILINE
|
||||||
|
)
|
||||||
with open(config_file_path, "w") as output_file:
|
with open(config_file_path, "w") as output_file:
|
||||||
output_file.write(config_file_content)
|
output_file.write(config_file_content)
|
||||||
app_file_path = config_dir.joinpath("app.toml")
|
app_file_path = config_dir.joinpath("app.toml")
|
||||||
@ -152,10 +176,14 @@ def _set_listen_address(config_dir: Path):
|
|||||||
app_file_content = input_file.read()
|
app_file_content = input_file.read()
|
||||||
existing_pattern1 = r'^address = "tcp://localhost:1317"'
|
existing_pattern1 = r'^address = "tcp://localhost:1317"'
|
||||||
replace_with1 = 'address = "tcp://0.0.0.0:1317"'
|
replace_with1 = 'address = "tcp://0.0.0.0:1317"'
|
||||||
app_file_content = re.sub(existing_pattern1, replace_with1, app_file_content, flags=re.MULTILINE)
|
app_file_content = re.sub(
|
||||||
|
existing_pattern1, replace_with1, app_file_content, flags=re.MULTILINE
|
||||||
|
)
|
||||||
existing_pattern2 = r'^address = "localhost:9090"'
|
existing_pattern2 = r'^address = "localhost:9090"'
|
||||||
replace_with2 = 'address = "0.0.0.0:9090"'
|
replace_with2 = 'address = "0.0.0.0:9090"'
|
||||||
app_file_content = re.sub(existing_pattern2, replace_with2, app_file_content, flags=re.MULTILINE)
|
app_file_content = re.sub(
|
||||||
|
existing_pattern2, replace_with2, app_file_content, flags=re.MULTILINE
|
||||||
|
)
|
||||||
with open(app_file_path, "w") as output_file:
|
with open(app_file_path, "w") as output_file:
|
||||||
output_file.write(app_file_content)
|
output_file.write(app_file_content)
|
||||||
|
|
||||||
@ -164,7 +192,10 @@ def _phase_from_params(parameters):
|
|||||||
phase = SetupPhase.ILLEGAL
|
phase = SetupPhase.ILLEGAL
|
||||||
if parameters.initialize_network:
|
if parameters.initialize_network:
|
||||||
if parameters.join_network or parameters.create_network:
|
if parameters.join_network or parameters.create_network:
|
||||||
print("Can't supply --join-network or --create-network with --initialize-network")
|
print(
|
||||||
|
"Can't supply --join-network or --create-network "
|
||||||
|
"with --initialize-network"
|
||||||
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
if not parameters.chain_id:
|
if not parameters.chain_id:
|
||||||
print("--chain-id is required")
|
print("--chain-id is required")
|
||||||
@ -176,24 +207,36 @@ def _phase_from_params(parameters):
|
|||||||
phase = SetupPhase.INITIALIZE
|
phase = SetupPhase.INITIALIZE
|
||||||
elif parameters.join_network:
|
elif parameters.join_network:
|
||||||
if parameters.initialize_network or parameters.create_network:
|
if parameters.initialize_network or parameters.create_network:
|
||||||
print("Can't supply --initialize-network or --create-network with --join-network")
|
print(
|
||||||
|
"Can't supply --initialize-network or --create-network "
|
||||||
|
"with --join-network"
|
||||||
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
phase = SetupPhase.JOIN
|
phase = SetupPhase.JOIN
|
||||||
elif parameters.create_network:
|
elif parameters.create_network:
|
||||||
if parameters.initialize_network or parameters.join_network:
|
if parameters.initialize_network or parameters.join_network:
|
||||||
print("Can't supply --initialize-network or --join-network with --create-network")
|
print(
|
||||||
|
"Can't supply --initialize-network or --join-network "
|
||||||
|
"with --create-network"
|
||||||
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
phase = SetupPhase.CREATE
|
phase = SetupPhase.CREATE
|
||||||
elif parameters.connect_network:
|
elif parameters.connect_network:
|
||||||
if parameters.initialize_network or parameters.join_network:
|
if parameters.initialize_network or parameters.join_network:
|
||||||
print("Can't supply --initialize-network or --join-network with --connect-network")
|
print(
|
||||||
|
"Can't supply --initialize-network or --join-network "
|
||||||
|
"with --connect-network"
|
||||||
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
phase = SetupPhase.CONNECT
|
phase = SetupPhase.CONNECT
|
||||||
return phase
|
return phase
|
||||||
|
|
||||||
|
|
||||||
def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCommand, extra_args):
|
def setup(
|
||||||
|
command_context: DeployCommandContext,
|
||||||
|
parameters: LaconicStackSetupCommand,
|
||||||
|
extra_args,
|
||||||
|
):
|
||||||
options = opts.o
|
options = opts.o
|
||||||
|
|
||||||
currency = "alnt" # Does this need to be a parameter?
|
currency = "alnt" # Does this need to be a parameter?
|
||||||
@ -205,12 +248,9 @@ def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCo
|
|||||||
|
|
||||||
network_dir = Path(parameters.network_dir).absolute()
|
network_dir = Path(parameters.network_dir).absolute()
|
||||||
laconicd_home_path_in_container = "/laconicd-home"
|
laconicd_home_path_in_container = "/laconicd-home"
|
||||||
mounts = [
|
mounts = [VolumeMapping(str(network_dir), laconicd_home_path_in_container)]
|
||||||
VolumeMapping(network_dir, laconicd_home_path_in_container)
|
|
||||||
]
|
|
||||||
|
|
||||||
if phase == SetupPhase.INITIALIZE:
|
if phase == SetupPhase.INITIALIZE:
|
||||||
|
|
||||||
# We want to create the directory so if it exists that's an error
|
# We want to create the directory so if it exists that's an error
|
||||||
if os.path.exists(network_dir):
|
if os.path.exists(network_dir):
|
||||||
print(f"Error: network directory {network_dir} already exists")
|
print(f"Error: network directory {network_dir} already exists")
|
||||||
@ -220,13 +260,18 @@ def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCo
|
|||||||
|
|
||||||
output, status = run_container_command(
|
output, status = run_container_command(
|
||||||
command_context,
|
command_context,
|
||||||
"laconicd", f"laconicd init {parameters.node_moniker} --home {laconicd_home_path_in_container}\
|
"laconicd",
|
||||||
--chain-id {parameters.chain_id} --default-denom {currency}", mounts)
|
f"laconicd init {parameters.node_moniker} "
|
||||||
|
f"--home {laconicd_home_path_in_container} "
|
||||||
|
f"--chain-id {parameters.chain_id} --default-denom {currency}",
|
||||||
|
mounts,
|
||||||
|
)
|
||||||
if options.debug:
|
if options.debug:
|
||||||
print(f"Command output: {output}")
|
print(f"Command output: {output}")
|
||||||
|
|
||||||
elif phase == SetupPhase.JOIN:
|
elif phase == SetupPhase.JOIN:
|
||||||
# In the join phase (alternative to connect) we are participating in a genesis ceremony for the chain
|
# In the join phase (alternative to connect) we are participating in a
|
||||||
|
# genesis ceremony for the chain
|
||||||
if not os.path.exists(network_dir):
|
if not os.path.exists(network_dir):
|
||||||
print(f"Error: network directory {network_dir} doesn't exist")
|
print(f"Error: network directory {network_dir} doesn't exist")
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
@ -234,52 +279,72 @@ def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCo
|
|||||||
chain_id = _get_chain_id_from_config(network_dir)
|
chain_id = _get_chain_id_from_config(network_dir)
|
||||||
|
|
||||||
output1, status1 = run_container_command(
|
output1, status1 = run_container_command(
|
||||||
command_context, "laconicd", f"laconicd keys add {parameters.key_name} --home {laconicd_home_path_in_container}\
|
command_context,
|
||||||
--keyring-backend test", mounts)
|
"laconicd",
|
||||||
|
f"laconicd keys add {parameters.key_name} "
|
||||||
|
f"--home {laconicd_home_path_in_container} --keyring-backend test",
|
||||||
|
mounts,
|
||||||
|
)
|
||||||
if options.debug:
|
if options.debug:
|
||||||
print(f"Command output: {output1}")
|
print(f"Command output: {output1}")
|
||||||
output2, status2 = run_container_command(
|
output2, status2 = run_container_command(
|
||||||
command_context,
|
command_context,
|
||||||
"laconicd",
|
"laconicd",
|
||||||
f"laconicd genesis add-genesis-account {parameters.key_name} 12900000000000000000000{currency}\
|
f"laconicd genesis add-genesis-account {parameters.key_name} "
|
||||||
--home {laconicd_home_path_in_container} --keyring-backend test",
|
f"12900000000000000000000{currency} "
|
||||||
mounts)
|
f"--home {laconicd_home_path_in_container} --keyring-backend test",
|
||||||
|
mounts,
|
||||||
|
)
|
||||||
if options.debug:
|
if options.debug:
|
||||||
print(f"Command output: {output2}")
|
print(f"Command output: {output2}")
|
||||||
output3, status3 = run_container_command(
|
output3, status3 = run_container_command(
|
||||||
command_context,
|
command_context,
|
||||||
"laconicd",
|
"laconicd",
|
||||||
f"laconicd genesis gentx {parameters.key_name} 90000000000{currency} --home {laconicd_home_path_in_container}\
|
f"laconicd genesis gentx {parameters.key_name} "
|
||||||
--chain-id {chain_id} --keyring-backend test",
|
f"90000000000{currency} --home {laconicd_home_path_in_container} "
|
||||||
mounts)
|
f"--chain-id {chain_id} --keyring-backend test",
|
||||||
|
mounts,
|
||||||
|
)
|
||||||
if options.debug:
|
if options.debug:
|
||||||
print(f"Command output: {output3}")
|
print(f"Command output: {output3}")
|
||||||
output4, status4 = run_container_command(
|
output4, status4 = run_container_command(
|
||||||
command_context,
|
command_context,
|
||||||
"laconicd",
|
"laconicd",
|
||||||
f"laconicd keys show {parameters.key_name} -a --home {laconicd_home_path_in_container} --keyring-backend test",
|
f"laconicd keys show {parameters.key_name} -a "
|
||||||
mounts)
|
f"--home {laconicd_home_path_in_container} --keyring-backend test",
|
||||||
|
mounts,
|
||||||
|
)
|
||||||
print(f"Node account address: {output4}")
|
print(f"Node account address: {output4}")
|
||||||
|
|
||||||
elif phase == SetupPhase.CONNECT:
|
elif phase == SetupPhase.CONNECT:
|
||||||
# In the connect phase (named to not conflict with join) we are making a node that syncs a chain with existing genesis.json
|
# In the connect phase (named to not conflict with join) we are
|
||||||
# but not with validator role. We need this kind of node in order to bootstrap it into a validator after it syncs
|
# making a node that syncs a chain with existing genesis.json
|
||||||
|
# but not with validator role. We need this kind of node in order to
|
||||||
|
# bootstrap it into a validator after it syncs
|
||||||
output1, status1 = run_container_command(
|
output1, status1 = run_container_command(
|
||||||
command_context, "laconicd", f"laconicd keys add {parameters.key_name} --home {laconicd_home_path_in_container}\
|
command_context,
|
||||||
--keyring-backend test", mounts)
|
"laconicd",
|
||||||
|
f"laconicd keys add {parameters.key_name} "
|
||||||
|
f"--home {laconicd_home_path_in_container} --keyring-backend test",
|
||||||
|
mounts,
|
||||||
|
)
|
||||||
if options.debug:
|
if options.debug:
|
||||||
print(f"Command output: {output1}")
|
print(f"Command output: {output1}")
|
||||||
output2, status2 = run_container_command(
|
output2, status2 = run_container_command(
|
||||||
command_context,
|
command_context,
|
||||||
"laconicd",
|
"laconicd",
|
||||||
f"laconicd keys show {parameters.key_name} -a --home {laconicd_home_path_in_container} --keyring-backend test",
|
f"laconicd keys show {parameters.key_name} -a "
|
||||||
mounts)
|
f"--home {laconicd_home_path_in_container} --keyring-backend test",
|
||||||
|
mounts,
|
||||||
|
)
|
||||||
print(f"Node account address: {output2}")
|
print(f"Node account address: {output2}")
|
||||||
output3, status3 = run_container_command(
|
output3, status3 = run_container_command(
|
||||||
command_context,
|
command_context,
|
||||||
"laconicd",
|
"laconicd",
|
||||||
f"laconicd cometbft show-validator --home {laconicd_home_path_in_container}",
|
f"laconicd cometbft show-validator "
|
||||||
mounts)
|
f"--home {laconicd_home_path_in_container}",
|
||||||
|
mounts,
|
||||||
|
)
|
||||||
print(f"Node validator address: {output3}")
|
print(f"Node validator address: {output3}")
|
||||||
|
|
||||||
elif phase == SetupPhase.CREATE:
|
elif phase == SetupPhase.CREATE:
|
||||||
@ -287,42 +352,74 @@ def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCo
|
|||||||
print(f"Error: network directory {network_dir} doesn't exist")
|
print(f"Error: network directory {network_dir} doesn't exist")
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
# In the CREATE phase, we are either a "coordinator" node, generating the genesis.json file ourselves
|
# In the CREATE phase, we are either a "coordinator" node,
|
||||||
# OR we are a "not-coordinator" node, consuming a genesis file we got from the coordinator node.
|
# generating the genesis.json file ourselves
|
||||||
|
# OR we are a "not-coordinator" node, consuming a genesis file from
|
||||||
|
# the coordinator node.
|
||||||
if parameters.genesis_file:
|
if parameters.genesis_file:
|
||||||
# We got the genesis file from elsewhere
|
# We got the genesis file from elsewhere
|
||||||
# Copy it into our network dir
|
# Copy it into our network dir
|
||||||
genesis_file_path = Path(parameters.genesis_file)
|
genesis_file_path = Path(parameters.genesis_file)
|
||||||
if not os.path.exists(genesis_file_path):
|
if not os.path.exists(genesis_file_path):
|
||||||
print(f"Error: supplied genesis file: {parameters.genesis_file} does not exist.")
|
print(
|
||||||
|
f"Error: supplied genesis file: {parameters.genesis_file} "
|
||||||
|
"does not exist."
|
||||||
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
copyfile(genesis_file_path, os.path.join(network_dir, "config", os.path.basename(genesis_file_path)))
|
copyfile(
|
||||||
|
genesis_file_path,
|
||||||
|
os.path.join(
|
||||||
|
network_dir, "config", os.path.basename(genesis_file_path)
|
||||||
|
),
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
# We're generating the genesis file
|
# We're generating the genesis file
|
||||||
# First look in the supplied gentx files for the other nodes' keys
|
# First look in the supplied gentx files for the other nodes' keys
|
||||||
other_node_keys = _get_node_keys_from_gentx_files(parameters.gentx_address_list)
|
other_node_keys = _get_node_keys_from_gentx_files(
|
||||||
|
parameters.gentx_address_list
|
||||||
|
)
|
||||||
# Add those keys to our genesis, with balances we determine here (why?)
|
# Add those keys to our genesis, with balances we determine here (why?)
|
||||||
|
outputk = None
|
||||||
for other_node_key in other_node_keys:
|
for other_node_key in other_node_keys:
|
||||||
outputk, statusk = run_container_command(
|
outputk, statusk = run_container_command(
|
||||||
command_context, "laconicd", f"laconicd genesis add-genesis-account {other_node_key} \
|
command_context,
|
||||||
12900000000000000000000{currency}\
|
"laconicd",
|
||||||
--home {laconicd_home_path_in_container} --keyring-backend test", mounts)
|
f"laconicd genesis add-genesis-account {other_node_key} "
|
||||||
if options.debug:
|
f"12900000000000000000000{currency} "
|
||||||
|
f"--home {laconicd_home_path_in_container} "
|
||||||
|
"--keyring-backend test",
|
||||||
|
mounts,
|
||||||
|
)
|
||||||
|
if options.debug and outputk is not None:
|
||||||
print(f"Command output: {outputk}")
|
print(f"Command output: {outputk}")
|
||||||
# Copy the gentx json files into our network dir
|
# Copy the gentx json files into our network dir
|
||||||
_copy_gentx_files(network_dir, parameters.gentx_file_list)
|
_copy_gentx_files(network_dir, parameters.gentx_file_list)
|
||||||
# Now we can run collect-gentxs
|
# Now we can run collect-gentxs
|
||||||
output1, status1 = run_container_command(
|
output1, status1 = run_container_command(
|
||||||
command_context, "laconicd", f"laconicd genesis collect-gentxs --home {laconicd_home_path_in_container}", mounts)
|
command_context,
|
||||||
|
"laconicd",
|
||||||
|
f"laconicd genesis collect-gentxs "
|
||||||
|
f"--home {laconicd_home_path_in_container}",
|
||||||
|
mounts,
|
||||||
|
)
|
||||||
if options.debug:
|
if options.debug:
|
||||||
print(f"Command output: {output1}")
|
print(f"Command output: {output1}")
|
||||||
print(f"Generated genesis file, please copy to other nodes as required: \
|
genesis_path = os.path.join(network_dir, "config", "genesis.json")
|
||||||
{os.path.join(network_dir, 'config', 'genesis.json')}")
|
print(
|
||||||
# Last thing, collect-gentxs puts a likely bogus set of persistent_peers in config.toml so we remove that now
|
f"Generated genesis file, please copy to other nodes "
|
||||||
|
f"as required: {genesis_path}"
|
||||||
|
)
|
||||||
|
# Last thing, collect-gentxs puts a likely bogus set of persistent_peers
|
||||||
|
# in config.toml so we remove that now
|
||||||
_remove_persistent_peers(network_dir)
|
_remove_persistent_peers(network_dir)
|
||||||
# In both cases we validate the genesis file now
|
# In both cases we validate the genesis file now
|
||||||
output2, status1 = run_container_command(
|
output2, status1 = run_container_command(
|
||||||
command_context, "laconicd", f"laconicd genesis validate-genesis --home {laconicd_home_path_in_container}", mounts)
|
command_context,
|
||||||
|
"laconicd",
|
||||||
|
f"laconicd genesis validate-genesis "
|
||||||
|
f"--home {laconicd_home_path_in_container}",
|
||||||
|
mounts,
|
||||||
|
)
|
||||||
print(f"validate-genesis result: {output2}")
|
print(f"validate-genesis result: {output2}")
|
||||||
|
|
||||||
else:
|
else:
|
||||||
@ -341,15 +438,23 @@ def create(deployment_context: DeploymentContext, extra_args):
|
|||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
config_dir_path = network_dir_path.joinpath("config")
|
config_dir_path = network_dir_path.joinpath("config")
|
||||||
if not (config_dir_path.exists() and config_dir_path.is_dir()):
|
if not (config_dir_path.exists() and config_dir_path.is_dir()):
|
||||||
print(f"Error: supplied network directory does not contain a config directory: {config_dir_path}")
|
print(
|
||||||
|
f"Error: supplied network directory does not contain "
|
||||||
|
f"a config directory: {config_dir_path}"
|
||||||
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
data_dir_path = network_dir_path.joinpath("data")
|
data_dir_path = network_dir_path.joinpath("data")
|
||||||
if not (data_dir_path.exists() and data_dir_path.is_dir()):
|
if not (data_dir_path.exists() and data_dir_path.is_dir()):
|
||||||
print(f"Error: supplied network directory does not contain a data directory: {data_dir_path}")
|
print(
|
||||||
|
f"Error: supplied network directory does not contain "
|
||||||
|
f"a data directory: {data_dir_path}"
|
||||||
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
# Copy the network directory contents into our deployment
|
# Copy the network directory contents into our deployment
|
||||||
# TODO: change this to work with non local paths
|
# TODO: change this to work with non local paths
|
||||||
deployment_config_dir = deployment_context.deployment_dir.joinpath("data", "laconicd-config")
|
deployment_config_dir = deployment_context.deployment_dir.joinpath(
|
||||||
|
"data", "laconicd-config"
|
||||||
|
)
|
||||||
copytree(config_dir_path, deployment_config_dir, dirs_exist_ok=True)
|
copytree(config_dir_path, deployment_config_dir, dirs_exist_ok=True)
|
||||||
# If supplied, add the initial persistent peers to the config file
|
# If supplied, add the initial persistent peers to the config file
|
||||||
if extra_args[1]:
|
if extra_args[1]:
|
||||||
@ -360,7 +465,9 @@ def create(deployment_context: DeploymentContext, extra_args):
|
|||||||
_set_listen_address(deployment_config_dir)
|
_set_listen_address(deployment_config_dir)
|
||||||
# Copy the data directory contents into our deployment
|
# Copy the data directory contents into our deployment
|
||||||
# TODO: change this to work with non local paths
|
# TODO: change this to work with non local paths
|
||||||
deployment_data_dir = deployment_context.deployment_dir.joinpath("data", "laconicd-data")
|
deployment_data_dir = deployment_context.deployment_dir.joinpath(
|
||||||
|
"data", "laconicd-data"
|
||||||
|
)
|
||||||
copytree(data_dir_path, deployment_data_dir, dirs_exist_ok=True)
|
copytree(data_dir_path, deployment_data_dir, dirs_exist_ok=True)
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user