Compare commits: telackey/l...main (36 commits)

Commit SHAs (the author and date columns were empty in the source):
39df4683ac, 23ca4c4341, f64ef5d128, 5f8e809b2d, 4a7df2de33, 0c47da42fe,
e290c62aca, f1fdc48aaa, a54072de6c, fa21ff2627, 33d395e213, 75ff60752a,
44b9709717, e56da7dcc1, 60d34217f8, 952389abb0, 5c275aa622, 8576137557,
65c1cdf6b1, 265699bc38, 4a7670a5d6, 6087e1cd31, 1def279d26, 64691bd206,
aef5986135, 6f8f0340d3, 7590d6e237, 573f99dbbe, 8052c1c25e, a674d13493,
0d4f4509c8, 5af27b1b3a, 6c91b87348, 7d18334953, 79c1c5ed99, dfedd9e9ff
Deleted workflow (file name not shown in the compare view; the trigger path suggests .gitea/workflows/fixturenet-eth-plugeth-test.yml):
@@ -1,57 +0,0 @@
-name: Fixturenet-Eth-Plugeth-Test
-
-on:
-  push:
-    branches: '*'
-    paths:
-      - '!**'
-      - '.gitea/workflows/triggers/fixturenet-eth-plugeth-test'
-  schedule: # Note: coordinate with other tests to not overload runners at the same time of day
-    - cron: '2 14 * * *'
-
-
-jobs:
-  test:
-    name: "Run an Ethereum plugeth fixturenet test"
-    runs-on: ubuntu-latest
-    steps:
-      - name: "Clone project repository"
-        uses: actions/checkout@v3
-      # At present the stock setup-python action fails on Linux/aarch64
-      # Conditional steps below workaround this by using deadsnakes for that case only
-      - name: "Install Python for ARM on Linux"
-        if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
-        uses: deadsnakes/action@v3.0.1
-        with:
-          python-version: '3.8'
-      - name: "Install Python cases other than ARM on Linux"
-        if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
-        uses: actions/setup-python@v4
-        with:
-          python-version: '3.8'
-      - name: "Print Python version"
-        run: python3 --version
-      - name: "Install shiv"
-        run: pip install shiv
-      - name: "Generate build version file"
-        run: ./scripts/create_build_tag_file.sh
-      - name: "Build local shiv package"
-        run: ./scripts/build_shiv_package.sh
-      - name: "Run fixturenet-eth tests"
-        run: ./tests/fixturenet-eth-plugeth/run-test.sh
-      - name: Notify Vulcanize Slack on CI failure
-        if: ${{ always() && github.ref_name == 'main' }}
-        uses: ravsamhq/notify-slack-action@v2
-        with:
-          status: ${{ job.status }}
-          notify_when: 'failure'
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
-      - name: Notify DeepStack Slack on CI failure
-        if: ${{ always() && github.ref_name == 'main' }}
-        uses: ravsamhq/notify-slack-action@v2
-        with:
-          status: ${{ job.status }}
-          notify_when: 'failure'
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
Deleted workflow (file name not shown; the trigger path suggests .gitea/workflows/fixturenet-eth-test.yml):
@@ -1,55 +0,0 @@
-name: Fixturenet-Eth-Test
-
-on:
-  push:
-    branches: '*'
-    paths:
-      - '!**'
-      - '.gitea/workflows/triggers/fixturenet-eth-test'
-
-
-jobs:
-  test:
-    name: "Run an Ethereum fixturenet test"
-    runs-on: ubuntu-latest
-    steps:
-      - name: "Clone project repository"
-        uses: actions/checkout@v3
-      # At present the stock setup-python action fails on Linux/aarch64
-      # Conditional steps below workaround this by using deadsnakes for that case only
-      - name: "Install Python for ARM on Linux"
-        if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
-        uses: deadsnakes/action@v3.0.1
-        with:
-          python-version: '3.8'
-      - name: "Install Python cases other than ARM on Linux"
-        if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
-        uses: actions/setup-python@v4
-        with:
-          python-version: '3.8'
-      - name: "Print Python version"
-        run: python3 --version
-      - name: "Install shiv"
-        run: pip install shiv
-      - name: "Generate build version file"
-        run: ./scripts/create_build_tag_file.sh
-      - name: "Build local shiv package"
-        run: ./scripts/build_shiv_package.sh
-      - name: "Run fixturenet-eth tests"
-        run: ./tests/fixturenet-eth/run-test.sh
-      - name: Notify Vulcanize Slack on CI failure
-        if: ${{ always() && github.ref_name == 'main' }}
-        uses: ravsamhq/notify-slack-action@v2
-        with:
-          status: ${{ job.status }}
-          notify_when: 'failure'
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
-      - name: Notify DeepStack Slack on CI failure
-        if: ${{ always() && github.ref_name == 'main' }}
-        uses: ravsamhq/notify-slack-action@v2
-        with:
-          status: ${{ job.status }}
-          notify_when: 'failure'
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }}
The following hunks, from several CI workflow files (names not shown in the compare view), pin the shiv version:
@@ -39,7 +39,7 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv
+        run: pip install shiv==1.0.6
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
@@ -35,7 +35,7 @@ jobs:
      - name: "Print Python version"
        run: python3 --version
      - name: "Install shiv"
-       run: pip install shiv
+       run: pip install shiv==1.0.6
      - name: "Build local shiv package"
        id: build
        run: |
@@ -33,7 +33,7 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv
+        run: pip install shiv==1.0.6
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
@@ -33,7 +33,7 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv
+        run: pip install shiv==1.0.6
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
@@ -33,7 +33,7 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv
+        run: pip install shiv==1.0.6
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
@@ -33,7 +33,7 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv
+        run: pip install shiv==1.0.6
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
@@ -35,7 +35,7 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv
+        run: pip install shiv==1.0.6
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
Rewritten workflow: Fixturenet-Eth-Plugeth-Arm-Test becomes K8s Deployment Control Test
@@ -1,19 +1,22 @@
-name: Fixturenet-Eth-Plugeth-Arm-Test
+name: K8s Deployment Control Test
 
 on:
+  pull_request:
+    branches: '*'
   push:
     branches: '*'
     paths:
       - '!**'
-      - '.gitea/workflows/triggers/fixturenet-eth-plugeth-arm-test'
+      - '.gitea/workflows/triggers/test-k8s-deployment-control'
+      - '.gitea/workflows/test-k8s-deployment-control.yml'
+      - 'tests/k8s-deployment-control/run-test.sh'
   schedule: # Note: coordinate with other tests to not overload runners at the same time of day
-    - cron: '2 14 * * *'
+    - cron: '3 30 * * *'
 
 
 jobs:
   test:
-    name: "Run an Ethereum plugeth fixturenet test"
-    runs-on: ubuntu-latest-arm
+    name: "Run deployment control suite on kind/k8s"
+    runs-on: ubuntu-22.04
     steps:
       - name: "Clone project repository"
         uses: actions/checkout@v3
@@ -32,13 +35,22 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv
+        run: pip install shiv==1.0.6
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
      - name: "Build local shiv package"
         run: ./scripts/build_shiv_package.sh
-      - name: "Run fixturenet-eth tests"
-        run: ./tests/fixturenet-eth-plugeth/run-test.sh
+      - name: "Check cgroups version"
+        run: mount | grep cgroup
+      - name: "Install kind"
+        run: ./tests/scripts/install-kind.sh
+      - name: "Install Kubectl"
+        run: ./tests/scripts/install-kubectl.sh
+      - name: "Run k8s deployment control test"
+        run: |
+          source /opt/bash-utils/cgroup-helper.sh
+          join_cgroup
+          ./tests/k8s-deployment-control/run-test.sh
       - name: Notify Vulcanize Slack on CI failure
         if: ${{ always() && github.ref_name == 'main' }}
         uses: ravsamhq/notify-slack-action@v2
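The "Check cgroups version" step above is a diagnostic: kind needs cgroup support from the host kernel, and listing the cgroup mounts shows whether the runner is on cgroups v1 or v2. A sketch of what the command prints on a typical cgroups-v2 host (exact mount options vary by distribution):

```bash
mount | grep cgroup
# cgroup2 on /sys/fs/cgroup type cgroup2 (rw,nosuid,nodev,noexec,relatime)
```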
@@ -32,7 +32,7 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv
+        run: pip install shiv==1.0.6
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
@@ -33,7 +33,7 @@ jobs:
       - name: "Print Python version"
         run: python3 --version
       - name: "Install shiv"
-        run: pip install shiv
+        run: pip install shiv==1.0.6
       - name: "Generate build version file"
         run: ./scripts/create_build_tag_file.sh
       - name: "Build local shiv package"
Deleted trigger file:
@@ -1,2 +0,0 @@
-Change this file to trigger running the fixturenet-eth-plugeth-arm-test CI job
-
Deleted trigger file:
@@ -1,3 +0,0 @@
-Change this file to trigger running the fixturenet-eth-plugeth-test CI job
-trigger
-trigger
Deleted trigger file:
@@ -1,2 +0,0 @@
-Change this file to trigger running the fixturenet-eth-test CI job
-
Trigger file (one more "Trigger" line appended to re-run its CI job):
@@ -7,3 +7,4 @@ Trigger
 Trigger
 Trigger
 Trigger
+Trigger
Documentation update (laconic-sdk renamed to registry-sdk):
@@ -51,7 +51,7 @@ $ laconic-so build-npms --include <package-name>
 ```
 e.g.
 ```
-$ laconic-so build-npms --include laconic-sdk
+$ laconic-so build-npms --include registry-sdk
 ```
 Build the packages for a stack:
 ```
@@ -56,7 +56,7 @@ laconic-so --stack fixturenet-laconicd build-npms
 Navigate to the Gitea console and switch to the `cerc-io` user then find the `Packages` tab to confirm that these two npm packages have been published:
 
 - `@cerc-io/laconic-registry-cli`
-- `@cerc-io/laconic-sdk`
+- `@cerc-io/registry-sdk`
 
 ### Build and deploy fixturenet containers
New file: docs/k8s-deployment-enhancements.md (+27 lines)

# K8S Deployment Enhancements
## Controlling pod placement
The placement of pods created as part of a stack deployment can be controlled either to avoid certain nodes or to require certain nodes.
### Pod/Node Affinity
Node affinity rules applied to pods target node labels. The effect is that a pod can only be placed on a node that has the specified label value. Note that other pods with no node affinity rules can still be placed on those same nodes: node affinity controls where the pod itself may run, not where other pods run.

Node affinity for stack pods is specified in the deployment's `spec.yml` file as follows:
```
node-affinities:
  - label: nodetype
    value: typeb
```
This example denotes that the stack's pods should only be placed on nodes that have the label `nodetype` with value `typeb`.
### Node Taint Toleration
K8s nodes can be given one or more "taints". These are special fields (distinct from labels) with a name (key) and optional value. When placing pods, the k8s scheduler will only assign a pod to a tainted node if the pod possesses a corresponding "toleration": metadata on the pod specifying that it "tolerates" a given taint. Taint toleration therefore provides a mechanism by which only certain pods can be placed on specific nodes, and is complementary to node affinity.

Taint toleration for stack pods is specified in the deployment's `spec.yml` file as follows:
```
node-tolerations:
  - key: nodetype
    value: typeb
```
This example denotes that the stack's pods will tolerate a taint: `nodetype=typeb`
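The labels and taints that these `spec.yml` stanzas reference live on the cluster nodes themselves and are not created by the deployment. A minimal sketch of preparing a node with standard kubectl commands (the node name `worker-1` is hypothetical):

```bash
# Label a node so pods carrying the matching node-affinity rule can land on it
kubectl label nodes worker-1 nodetype=typeb

# Taint the same node so pods lacking a matching toleration are kept off it
kubectl taint nodes worker-1 nodetype=typeb:NoSchedule
```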
requirements.txt:
@@ -11,3 +11,5 @@ tomli==2.0.1
 validators==0.22.0
 kubernetes>=28.1.0
 humanfriendly>=10.0
+python-gnupg>=0.5.2
+requests>=2.3.2
@@ -34,5 +34,8 @@ volumes_key = "volumes"
 security_key = "security"
 annotations_key = "annotations"
 labels_key = "labels"
+replicas_key = "replicas"
+node_affinities_key = "node-affinities"
+node_tolerations_key = "node-tolerations"
 kind_config_filename = "kind-config.yml"
 kube_config_filename = "kubeconfig.yml"
laconicd init/start script (photon denom replaced by alnt):
@@ -10,6 +10,7 @@ MONIKER="localtestnet"
 KEYRING="test"
 KEYALGO="secp256k1"
 LOGLEVEL="${LOGLEVEL:-info}"
+DENOM="alnt"
 
 
 if [ "$1" == "clean" ] || [ ! -d "$HOME/.laconicd/data/blockstore.db" ]; then
@@ -33,7 +34,7 @@ if [ "$1" == "clean" ] || [ ! -d "$HOME/.laconicd/data/blockstore.db" ]; then
   laconicd keys add $KEY --keyring-backend $KEYRING --algo $KEYALGO
 
   # Set moniker and chain-id for Ethermint (Moniker can be anything, chain-id must be an integer)
-  laconicd init $MONIKER --chain-id $CHAINID --default-denom photon
+  laconicd init $MONIKER --chain-id $CHAINID --default-denom $DENOM
 
   update_genesis() {
     jq "$1" $HOME/.laconicd/config/genesis.json > $HOME/.laconicd/config/tmp_genesis.json &&
@@ -89,10 +90,12 @@ if [ "$1" == "clean" ] || [ ! -d "$HOME/.laconicd/data/blockstore.db" ]; then
   fi
 
   # Allocate genesis accounts (cosmos formatted addresses)
-  laconicd genesis add-genesis-account $KEY 100000000000000000000000000photon --keyring-backend $KEYRING
+  # 10^30 alnt | 10^12 lnt
+  laconicd genesis add-genesis-account $KEY 1000000000000000000000000000000$DENOM --keyring-backend $KEYRING
 
   # Sign genesis transaction
-  laconicd genesis gentx $KEY 1000000000000000000000photon --keyring-backend $KEYRING --chain-id $CHAINID
+  # 10^24 alnt | 10^6 lnt
+  laconicd genesis gentx $KEY 1000000000000000000000000$DENOM --keyring-backend $KEYRING --chain-id $CHAINID
 
   # Collect genesis tx
   laconicd genesis collect-gentxs
@@ -107,7 +110,7 @@ fi
 laconicd start \
   --pruning=nothing \
   --log_level $LOGLEVEL \
-  --minimum-gas-prices=0.0001photon \
+  --minimum-gas-prices=1$DENOM \
   --api.enable \
   --rpc.laddr="tcp://0.0.0.0:26657" \
   --gql-server --gql-playground
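The denomination comments above line up with the `"exponent": "18"` setting in the explorer config later in this diff, i.e. 1 lnt = 10^18 alnt. A quick shell sanity check of the two amounts:

```bash
# Genesis account: 10^30 alnt is 10^(30-18) = 10^12 lnt
echo "genesis: 10^$((30 - 18)) lnt"
# Gentx stake: 10^24 alnt is 10^(24-18) = 10^6 lnt
echo "gentx:   10^$((24 - 18)) lnt"
```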
Registry CLI config (fees re-denominated):
@@ -6,4 +6,4 @@ services:
     bondId:
     chainId: laconic_9000-1
     gas: 350000
-    fees: 200000photon
+    fees: 2000000alnt
@@ -6,4 +6,4 @@ services:
     bondId:
     chainId: laconic_9000-1
     gas: 250000
-    fees: 200000photon
+    fees: 2000000alnt
@@ -1,5 +1,5 @@
 #!/bin/sh
-if [[ -n "$CERC_SCRIPT_DEBUG" ]]; then
+if [ -n "$CERC_SCRIPT_DEBUG" ]; then
   set -x
 fi
 
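This change matters because the script's interpreter is `#!/bin/sh`: `[[ ... ]]` is a bash extension, while `[ ... ]` is the portable POSIX test, so a strictly POSIX shell such as dash rejects the former. A quick way to see the difference, assuming dash is installed:

```bash
dash -c '[ -n "$CERC_SCRIPT_DEBUG" ] && echo debug || echo no-debug'  # works
dash -c '[[ -n "$CERC_SCRIPT_DEBUG" ]] && echo debug'                 # fails: [[: not found
```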
@@ -9,7 +9,7 @@ LOGLEVEL="info"
 laconicd start \
   --pruning=nothing \
   --log_level $LOGLEVEL \
-  --minimum-gas-prices=0.0001photon \
+  --minimum-gas-prices=1alnt \
   --api.enable \
   --gql-server \
   --gql-playground
builder-js Dockerfile:
@@ -68,5 +68,5 @@ ENV PATH="${PATH}:/scripts"
 COPY entrypoint.sh .
 ENTRYPOINT ["./entrypoint.sh"]
 # Placeholder CMD : generally this will be overridden at run time like :
-# docker run -it -v /home/builder/cerc/laconic-sdk:/workspace cerc/builder-js sh -c 'cd /workspace && yarn && yarn build'
+# docker run -it -v /home/builder/cerc/registry-sdk:/workspace cerc/builder-js sh -c 'cd /workspace && yarn && yarn build'
 CMD node --version
@@ -14,7 +14,7 @@ funds_balance=$(echo ${funds_response} | jq -r ".[0].balance[0].quantity")
 echo "Balance is: ${funds_balance}"
 
 # Create a bond
-bond_create_result=$(${registry_command} bond create --type photon --quantity 1000000000)
+bond_create_result=$(${registry_command} bond create --type alnt --quantity 1000000000)
 bond_id=$(echo ${bond_create_result} | jq -r .bondId)
 echo "Created bond with id: ${bond_id}"
 
ping-pub explorer container build script:
@@ -4,5 +4,9 @@ source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
 SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
 
 # Two-stage build is to allow us to pick up both the upstream repo's files, and local files here for config
-docker build -t cerc/ping-pub-base:local ${build_command_args} -f $SCRIPT_DIR/Dockerfile.base $CERC_REPO_BASE_DIR/explorer
+docker build -t cerc/ping-pub-base:local ${build_command_args} -f $SCRIPT_DIR/Dockerfile.base $CERC_REPO_BASE_DIR/cosmos-explorer
+if [[ $? -ne 0 ]]; then
+  echo "FATAL: Base container build failed, exiting"
+  exit 1
+fi
 docker build -t cerc/ping-pub:local ${build_command_args} -f $SCRIPT_DIR/Dockerfile $SCRIPT_DIR
Explorer chain config (JSON):
@@ -7,15 +7,15 @@
   "rpc": [
     {"provider": "LX-tendermint-rpc", "address": "LACONIC_LACONICD_RPC_URL"}
   ],
-  "sdk_version": "0.45.1",
+  "sdk_version": "0.50.3",
   "coin_type": "118",
   "min_tx_fee": "800",
-  "addr_prefix": "ethm",
+  "addr_prefix": "laconic",
   "logo": "/logos/cosmos.svg",
   "assets": [{
-    "base": "photon",
+    "base": "alnt",
     "symbol": "LNT",
-    "exponent": "6",
+    "exponent": "18",
     "coingecko_id": "cosmos",
     "logo": "/logos/cosmos.svg"
   }]
@@ -1,4 +1,4 @@
-laconic-sdk
+registry-sdk
 laconic-registry-cli
 laconic-console
 debug
@@ -7,7 +7,7 @@ github.com/cerc-io/ipld-eth-db-validator
 github.com/cerc-io/ipld-eth-beacon-indexer
 github.com/cerc-io/ipld-eth-beacon-db
 github.com/cerc-io/laconicd
-github.com/cerc-io/laconic-sdk
+github.com/cerc-io/registry-sdk
 github.com/cerc-io/laconic-registry-cli
 github.com/cerc-io/laconic-console
 github.com/cerc-io/mobymask-watcher-ts
@@ -58,5 +58,5 @@ Now npm packages can be built:
 Ensure that `CERC_NPM_AUTH_TOKEN` is set with the token printed above when the package-registry stack was deployed (the actual token value will be different than shown in this example):
 ```
 $ export CERC_NPM_AUTH_TOKEN=84fe66a73698bf11edbdccd0a338236b7d1d5c45
-$ laconic-so build-npms --include laconic-sdk,laconic-registry-cli
+$ laconic-so build-npms --include registry-sdk,laconic-registry-cli
 ```
@@ -64,5 +64,6 @@ $ laconic-so --stack fixturenet-laconic-loaded deploy exec cli ./scripts/create-
 Balance is: 99998999999999998999600000
 Created bond with id: dd88e8d6f9567b32b28e70552aea4419c5dd3307ebae85a284d1fe38904e301a
 Published demo-record-1.yml with id: bafyreierh3xnfivexlscdwubvczmddsnf46uytyfvrbdhkjzztvsz6ruly
+...
 ```
-The published record should be visible in the console.
+The published records should be visible in the console.
Stack definition (laconic-sdk renamed to registry-sdk):
@@ -7,11 +7,11 @@ repos:
   - github.com/lirewine/crypto
   - github.com/lirewine/gem
   - github.com/lirewine/sdk
-  - git.vdb.to/cerc-io/laconic-sdk
+  - git.vdb.to/cerc-io/registry-sdk
   - git.vdb.to/cerc-io/laconic-registry-cli
   - git.vdb.to/cerc-io/laconic-console
 npms:
-  - laconic-sdk
+  - registry-sdk
   - laconic-registry-cli
   - debug
   - crypto
@@ -30,4 +30,3 @@ config:
   cli:
     key: laconicd.mykey
     address: laconicd.myaddress
-
@@ -3,10 +3,10 @@ name: fixturenet-laconicd
 description: "A laconicd fixturenet"
 repos:
   - git.vdb.to/cerc-io/laconicd
-  - git.vdb.to/cerc-io/laconic-sdk
+  - git.vdb.to/cerc-io/registry-sdk
   - git.vdb.to/cerc-io/laconic-registry-cli
 npms:
-  - laconic-sdk
+  - registry-sdk
   - laconic-registry-cli
 containers:
   - cerc/laconicd
Laconicd stack setup command (Python):
@@ -22,7 +22,6 @@ from stack_orchestrator.opts import opts
 from enum import Enum
 from pathlib import Path
 from shutil import copyfile, copytree
-import json
 import os
 import sys
 import tomli
@@ -34,8 +33,9 @@ default_spec_file_content = ""
 class SetupPhase(Enum):
     INITIALIZE = 1
     JOIN = 2
-    CREATE = 3
-    ILLEGAL = 3
+    CONNECT = 3
+    CREATE = 4
+    ILLEGAL = 5
 
 
 def _client_toml_path(network_dir: Path):
@@ -62,29 +62,13 @@ def _get_node_moniker_from_config(network_dir: Path):
     return moniker
 
 
-def _get_node_key_from_gentx(gentx_file_name: str):
-    gentx_file_path = Path(gentx_file_name)
-    if gentx_file_path.exists():
-        with open(Path(gentx_file_name), "rb") as f:
-            parsed_json = json.load(f)
-            return parsed_json['body']['messages'][0]['delegator_address']
-    else:
-        print(f"Error: gentx file: {gentx_file_name} does not exist")
-        sys.exit(1)
-
-
 def _comma_delimited_to_list(list_str: str):
     return list_str.split(",") if list_str else []
 
 
-def _get_node_keys_from_gentx_files(gentx_file_list: str):
-    node_keys = []
-    gentx_files = _comma_delimited_to_list(gentx_file_list)
-    for gentx_file in gentx_files:
-        node_key = _get_node_key_from_gentx(gentx_file)
-        if node_key:
-            node_keys.append(node_key)
-    return node_keys
+def _get_node_keys_from_gentx_files(gentx_address_list: str):
+    gentx_addresses = _comma_delimited_to_list(gentx_address_list)
+    return gentx_addresses
 
 
 def _copy_gentx_files(network_dir: Path, gentx_file_list: str):
@@ -147,6 +131,35 @@ def _enable_cors(config_dir: Path):
             output_file.write(app_file_content)
 
 
+def _set_listen_address(config_dir: Path):
+    config_file_path = config_dir.joinpath("config.toml")
+    if not config_file_path.exists():
+        print("Error: config.toml not found")
+        sys.exit(1)
+    with open(config_file_path, "r") as input_file:
+        config_file_content = input_file.read()
+    existing_pattern = r'^laddr = "tcp://127.0.0.1:26657"'
+    replace_with = 'laddr = "tcp://0.0.0.0:26657"'
+    print(f"Replacing in: {config_file_path}")
+    config_file_content = re.sub(existing_pattern, replace_with, config_file_content, flags=re.MULTILINE)
+    with open(config_file_path, "w") as output_file:
+        output_file.write(config_file_content)
+    app_file_path = config_dir.joinpath("app.toml")
+    if not app_file_path.exists():
+        print("Error: app.toml not found")
+        sys.exit(1)
+    with open(app_file_path, "r") as input_file:
+        app_file_content = input_file.read()
+    existing_pattern1 = r'^address = "tcp://localhost:1317"'
+    replace_with1 = 'address = "tcp://0.0.0.0:1317"'
+    app_file_content = re.sub(existing_pattern1, replace_with1, app_file_content, flags=re.MULTILINE)
+    existing_pattern2 = r'^address = "localhost:9090"'
+    replace_with2 = 'address = "0.0.0.0:9090"'
+    app_file_content = re.sub(existing_pattern2, replace_with2, app_file_content, flags=re.MULTILINE)
+    with open(app_file_path, "w") as output_file:
+        output_file.write(app_file_content)
+
+
 def _phase_from_params(parameters):
     phase = SetupPhase.ILLEGAL
     if parameters.initialize_network:
@@ -171,6 +184,11 @@ def _phase_from_params(parameters):
             print("Can't supply --initialize-network or --join-network with --create-network")
             sys.exit(1)
         phase = SetupPhase.CREATE
+    elif parameters.connect_network:
+        if parameters.initialize_network or parameters.join_network:
+            print("Can't supply --initialize-network or --join-network with --connect-network")
+            sys.exit(1)
+        phase = SetupPhase.CONNECT
     return phase
 
 
@@ -178,7 +196,7 @@ def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCo
 
     options = opts.o
 
-    currency = "stake"  # Does this need to be a parameter?
+    currency = "alnt"  # Does this need to be a parameter?
 
     if options.debug:
         print(f"parameters: {parameters}")
@@ -203,11 +221,12 @@ def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCo
         output, status = run_container_command(
             command_context,
             "laconicd", f"laconicd init {parameters.node_moniker} --home {laconicd_home_path_in_container}\
-            --chain-id {parameters.chain_id}", mounts)
+            --chain-id {parameters.chain_id} --default-denom {currency}", mounts)
         if options.debug:
             print(f"Command output: {output}")
 
     elif phase == SetupPhase.JOIN:
+        # In the join phase (alternative to connect) we are participating in a genesis ceremony for the chain
         if not os.path.exists(network_dir):
             print(f"Error: network directory {network_dir} doesn't exist")
             sys.exit(1)
@@ -222,7 +241,7 @@ def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCo
         output2, status2 = run_container_command(
             command_context,
             "laconicd",
-            f"laconicd add-genesis-account {parameters.key_name} 12900000000000000000000{currency}\
+            f"laconicd genesis add-genesis-account {parameters.key_name} 12900000000000000000000{currency}\
             --home {laconicd_home_path_in_container} --keyring-backend test",
             mounts)
         if options.debug:
@@ -230,7 +249,7 @@ def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCo
         output3, status3 = run_container_command(
             command_context,
             "laconicd",
-            f"laconicd gentx {parameters.key_name} 90000000000{currency} --home {laconicd_home_path_in_container}\
+            f"laconicd genesis gentx {parameters.key_name} 90000000000{currency} --home {laconicd_home_path_in_container}\
             --chain-id {chain_id} --keyring-backend test",
             mounts)
         if options.debug:
@@ -240,7 +259,28 @@ def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCo
             "laconicd",
             f"laconicd keys show {parameters.key_name} -a --home {laconicd_home_path_in_container} --keyring-backend test",
             mounts)
-        print(f"Node validator address: {output4}")
+        print(f"Node account address: {output4}")
+
+    elif phase == SetupPhase.CONNECT:
+        # In the connect phase (named to not conflict with join) we are making a node that syncs a chain with existing genesis.json
+        # but not with validator role. We need this kind of node in order to bootstrap it into a validator after it syncs
+        output1, status1 = run_container_command(
+            command_context, "laconicd", f"laconicd keys add {parameters.key_name} --home {laconicd_home_path_in_container}\
+            --keyring-backend test", mounts)
+        if options.debug:
+            print(f"Command output: {output1}")
+        output2, status2 = run_container_command(
+            command_context,
+            "laconicd",
+            f"laconicd keys show {parameters.key_name} -a --home {laconicd_home_path_in_container} --keyring-backend test",
+            mounts)
+        print(f"Node account address: {output2}")
+        output3, status3 = run_container_command(
+            command_context,
+            "laconicd",
+            f"laconicd cometbft show-validator --home {laconicd_home_path_in_container}",
+            mounts)
+        print(f"Node validator address: {output3}")
 
     elif phase == SetupPhase.CREATE:
         if not os.path.exists(network_dir):
@@ -259,15 +299,13 @@ def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCo
             copyfile(genesis_file_path, os.path.join(network_dir, "config", os.path.basename(genesis_file_path)))
         else:
             # We're generating the genesis file
-            if not parameters.gentx_file_list:
-                print("Error: --gentx-files must be supplied")
-                sys.exit(1)
             # First look in the supplied gentx files for the other nodes' keys
-            other_node_keys = _get_node_keys_from_gentx_files(parameters.gentx_file_list)
+            other_node_keys = _get_node_keys_from_gentx_files(parameters.gentx_address_list)
             # Add those keys to our genesis, with balances we determine here (why?)
             for other_node_key in other_node_keys:
                 outputk, statusk = run_container_command(
-                    command_context, "laconicd", f"laconicd add-genesis-account {other_node_key} 12900000000000000000000{currency}\
+                    command_context, "laconicd", f"laconicd genesis add-genesis-account {other_node_key} \
+                    12900000000000000000000{currency}\
                     --home {laconicd_home_path_in_container} --keyring-backend test", mounts)
                 if options.debug:
                     print(f"Command output: {outputk}")
@@ -275,7 +313,7 @@ def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCo
             _copy_gentx_files(network_dir, parameters.gentx_file_list)
             # Now we can run collect-gentxs
             output1, status1 = run_container_command(
-                command_context, "laconicd", f"laconicd collect-gentxs --home {laconicd_home_path_in_container}", mounts)
+                command_context, "laconicd", f"laconicd genesis collect-gentxs --home {laconicd_home_path_in_container}", mounts)
             if options.debug:
                 print(f"Command output: {output1}")
             print(f"Generated genesis file, please copy to other nodes as required: \
@@ -284,7 +322,7 @@ def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCo
         _remove_persistent_peers(network_dir)
         # In both cases we validate the genesis file now
         output2, status1 = run_container_command(
-            command_context, "laconicd", f"laconicd validate-genesis --home {laconicd_home_path_in_container}", mounts)
+            command_context, "laconicd", f"laconicd genesis validate-genesis --home {laconicd_home_path_in_container}", mounts)
         print(f"validate-genesis result: {output2}")
 
     else:
@@ -319,6 +357,7 @@ def create(deployment_context: DeploymentContext, extra_args):
     _insert_persistent_peers(deployment_config_dir, initial_persistent_peers)
     # Enable CORS headers so explorers and so on can talk to the node
     _enable_cors(deployment_config_dir)
+    _set_listen_address(deployment_config_dir)
     # Copy the data directory contents into our deployment
     # TODO: change this to work with non local paths
     deployment_data_dir = deployment_context.deployment_dir.joinpath("data", "laconicd-data")
@@ -331,7 +370,6 @@ def init(command_context: DeployCommandContext):
 
 
 def get_state(command_context: DeployCommandContext):
-    print("Here we get state")
     return State.CONFIGURED
 
 
@@ -7,12 +7,12 @@ repos:
   - github.com/lirewine/crypto
   - github.com/lirewine/gem
   - github.com/lirewine/sdk
-  - git.vdb.to/cerc-io/laconic-sdk
+  - git.vdb.to/cerc-io/registry-sdk
   - git.vdb.to/cerc-io/laconic-registry-cli
   - git.vdb.to/cerc-io/laconic-console
-  - github.com/ping-pub/explorer
+  - git.vdb.to/cerc-io/cosmos-explorer
 npms:
-  - laconic-sdk
+  - registry-sdk
   - laconic-registry-cli
   - debug
   - crypto
CI test script (adds a from-path mode for testing an installed laconic-so):
@@ -8,8 +8,11 @@ echo "Environment variables:"
 env
 # Test laconic stack
 echo "Running laconic stack test"
-# Bit of a hack, test the most recent package
-TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
+if [ "$1" == "from-path" ]; then
+    TEST_TARGET_SO="laconic-so"
+else
+    TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
+fi
 # Set a non-default repo dir
 export CERC_REPO_BASE_DIR=~/stack-orchestrator-test/repo-base-dir
 echo "Testing this package: $TEST_TARGET_SO"
@@ -109,7 +109,7 @@ Setup a test chain:
 ```bash
 export CERC_NPM_REGISTRY_URL=https://git.vdb.to/api/packages/cerc-io/npm/
 
-laconic-so --stack fixturenet-laconic-loaded setup-repositories --include git.vdb.to/cerc-io/laconicd,git.vdb.to/cerc-io/laconic-sdk,git.vdb.to/cerc-io/laconic-registry-cli,git.vdb.to/cerc-io/laconic-console
+laconic-so --stack fixturenet-laconic-loaded setup-repositories --include git.vdb.to/cerc-io/laconicd,git.vdb.to/cerc-io/registry-sdk,git.vdb.to/cerc-io/laconic-registry-cli,git.vdb.to/cerc-io/laconic-console
 
 laconic-so --stack fixturenet-laconic-loaded build-containers
Package Registry stack README (inline setup instructions replace the external link):
@@ -2,4 +2,50 @@
 
 The Package Registry Stack supports a build environment that requires a package registry (initially for NPM packages only).
 
-Setup instructions can be found [here](../build-support/README.md).
+## Setup
+
+* Set up the required repos and build containers:
+
+  ```bash
+  laconic-so --stack package-registry setup-repositories
+  laconic-so --stack package-registry build-containers
+  ```
+
+* Create a deployment:
+
+  ```bash
+  laconic-so --stack package-registry deploy init --output package-registry-spec.yml
+  # Update port mapping in the laconic-loaded.spec file to resolve port conflicts on host if any
+
+  laconic-so --stack package-registry deploy create --deployment-dir package-registry-deployment --spec-file package-registry-spec.yml
+  ```
+
+* Start the deployment:
+
+  ```bash
+  laconic-so deployment --dir package-registry-deployment start
+  ```
+
+* The local gitea registry can now be accessed at <http://localhost:3000> (the username and password can be taken from the deployment logs)
+
+* Configure the hostname `gitea.local` by updating `/etc/hosts`:
+
+  ```bash
+  sudo nano /etc/hosts
+
+  # Add the following line
+  127.0.0.1 gitea.local
+  ```
+
+  Check resolution:
+
+  ```bash
+  ping gitea.local
+
+  PING gitea.local (127.0.0.1) 56(84) bytes of data.
+  64 bytes from localhost (127.0.0.1): icmp_seq=1 ttl=64 time=0.147 ms
+  64 bytes from localhost (127.0.0.1): icmp_seq=2 ttl=64 time=0.033 ms
+  ...
+  ```
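As a quick smoke test that the registry is actually serving (a sketch assuming the default port mapping noted above; `/api/v1/version` is Gitea's standard version endpoint):

```bash
curl http://gitea.local:3000/api/v1/version
# A JSON body like {"version":"..."} indicates the Gitea API is up
```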
Docker deployer (Python):
@@ -29,14 +29,14 @@ class DockerDeployer(Deployer):
                              compose_env_file=compose_env_file)
         self.type = type
 
-    def up(self, detach, services):
+    def up(self, detach, skip_cluster_management, services):
         if not opts.o.dry_run:
             try:
                 return self.docker.compose.up(detach=detach, services=services)
             except DockerException as e:
                 raise DeployerException(e)
 
-    def down(self, timeout, volumes):
+    def down(self, timeout, volumes, skip_cluster_management):
         if not opts.o.dry_run:
             try:
                 return self.docker.compose.down(timeout=timeout, volumes=volumes)
@@ -91,7 +91,7 @@ def create_deploy_context(
     return DeployCommandContext(stack, cluster_context, deployer)
 
 
-def up_operation(ctx, services_list, stay_attached=False):
+def up_operation(ctx, services_list, stay_attached=False, skip_cluster_management=False):
     global_context = ctx.parent.parent.obj
     deploy_context = ctx.obj
     cluster_context = deploy_context.cluster_context
@@ -102,18 +102,18 @@ def up_operation(ctx, services_list, stay_attached=False):
         print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {services_list}")
         for pre_start_command in cluster_context.pre_start_commands:
             _run_command(global_context, cluster_context.cluster, pre_start_command)
-        deploy_context.deployer.up(detach=not stay_attached, services=services_list)
+        deploy_context.deployer.up(detach=not stay_attached, skip_cluster_management=skip_cluster_management, services=services_list)
         for post_start_command in cluster_context.post_start_commands:
             _run_command(global_context, cluster_context.cluster, post_start_command)
     _orchestrate_cluster_config(global_context, cluster_context.config, deploy_context.deployer, container_exec_env)
 
 
-def down_operation(ctx, delete_volumes, extra_args_list):
+def down_operation(ctx, delete_volumes, extra_args_list, skip_cluster_management=False):
     timeout_arg = None
     if extra_args_list:
         timeout_arg = extra_args_list[0]
     # Specify shutdown timeout (default 10s) to give services enough time to shutdown gracefully
-    ctx.obj.deployer.down(timeout=timeout_arg, volumes=delete_volumes)
+    ctx.obj.deployer.down(timeout=timeout_arg, volumes=delete_volumes, skip_cluster_management=skip_cluster_management)
 
 
 def status_operation(ctx):
@@ -50,8 +50,10 @@ class LaconicStackSetupCommand:
     key_name: str
     initialize_network: bool
     join_network: bool
+    connect_network: bool
     create_network: bool
     gentx_file_list: str
+    gentx_address_list: str
     genesis_file: str
     network_dir: str
@@ -13,7 +13,6 @@
 # You should have received a copy of the GNU Affero General Public License
 # along with this program. If not, see <http:#www.gnu.org/licenses/>.
 
-import os
 from typing import List, Any
 from stack_orchestrator.deploy.deploy_types import DeployCommandContext, VolumeMapping
 from stack_orchestrator.util import get_parsed_stack_config, get_yaml, get_pod_list, resolve_compose_file
@@ -83,7 +82,9 @@ def run_container_command(ctx: DeployCommandContext, service: str, command: str,
     docker_output = deployer.run(
         container_image,
         ["-c", command], entrypoint="sh",
-        user=f"{os.getuid()}:{os.getgid()}",
+        # Current laconicd container has a bug where it crashes when run not as root
+        # Commented out line below is a workaround. Created files end up owned by root on the host
+        # user=f"{os.getuid()}:{os.getgid()}",
         volumes=docker_volumes
     )
     # There doesn't seem to be a way to get an exit code from docker.run()
Deployer abstract base class:
@@ -20,11 +20,11 @@ from pathlib import Path
 class Deployer(ABC):
 
     @abstractmethod
-    def up(self, detach, services):
+    def up(self, detach, skip_cluster_management, services):
         pass
 
     @abstractmethod
-    def down(self, timeout, volumes):
+    def down(self, timeout, volumes, skip_cluster_management):
         pass
 
     @abstractmethod
@ -61,47 +61,57 @@ def make_deploy_context(ctx) -> DeployCommandContext:
|
|||||||
    cluster_name, env_file, deployment_type)


# TODO: remove legacy up command since it's an alias for start
@command.command()
@click.option("--stay-attached/--detatch-terminal", default=False, help="detatch or not to see container stdout")
+@click.option("--skip-cluster-management/--perform-cluster-management",
+              default=False, help="Skip cluster initialization/tear-down (only for kind-k8s deployments)")
@click.argument('extra_args', nargs=-1)  # help: command: up <service1> <service2>
@click.pass_context
-def up(ctx, stay_attached, extra_args):
+def up(ctx, stay_attached, skip_cluster_management, extra_args):
    ctx.obj = make_deploy_context(ctx)
    services_list = list(extra_args) or None
-    up_operation(ctx, services_list, stay_attached)
+    up_operation(ctx, services_list, stay_attached, skip_cluster_management)


# start is the preferred alias for up
@command.command()
@click.option("--stay-attached/--detatch-terminal", default=False, help="detatch or not to see container stdout")
+@click.option("--skip-cluster-management/--perform-cluster-management",
+              default=False, help="Skip cluster initialization/tear-down (only for kind-k8s deployments)")
@click.argument('extra_args', nargs=-1)  # help: command: up <service1> <service2>
@click.pass_context
-def start(ctx, stay_attached, extra_args):
+def start(ctx, stay_attached, skip_cluster_management, extra_args):
    ctx.obj = make_deploy_context(ctx)
    services_list = list(extra_args) or None
-    up_operation(ctx, services_list, stay_attached)
+    up_operation(ctx, services_list, stay_attached, skip_cluster_management)


# TODO: remove legacy down command since it's an alias for stop
@command.command()
@click.option("--delete-volumes/--preserve-volumes", default=False, help="delete data volumes")
+@click.option("--skip-cluster-management/--perform-cluster-management",
+              default=False, help="Skip cluster initialization/tear-down (only for kind-k8s deployments)")
@click.argument('extra_args', nargs=-1)  # help: command: down <service1> <service2>
@click.pass_context
-def down(ctx, delete_volumes, extra_args):
+def down(ctx, delete_volumes, skip_cluster_management, extra_args):
    # Get the stack config file name
    # TODO: add cluster name and env file here
    ctx.obj = make_deploy_context(ctx)
-    down_operation(ctx, delete_volumes, extra_args)
+    down_operation(ctx, delete_volumes, extra_args, skip_cluster_management)


# stop is the preferred alias for down
@command.command()
@click.option("--delete-volumes/--preserve-volumes", default=False, help="delete data volumes")
+@click.option("--skip-cluster-management/--perform-cluster-management",
+              default=False, help="Skip cluster initialization/tear-down (only for kind-k8s deployments)")
@click.argument('extra_args', nargs=-1)  # help: command: down <service1> <service2>
@click.pass_context
-def stop(ctx, delete_volumes, extra_args):
+def stop(ctx, delete_volumes, skip_cluster_management, extra_args):
    # TODO: add cluster name and env file here
    ctx.obj = make_deploy_context(ctx)
-    down_operation(ctx, delete_volumes, extra_args)
+    down_operation(ctx, delete_volumes, extra_args, skip_cluster_management)


@command.command()
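A minimal sketch of exercising the new flag via click's test runner (the module path, command group name, and --dir wiring are assumptions for illustration, not part of this diff):

    # Hypothetical usage sketch: invoke the deployment "start" command with
    # --skip-cluster-management so an existing kind cluster is left untouched.
    from click.testing import CliRunner

    from stack_orchestrator.deploy.deployment import command  # assumed module path

    runner = CliRunner()
    result = runner.invoke(
        command, ["--dir", "./my-deployment", "start", "--skip-cluster-management"]
    )
    print(result.output)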
@@ -514,6 +514,23 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, netw
                os.mkdir(destination_script_dir)
            script_paths = get_pod_script_paths(parsed_stack, pod)
            _copy_files_to_directory(script_paths, destination_script_dir)
+    if parsed_spec.is_kubernetes_deployment():
+        for configmap in parsed_spec.get_configmaps():
+            source_config_dir = resolve_config_dir(stack_name, configmap)
+            if os.path.exists(source_config_dir):
+                destination_config_dir = deployment_dir_path.joinpath("configmaps", configmap)
+                copytree(source_config_dir, destination_config_dir, dirs_exist_ok=True)
+    else:
+        # TODO: We should probably only do this if the volume is marked :ro.
+        for volume_name, volume_path in parsed_spec.get_volumes().items():
+            source_config_dir = resolve_config_dir(stack_name, volume_name)
+            # Only copy if the source exists and is _not_ empty.
+            if os.path.exists(source_config_dir) and os.listdir(source_config_dir):
+                destination_config_dir = deployment_dir_path.joinpath(volume_path)
+                # Only copy if the destination exists and _is_ empty.
+                if os.path.exists(destination_config_dir) and not os.listdir(destination_config_dir):
+                    copytree(source_config_dir, destination_config_dir, dirs_exist_ok=True)
    # Delegate to the stack's Python code
    # The deploy create command doesn't require a --stack argument so we need to insert the
    # stack member here.
@@ -535,15 +552,17 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, netw
@click.option("--chain-id", help="The new chain id")
@click.option("--key-name", help="Name for new node key")
@click.option("--gentx-files", help="List of comma-delimited gentx filenames from other nodes")
+@click.option("--gentx-addresses", type=str, help="List of comma-delimited validator addresses for other nodes")
@click.option("--genesis-file", help="Genesis file for the network")
@click.option("--initialize-network", is_flag=True, default=False, help="Initialize phase")
@click.option("--join-network", is_flag=True, default=False, help="Join phase")
+@click.option("--connect-network", is_flag=True, default=False, help="Connect phase")
@click.option("--create-network", is_flag=True, default=False, help="Create phase")
@click.option("--network-dir", help="Directory for network files")
@click.argument('extra_args', nargs=-1)
@click.pass_context
-def setup(ctx, node_moniker, chain_id, key_name, gentx_files, genesis_file, initialize_network, join_network, create_network,
-          network_dir, extra_args):
-    parmeters = LaconicStackSetupCommand(chain_id, node_moniker, key_name, initialize_network, join_network, create_network,
-                                         gentx_files, genesis_file, network_dir)
+def setup(ctx, node_moniker, chain_id, key_name, gentx_files, gentx_addresses, genesis_file, initialize_network, join_network,
+          connect_network, create_network, network_dir, extra_args):
+    parmeters = LaconicStackSetupCommand(chain_id, node_moniker, key_name, initialize_network, join_network, connect_network,
+                                         create_network, gentx_files, gentx_addresses, genesis_file, network_dir)
    call_stack_deploy_setup(ctx.obj, parmeters, extra_args)
@@ -14,6 +14,7 @@
# along with this program.  If not, see <http:#www.gnu.org/licenses/>.

import os
+import base64

from kubernetes import client
from typing import Any, List, Set
@@ -78,28 +79,40 @@ class ClusterInfo:
        if (opts.o.debug):
            print(f"Env vars: {self.environment_variables.map}")

-    def get_nodeport(self):
+    def get_nodeports(self):
+        nodeports = []
        for pod_name in self.parsed_pod_yaml_map:
            pod = self.parsed_pod_yaml_map[pod_name]
            services = pod["services"]
            for service_name in services:
                service_info = services[service_name]
                if "ports" in service_info:
-                    port = int(service_info["ports"][0])
-                    if opts.o.debug:
-                        print(f"service port: {port}")
-                    service = client.V1Service(
-                        metadata=client.V1ObjectMeta(name=f"{self.app_name}-nodeport"),
-                        spec=client.V1ServiceSpec(
-                            type="NodePort",
-                            ports=[client.V1ServicePort(
-                                port=port,
-                                target_port=port
-                            )],
-                            selector={"app": self.app_name}
-                        )
-                    )
-        return service
+                    for raw_port in [str(p) for p in service_info["ports"]]:
+                        if opts.o.debug:
+                            print(f"service port: {raw_port}")
+                        if ":" in raw_port:
+                            parts = raw_port.split(":")
+                            if len(parts) != 2:
+                                raise Exception(f"Invalid port definition: {raw_port}")
+                            node_port = int(parts[0])
+                            pod_port = int(parts[1])
+                        else:
+                            node_port = None
+                            pod_port = int(raw_port)
+                        service = client.V1Service(
+                            metadata=client.V1ObjectMeta(name=f"{self.app_name}-nodeport-{pod_port}"),
+                            spec=client.V1ServiceSpec(
+                                type="NodePort",
+                                ports=[client.V1ServicePort(
+                                    port=pod_port,
+                                    target_port=pod_port,
+                                    node_port=node_port
+                                )],
+                                selector={"app": self.app_name}
+                            )
+                        )
+                        nodeports.append(service)
+        return nodeports

    def get_ingress(self, use_tls=False, certificate=None, cluster_issuer="letsencrypt-prod"):
        # No ingress for a deployment that has no http-proxy defined, for now
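For reference, the port grammar accepted by get_nodeports() above, mirrored as a standalone sketch (sample values are illustrative): a bare port such as "3000" exposes pod port 3000 and lets Kubernetes pick the node port, while "30080:3000" pins node port 30080 to pod port 3000.

    # Illustrative mirror of the parsing rule in get_nodeports().
    def parse_port(raw_port: str):
        if ":" in raw_port:
            parts = raw_port.split(":")
            if len(parts) != 2:
                raise Exception(f"Invalid port definition: {raw_port}")
            return int(parts[0]), int(parts[1])  # (node_port, pod_port)
        return None, int(raw_port)               # node_port auto-assigned

    assert parse_port("3000") == (None, 3000)
    assert parse_port("30080:3000") == (30080, 3000)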
@@ -248,12 +261,12 @@ class ClusterInfo:
            for f in os.listdir(cfg_map_path):
                full_path = os.path.join(cfg_map_path, f)
                if os.path.isfile(full_path):
-                    data[f] = open(full_path, 'rt').read()
+                    data[f] = base64.b64encode(open(full_path, 'rb').read()).decode('ASCII')

            spec = client.V1ConfigMap(
                metadata=client.V1ObjectMeta(name=f"{self.app_name}-{cfg_map_name}",
                                             labels={"configmap-label": cfg_map_name}),
-                data=data
+                binary_data=data
            )
            result.append(spec)
        return result
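The switch from data to binary_data means each file is shipped base64-encoded, which handles text and binary content alike; a quick sketch of the round trip (the file name is a placeholder):

    # Illustrative round trip for the binary_data encoding used above.
    import base64

    raw = open("some-config-file", "rb").read()      # any file, text or binary
    encoded = base64.b64encode(raw).decode("ASCII")  # value stored in binary_data
    assert base64.b64decode(encoded) == raw          # what lands in the mounted file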
@@ -353,6 +366,8 @@ class ClusterInfo:

        annotations = None
        labels = {"app": self.app_name}
+        affinity = None
+        tolerations = None

        if self.spec.get_annotations():
            annotations = {}
@@ -365,17 +380,60 @@ class ClusterInfo:
            for service_name in services:
                labels[key.replace("{name}", service_name)] = value

+        if self.spec.get_node_affinities():
+            affinities = []
+            for rule in self.spec.get_node_affinities():
+                # TODO add some input validation here
+                label_name = rule['label']
+                label_value = rule['value']
+                affinities.append(client.V1NodeSelectorTerm(
+                    match_expressions=[client.V1NodeSelectorRequirement(
+                        key=label_name,
+                        operator="In",
+                        values=[label_value]
+                    )]
+                )
+                )
+            affinity = client.V1Affinity(
+                node_affinity=client.V1NodeAffinity(
+                    required_during_scheduling_ignored_during_execution=client.V1NodeSelector(
+                        node_selector_terms=affinities
+                    ))
+            )
+
+        if self.spec.get_node_tolerations():
+            tolerations = []
+            for toleration in self.spec.get_node_tolerations():
+                # TODO add some input validation here
+                toleration_key = toleration['key']
+                toleration_value = toleration['value']
+                tolerations.append(client.V1Toleration(
+                    effect="NoSchedule",
+                    key=toleration_key,
+                    operator="Equal",
+                    value=toleration_value
+                ))
+
        template = client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(
                annotations=annotations,
                labels=labels
            ),
-            spec=client.V1PodSpec(containers=containers, image_pull_secrets=image_pull_secrets, volumes=volumes),
+            spec=client.V1PodSpec(
+                containers=containers,
+                image_pull_secrets=image_pull_secrets,
+                volumes=volumes,
+                affinity=affinity,
+                tolerations=tolerations
+            ),
        )
        spec = client.V1DeploymentSpec(
-            replicas=1, template=template, selector={
+            replicas=self.spec.get_replicas(),
+            template=template, selector={
                "matchLabels":
-                {"app": self.app_name}})
+                {"app": self.app_name}
+            }
+        )

        deployment = client.V1Deployment(
            api_version="apps/v1",
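For context, a sketch of the parsed spec values these rules would consume (key names are assumptions based on the constants.*_key lookups in the Spec getters later in this diff):

    # Hypothetical parsed deployment spec fragment.
    parsed_spec_obj = {
        "replicas": 2,  # consumed by V1DeploymentSpec(replicas=...)
        "node-affinities": [
            # becomes a required V1NodeSelectorTerm: nodetype In [fast-disk]
            {"label": "nodetype", "value": "fast-disk"},
        ],
        "node-tolerations": [
            # becomes V1Toleration(effect="NoSchedule", operator="Equal", ...)
            {"key": "nodetype", "value": "fast-disk"},
        ],
    }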
@@ -16,6 +16,7 @@ from datetime import datetime, timezone

from pathlib import Path
from kubernetes import client, config
+from typing import List

from stack_orchestrator import constants
from stack_orchestrator.deploy.deployer import Deployer, DeployerConfigGenerator
@@ -51,12 +52,14 @@ class K8sDeployer(Deployer):
    networking_api: client.NetworkingV1Api
    k8s_namespace: str = "default"
    kind_cluster_name: str
+    skip_cluster_management: bool
    cluster_info: ClusterInfo
    deployment_dir: Path
    deployment_context: DeploymentContext

    def __init__(self, type, deployment_context: DeploymentContext, compose_files, compose_project_name, compose_env_file) -> None:
        self.type = type
+        self.skip_cluster_management = False
        # TODO: workaround pending refactoring above to cope with being created with a null deployment_context
        if deployment_context is None:
            return
@@ -182,6 +185,7 @@ class K8sDeployer(Deployer):
        if len(host_parts) == 2:
            host_as_wild = f"*.{host_parts[1]}"

+        # TODO: resolve method deprecation below
        now = datetime.utcnow().replace(tzinfo=timezone.utc)
        fmt = "%Y-%m-%dT%H:%M:%S%z"

@@ -202,15 +206,16 @@ class K8sDeployer(Deployer):
            return cert
        return None

-    def up(self, detach, services):
+    def up(self, detach, skip_cluster_management, services):
+        self.skip_cluster_management = skip_cluster_management
        if not opts.o.dry_run:
-            if self.is_kind():
+            if self.is_kind() and not self.skip_cluster_management:
                # Create the kind cluster
                create_cluster(self.kind_cluster_name, self.deployment_dir.joinpath(constants.kind_config_filename))
                # Ensure the referenced containers are copied into kind
                load_images_into_kind(self.kind_cluster_name, self.cluster_info.image_set)
            self.connect_api()
-            if self.is_kind():
+            if self.is_kind() and not self.skip_cluster_management:
                # Now configure an ingress controller (not installed by default in kind)
                install_ingress_for_kind()
                # Wait for ingress to start (deployment provisioning will fail unless this is done)
@@ -246,8 +251,8 @@ class K8sDeployer(Deployer):
            if opts.o.debug:
                print("No ingress configured")

-        nodeport: client.V1Service = self.cluster_info.get_nodeport()
-        if nodeport:
+        nodeports: List[client.V1Service] = self.cluster_info.get_nodeports()
+        for nodeport in nodeports:
            if opts.o.debug:
                print(f"Sending this nodeport: {nodeport}")
            if not opts.o.dry_run:
@@ -259,7 +264,8 @@ class K8sDeployer(Deployer):
                print("NodePort created:")
                print(f"{nodeport_resp}")

-    def down(self, timeout, volumes):  # noqa: C901
+    def down(self, timeout, volumes, skip_cluster_management):  # noqa: C901
+        self.skip_cluster_management = skip_cluster_management
        self.connect_api()
        # Delete the k8s objects

@@ -342,10 +348,10 @@ class K8sDeployer(Deployer):
            if opts.o.debug:
                print("No ingress to delete")

-        nodeport: client.V1Service = self.cluster_info.get_nodeport()
-        if nodeport:
+        nodeports: List[client.V1Service] = self.cluster_info.get_nodeports()
+        for nodeport in nodeports:
            if opts.o.debug:
-                print(f"Deleting this nodeport: {ingress}")
+                print(f"Deleting this nodeport: {nodeport}")
            try:
                self.core_api.delete_namespaced_service(
                    namespace=self.k8s_namespace,
@@ -357,7 +363,7 @@ class K8sDeployer(Deployer):
            if opts.o.debug:
                print("No nodeport to delete")

-        if self.is_kind():
+        if self.is_kind() and not self.skip_cluster_management:
            # Destroy the kind cluster
            destroy_cluster(self.kind_cluster_name)

@@ -117,6 +117,15 @@ class Spec:
    def get_annotations(self):
        return self.obj.get(constants.annotations_key, {})

+    def get_replicas(self):
+        return self.obj.get(constants.replicas_key, 1)
+
+    def get_node_affinities(self):
+        return self.obj.get(constants.node_affinities_key, [])
+
+    def get_node_tolerations(self):
+        return self.obj.get(constants.node_tolerations_key, [])
+
    def get_labels(self):
        return self.obj.get(constants.labels_key, {})

@@ -21,16 +21,30 @@ import sys
import tempfile
import time
import uuid
+import yaml

import click
+import gnupg

from stack_orchestrator.deploy.images import remote_image_exists
from stack_orchestrator.deploy.webapp import deploy_webapp
-from stack_orchestrator.deploy.webapp.util import (LaconicRegistryClient, TimedLogger,
-                                                   build_container_image, push_container_image,
-                                                   file_hash, deploy_to_k8s, publish_deployment,
-                                                   hostname_for_deployment_request, generate_hostname_for_app,
-                                                   match_owner, skip_by_tag)
+from stack_orchestrator.deploy.webapp.util import (
+    AttrDict,
+    LaconicRegistryClient,
+    TimedLogger,
+    build_container_image,
+    confirm_auction,
+    push_container_image,
+    file_hash,
+    deploy_to_k8s,
+    publish_deployment,
+    hostname_for_deployment_request,
+    generate_hostname_for_app,
+    match_owner,
+    skip_by_tag,
+    confirm_payment,
+    load_known_requests,
+)


def process_app_deployment_request(
@@ -45,12 +59,19 @@ def process_app_deployment_request(
    image_registry,
    force_rebuild,
    fqdn_policy,
-    logger
+    recreate_on_deploy,
+    webapp_deployer_record,
+    gpg,
+    private_key_passphrase,
+    config_upload_dir,
+    logger,
):
    logger.log("BEGIN - process_app_deployment_request")

    # 1. look up application
-    app = laconic.get_record(app_deployment_request.attributes.application, require=True)
+    app = laconic.get_record(
+        app_deployment_request.attributes.application, require=True
+    )
    logger.log(f"Retrieved app record {app_deployment_request.attributes.application}")

    # 2. determine dns
@@ -61,32 +82,64 @@ def process_app_deployment_request(
        if "allow" == fqdn_policy or "preexisting" == fqdn_policy:
            fqdn = requested_name
        else:
-            raise Exception(f"{requested_name} is invalid: only unqualified hostnames are allowed.")
+            raise Exception(
+                f"{requested_name} is invalid: only unqualified hostnames are allowed."
+            )
    else:
        fqdn = f"{requested_name}.{default_dns_suffix}"

+    # Normalize case (just in case)
+    fqdn = fqdn.lower()
+
    # 3. check ownership of existing dnsrecord vs this request
    dns_lrn = f"{dns_record_namespace}/{fqdn}"
    dns_record = laconic.get_record(dns_lrn)
    if dns_record:
        matched_owner = match_owner(app_deployment_request, dns_record)
        if not matched_owner and dns_record.attributes.request:
-            matched_owner = match_owner(app_deployment_request, laconic.get_record(dns_record.attributes.request, require=True))
+            matched_owner = match_owner(
+                app_deployment_request,
+                laconic.get_record(dns_record.attributes.request, require=True),
+            )

        if matched_owner:
            logger.log(f"Matched DnsRecord ownership: {matched_owner}")
        else:
-            raise Exception("Unable to confirm ownership of DnsRecord %s for request %s" %
-                            (dns_lrn, app_deployment_request.id))
+            raise Exception(
+                "Unable to confirm ownership of DnsRecord %s for request %s"
+                % (dns_lrn, app_deployment_request.id)
+            )
    elif "preexisting" == fqdn_policy:
-        raise Exception(f"No pre-existing DnsRecord {dns_lrn} could be found for request {app_deployment_request.id}.")
+        raise Exception(
+            f"No pre-existing DnsRecord {dns_lrn} could be found for request {app_deployment_request.id}."
+        )

    # 4. get build and runtime config from request
+    env = {}
+    if app_deployment_request.attributes.config:
+        if "ref" in app_deployment_request.attributes.config:
+            with open(
+                f"{config_upload_dir}/{app_deployment_request.attributes.config.ref}",
+                "rb",
+            ) as file:
+                record_owner = laconic.get_owner(app_deployment_request)
+                decrypted = gpg.decrypt_file(file, passphrase=private_key_passphrase)
+                parsed = AttrDict(yaml.safe_load(decrypted.data))
+                if record_owner not in parsed.authorized:
+                    raise Exception(
+                        f"{record_owner} not authorized to access config {app_deployment_request.attributes.config.ref}"
+                    )
+                if "env" in parsed.config:
+                    env.update(parsed.config.env)
+
+        if "env" in app_deployment_request.attributes.config:
+            env.update(app_deployment_request.attributes.config.env)
+
    env_filename = None
-    if app_deployment_request.attributes.config and "env" in app_deployment_request.attributes.config:
+    if env:
        env_filename = tempfile.mktemp()
-        with open(env_filename, 'w') as file:
-            for k, v in app_deployment_request.attributes.config["env"].items():
+        with open(env_filename, "w") as file:
+            for k, v in env.items():
                file.write("%s=%s\n" % (k, shlex.quote(str(v))))

    # 5. determine new or existing deployment
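A sketch of the decrypted config document this code expects (values invented; the shape follows the parsed.authorized and parsed.config.env accesses above): the request's record owner must appear under authorized, and config.env is merged into the deployment environment, with env supplied directly on the request taking precedence.

    # Hypothetical decrypted payload for a config "ref".
    import yaml

    decrypted_yaml = """
    authorized:
      - laconic1exampleowneraddress
    config:
      env:
        API_URL: https://api.example.com
    """
    parsed = yaml.safe_load(decrypted_yaml)
    assert "laconic1exampleowneraddress" in parsed["authorized"]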
@@ -95,7 +148,10 @@ def process_app_deployment_request(
    if app_deployment_request.attributes.deployment:
        app_deployment_lrn = app_deployment_request.attributes.deployment
    if not app_deployment_lrn.startswith(deployment_record_namespace):
-        raise Exception("Deployment CRN %s is not in a supported namespace" % app_deployment_request.attributes.deployment)
+        raise Exception(
+            "Deployment LRN %s is not in a supported namespace"
+            % app_deployment_request.attributes.deployment
+        )

    deployment_record = laconic.get_record(app_deployment_lrn)
    deployment_dir = os.path.join(deployment_parent_dir, fqdn)
@@ -108,20 +164,37 @@ def process_app_deployment_request(
    # b. check for deployment directory (create if necessary)
    if not os.path.exists(deployment_dir):
        if deployment_record:
-            raise Exception("Deployment record %s exists, but not deployment dir %s. Please remove name." %
-                            (app_deployment_lrn, deployment_dir))
-        logger.log(f"Creating webapp deployment in: {deployment_dir} with container id: {deployment_container_tag}")
-        deploy_webapp.create_deployment(ctx, deployment_dir, deployment_container_tag,
-                                        f"https://{fqdn}", kube_config, image_registry, env_filename)
+            raise Exception(
+                "Deployment record %s exists, but not deployment dir %s. Please remove name."
+                % (app_deployment_lrn, deployment_dir)
+            )
+        logger.log(
+            f"Creating webapp deployment in: {deployment_dir} with container id: {deployment_container_tag}"
+        )
+        deploy_webapp.create_deployment(
+            ctx,
+            deployment_dir,
+            deployment_container_tag,
+            f"https://{fqdn}",
+            kube_config,
+            image_registry,
+            env_filename,
+        )
    elif env_filename:
        shutil.copyfile(env_filename, deployment_config_file)

    needs_k8s_deploy = False
    if force_rebuild:
-        logger.log("--force-rebuild is enabled so the container will always be built now, even if nothing has changed in the app")
+        logger.log(
+            "--force-rebuild is enabled so the container will always be built now, even if nothing has changed in the app"
+        )
    # 6. build container (if needed)
    # TODO: add a comment that explains what this code is doing (not clear to me)
-    if not deployment_record or deployment_record.attributes.application != app.id or force_rebuild:
+    if (
+        not deployment_record
+        or deployment_record.attributes.application != app.id
+        or force_rebuild
+    ):
        needs_k8s_deploy = True
        # check if the image already exists
        shared_tag_exists = remote_image_exists(image_registry, app_image_shared_tag)
@@ -136,13 +209,15 @@ def process_app_deployment_request(
            logger.log(
                f"(SKIPPED) Existing image found for this app: {app_image_shared_tag} "
                "tagging it with: {deployment_container_tag} to use in this deployment"
            )
            # add_tags_to_image(image_registry, app_image_shared_tag, deployment_container_tag)
            logger.log("Tag complete")
        else:
            extra_build_args = []  # TODO: pull from request
            logger.log(f"Building container image: {deployment_container_tag}")
-            build_container_image(app, deployment_container_tag, extra_build_args, logger)
+            build_container_image(
+                app, deployment_container_tag, extra_build_args, logger
+            )
            logger.log("Build complete")
            logger.log(f"Pushing container image: {deployment_container_tag}")
            push_container_image(deployment_dir, logger)
@@ -150,23 +225,22 @@ def process_app_deployment_request(
            # The build/push commands above will use the unique deployment tag, so now we need to add the shared tag.
            logger.log(
                f"(SKIPPED) Adding global app image tag: {app_image_shared_tag} to newly built image: {deployment_container_tag}"
            )
            # add_tags_to_image(image_registry, deployment_container_tag, app_image_shared_tag)
            logger.log("Tag complete")
    else:
        logger.log("Requested app is already deployed, skipping build and image push")

    # 7. update config (if needed)
-    if not deployment_record or file_hash(deployment_config_file) != deployment_record.attributes.meta.config:
+    if (
+        not deployment_record
+        or file_hash(deployment_config_file) != deployment_record.attributes.meta.config
+    ):
        needs_k8s_deploy = True

    # 8. update k8s deployment
    if needs_k8s_deploy:
-        deploy_to_k8s(
-            deployment_record,
-            deployment_dir,
-            logger
-        )
+        deploy_to_k8s(deployment_record, deployment_dir, recreate_on_deploy, logger)

    logger.log("Publishing deployment to registry.")
    publish_deployment(
@@ -178,54 +252,152 @@ def process_app_deployment_request(
        dns_lrn,
        deployment_dir,
        app_deployment_request,
-        logger
+        webapp_deployer_record,
+        logger,
    )
    logger.log("Publication complete.")
    logger.log("END - process_app_deployment_request")


-def load_known_requests(filename):
-    if filename and os.path.exists(filename):
-        return json.load(open(filename, "r"))
-    return {}


def dump_known_requests(filename, requests, status="SEEN"):
    if not filename:
        return
    known_requests = load_known_requests(filename)
    for r in requests:
-        known_requests[r.id] = {
-            "createTime": r.createTime,
-            "status": status
-        }
+        known_requests[r.id] = {"createTime": r.createTime, "status": status}
    with open(filename, "w") as f:
        json.dump(known_requests, f)


@click.command()
@click.option("--kube-config", help="Provide a config file for a k8s deployment")
-@click.option("--laconic-config", help="Provide a config file for laconicd", required=True)
-@click.option("--image-registry", help="Provide a container image registry url for this k8s cluster")
-@click.option("--deployment-parent-dir", help="Create deployment directories beneath this directory", required=True)
+@click.option(
+    "--laconic-config", help="Provide a config file for laconicd", required=True
+)
+@click.option(
+    "--image-registry",
+    help="Provide a container image registry url for this k8s cluster",
+)
+@click.option(
+    "--deployment-parent-dir",
+    help="Create deployment directories beneath this directory",
+    required=True,
+)
@click.option("--request-id", help="The ApplicationDeploymentRequest to process")
-@click.option("--discover", help="Discover and process all pending ApplicationDeploymentRequests", is_flag=True, default=False)
-@click.option("--state-file", help="File to store state about previously seen requests.")
-@click.option("--only-update-state", help="Only update the state file, don't process any requests anything.", is_flag=True)
+@click.option(
+    "--discover",
+    help="Discover and process all pending ApplicationDeploymentRequests",
+    is_flag=True,
+    default=False,
+)
+@click.option(
+    "--state-file", help="File to store state about previously seen requests."
+)
+@click.option(
+    "--only-update-state",
+    help="Only update the state file, don't process any requests anything.",
+    is_flag=True,
+)
@click.option("--dns-suffix", help="DNS domain to use eg, laconic.servesthe.world")
-@click.option("--fqdn-policy", help="How to handle requests with an FQDN: prohibit, allow, preexisting", default="prohibit")
-@click.option("--record-namespace-dns", help="eg, lrn://laconic/dns")
-@click.option("--record-namespace-deployments", help="eg, lrn://laconic/deployments")
-@click.option("--dry-run", help="Don't do anything, just report what would be done.", is_flag=True)
-@click.option("--include-tags", help="Only include requests with matching tags (comma-separated).", default="")
-@click.option("--exclude-tags", help="Exclude requests with matching tags (comma-separated).", default="")
-@click.option("--force-rebuild", help="Rebuild even if the image already exists.", is_flag=True)
-@click.option("--log-dir", help="Output build/deployment logs to directory.", default=None)
+@click.option(
+    "--fqdn-policy",
+    help="How to handle requests with an FQDN: prohibit, allow, preexisting",
+    default="prohibit",
+)
+@click.option("--record-namespace-dns", help="eg, lrn://laconic/dns", required=True)
+@click.option(
+    "--record-namespace-deployments",
+    help="eg, lrn://laconic/deployments",
+    required=True,
+)
+@click.option(
+    "--dry-run", help="Don't do anything, just report what would be done.", is_flag=True
+)
+@click.option(
+    "--include-tags",
+    help="Only include requests with matching tags (comma-separated).",
+    default="",
+)
+@click.option(
+    "--exclude-tags",
+    help="Exclude requests with matching tags (comma-separated).",
+    default="",
+)
+@click.option(
+    "--force-rebuild", help="Rebuild even if the image already exists.", is_flag=True
+)
+@click.option(
+    "--recreate-on-deploy",
+    help="Remove and recreate deployments instead of updating them.",
+    is_flag=True,
+)
+@click.option(
+    "--log-dir", help="Output build/deployment logs to directory.", default=None
+)
+@click.option(
+    "--min-required-payment",
+    help="Requests must have a minimum payment to be processed (in alnt)",
+    default=0,
+)
+@click.option("--lrn", help="The LRN of this deployer.", required=True)
+@click.option(
+    "--all-requests",
+    help="Handle requests addressed to anyone (by default only requests to"
+    "my payment address are examined).",
+    is_flag=True,
+)
+@click.option(
+    "--auction-requests",
+    help="Handle requests with auction id set (skips payment confirmation).",
+    is_flag=True,
+    default=False,
+)
+@click.option(
+    "--config-upload-dir",
+    help="The directory containing uploaded config.",
+    required=True,
+)
+@click.option(
+    "--private-key-file", help="The private key for decrypting config.", required=True
+)
+@click.option(
+    "--registry-lock-file", help="File path to use for registry mutex lock", default=None
+)
+@click.option(
+    "--private-key-passphrase",
+    help="The passphrase for the private key.",
+    required=True,
+)
@click.pass_context
-def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_dir,  # noqa: C901
-            request_id, discover, state_file, only_update_state,
-            dns_suffix, fqdn_policy, record_namespace_dns, record_namespace_deployments, dry_run,
-            include_tags, exclude_tags, force_rebuild, log_dir):
+def command(  # noqa: C901
+    ctx,
+    kube_config,
+    laconic_config,
+    image_registry,
+    deployment_parent_dir,
+    request_id,
+    discover,
+    state_file,
+    only_update_state,
+    dns_suffix,
+    fqdn_policy,
+    record_namespace_dns,
+    record_namespace_deployments,
+    dry_run,
+    include_tags,
+    exclude_tags,
+    force_rebuild,
+    recreate_on_deploy,
+    log_dir,
+    min_required_payment,
+    lrn,
+    config_upload_dir,
+    private_key_file,
+    private_key_passphrase,
+    all_requests,
+    auction_requests,
+    registry_lock_file,
+):
    if request_id and discover:
        print("Cannot specify both --request-id and --discover", file=sys.stderr)
        sys.exit(2)
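For orientation, a sketch of the python-gnupg round trip this deployer depends on (file names and passphrase are placeholders): the deployer imports its private key into a throwaway keyring and decrypts config uploads that clients encrypted to the matching public key.

    # Hypothetical decrypt flow matching the import_keys/decrypt_file calls here.
    import tempfile

    import gnupg

    gpg = gnupg.GPG(gnupghome=tempfile.mkdtemp())
    result = gpg.import_keys(open("deployer-key.asc", "rb").read())
    assert result.imported == 1

    with open("config.yml.gpg", "rb") as f:
        decrypted = gpg.decrypt_file(f, passphrase="placeholder-passphrase")
    assert decrypted.ok
    print(decrypted.data.decode())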
@@ -239,140 +411,289 @@ def command(ctx, kube_config, laconic_config, image_registry, deployment_parent_
        sys.exit(2)

    if not only_update_state:
-        if not record_namespace_dns or not record_namespace_deployments or not dns_suffix:
-            print("--dns-suffix, --record-namespace-dns, and --record-namespace-deployments are all required", file=sys.stderr)
+        if (
+            not record_namespace_dns
+            or not record_namespace_deployments
+            or not dns_suffix
+        ):
+            print(
+                "--dns-suffix, --record-namespace-dns, and --record-namespace-deployments are all required",
+                file=sys.stderr,
+            )
            sys.exit(2)

    if fqdn_policy not in ["prohibit", "allow", "preexisting"]:
-        print("--fqdn-policy must be one of 'prohibit', 'allow', or 'preexisting'", file=sys.stderr)
+        print(
+            "--fqdn-policy must be one of 'prohibit', 'allow', or 'preexisting'",
+            file=sys.stderr,
+        )
        sys.exit(2)

-    # Split CSV and clean up values.
-    include_tags = [tag.strip() for tag in include_tags.split(",") if tag]
-    exclude_tags = [tag.strip() for tag in exclude_tags.split(",") if tag]

-    laconic = LaconicRegistryClient(laconic_config)

-    # Find deployment requests.
-    # single request
-    if request_id:
-        requests = [laconic.get_record(request_id, require=True)]
-    # all requests
-    elif discover:
-        requests = laconic.app_deployment_requests()

-    if only_update_state:
-        if not dry_run:
-            dump_known_requests(state_file, requests)
-        return

-    previous_requests = load_known_requests(state_file)

-    # Collapse related requests.
-    requests.sort(key=lambda r: r.createTime)
-    requests.reverse()
-    requests_by_name = {}
-    skipped_by_name = {}
-    for r in requests:
-        if r.id in previous_requests and previous_requests[r.id].get("status", "") != "RETRY":
-            print(f"Skipping request {r.id}, we've already seen it.")
-            continue

-        app = laconic.get_record(r.attributes.application)
-        if not app:
-            print("Skipping request %s, cannot locate app." % r.id)
-            continue

-        requested_name = r.attributes.dns
-        if not requested_name:
-            requested_name = generate_hostname_for_app(app)
-            print("Generating name %s for request %s." % (requested_name, r.id))

-        if requested_name in skipped_by_name or requested_name in requests_by_name:
-            print("Ignoring request %s, it has been superseded." % r.id)
-            continue

-        if skip_by_tag(r, include_tags, exclude_tags):
-            print("Skipping request %s, filtered by tag (include %s, exclude %s, present %s)" % (r.id,
-                                                                                                 include_tags,
-                                                                                                 exclude_tags,
-                                                                                                 r.attributes.tags))
-            skipped_by_name[requested_name] = r
-            continue

-        print("Found request %s to run application %s on %s." % (r.id, r.attributes.application, requested_name))
-        requests_by_name[requested_name] = r

-    # Find deployments.
-    deployments = laconic.app_deployments()
-    deployments_by_request = {}
-    for d in deployments:
-        if d.attributes.request:
-            deployments_by_request[d.attributes.request] = d

-    # Find removal requests.
-    cancellation_requests = {}
-    removal_requests = laconic.app_deployment_removal_requests()
-    for r in removal_requests:
-        if r.attributes.request:
-            cancellation_requests[r.attributes.request] = r

-    requests_to_execute = []
-    for r in requests_by_name.values():
-        if r.id in cancellation_requests and match_owner(cancellation_requests[r.id], r):
-            print(f"Found deployment cancellation request for {r.id} at {cancellation_requests[r.id].id}")
-        elif r.id in deployments_by_request:
-            print(f"Found satisfied request for {r.id} at {deployments_by_request[r.id].id}")
-        else:
-            if r.id not in previous_requests:
-                print(f"Request {r.id} needs to processed.")
-                requests_to_execute.append(r)
-            else:
-                print(
-                    f"Skipping unsatisfied request {r.id} because we have seen it before."
-                )

-    print("Found %d unsatisfied request(s) to process." % len(requests_to_execute))

-    if not dry_run:
-        for r in requests_to_execute:
-            dump_known_requests(state_file, [r], "DEPLOYING")
-            status = "ERROR"
-            run_log_file = None
-            run_reg_client = laconic
-            try:
-                run_id = f"{r.id}-{str(time.time()).split('.')[0]}-{str(uuid.uuid4()).split('-')[0]}"
-                if log_dir:
-                    run_log_dir = os.path.join(log_dir, r.id)
-                    if not os.path.exists(run_log_dir):
-                        os.mkdir(run_log_dir)
-                    run_log_file_path = os.path.join(run_log_dir, f"{run_id}.log")
-                    print(f"Directing deployment logs to: {run_log_file_path}")
-                    run_log_file = open(run_log_file_path, "wt")
-                    run_reg_client = LaconicRegistryClient(laconic_config, log_file=run_log_file)

-                logger = TimedLogger(run_id, run_log_file)
-                logger.log("Processing ...")
-                process_app_deployment_request(
-                    ctx,
-                    run_reg_client,
-                    r,
-                    record_namespace_deployments,
-                    record_namespace_dns,
-                    dns_suffix,
-                    os.path.abspath(deployment_parent_dir),
-                    kube_config,
-                    image_registry,
-                    force_rebuild,
-                    fqdn_policy,
-                    logger
-                )
-                status = "DEPLOYED"
-            except Exception as e:
-                logger.log("ERROR: " + str(e))
-            finally:
-                if logger:
-                    logger.log(f"DONE with status {status}", show_step_time=False, show_total_time=True)
-                    dump_known_requests(state_file, [r], status)
-                if run_log_file:
-                    run_log_file.close()
+    tempdir = tempfile.mkdtemp()
+    gpg = gnupg.GPG(gnupghome=tempdir)
+
+    # Import the deployer's public key
+    result = gpg.import_keys(open(private_key_file, "rb").read())
+    if 1 != result.imported:
+        print(
+            f"Failed to load private key file: {private_key_file}.",
+            file=sys.stderr,
+        )
+        sys.exit(2)
+
+    main_logger = TimedLogger(file=sys.stderr)
+
+    try:
+        # Split CSV and clean up values.
+        include_tags = [tag.strip() for tag in include_tags.split(",") if tag]
+        exclude_tags = [tag.strip() for tag in exclude_tags.split(",") if tag]
+
+        laconic = LaconicRegistryClient(laconic_config, log_file=sys.stderr, mutex_lock_file=registry_lock_file)
+        webapp_deployer_record = laconic.get_record(lrn, require=True)
+        payment_address = webapp_deployer_record.attributes.paymentAddress
+        main_logger.log(f"Payment address: {payment_address}")
+
+        if min_required_payment and not payment_address:
+            print(
+                f"Minimum payment required, but no payment address listed for deployer: {lrn}.",
+                file=sys.stderr,
+            )
+            sys.exit(2)
+
+        # Find deployment requests.
+        # single request
+        if request_id:
+            main_logger.log(f"Retrieving request {request_id}...")
+            requests = [laconic.get_record(request_id, require=True)]
+        # all requests
+        elif discover:
+            main_logger.log("Discovering deployment requests...")
+            if all_requests:
+                requests = laconic.app_deployment_requests()
+            else:
+                requests = laconic.app_deployment_requests({"deployer": lrn})
+
+        if only_update_state:
+            if not dry_run:
+                dump_known_requests(state_file, requests)
+            return
+
+        previous_requests = {}
+        if state_file:
+            main_logger.log(f"Loading known requests from {state_file}...")
+            previous_requests = load_known_requests(state_file)
+
+        # Collapse related requests.
+        requests.sort(key=lambda r: r.createTime)
+        requests.reverse()
+        requests_by_name = {}
+        skipped_by_name = {}
+        for r in requests:
+            main_logger.log(f"BEGIN: Examining request {r.id}")
+            result = "PENDING"
+            try:
+                if (
+                    r.id in previous_requests
+                    and previous_requests[r.id].get("status", "") != "RETRY"
+                ):
+                    main_logger.log(f"Skipping request {r.id}, we've already seen it.")
+                    result = "SKIP"
+                    continue
+
+                app = laconic.get_record(r.attributes.application)
+                if not app:
+                    main_logger.log(f"Skipping request {r.id}, cannot locate app.")
+                    result = "ERROR"
+                    continue
+
+                requested_name = r.attributes.dns
+                if not requested_name:
+                    requested_name = generate_hostname_for_app(app)
+                    main_logger.log(
+                        "Generating name %s for request %s." % (requested_name, r.id)
+                    )
+
+                if (
+                    requested_name in skipped_by_name
+                    or requested_name in requests_by_name
+                ):
+                    main_logger.log(
+                        "Ignoring request %s, it has been superseded." % r.id
+                    )
+                    result = "SKIP"
+                    continue
+
+                if skip_by_tag(r, include_tags, exclude_tags):
+                    main_logger.log(
+                        "Skipping request %s, filtered by tag (include %s, exclude %s, present %s)"
+                        % (r.id, include_tags, exclude_tags, r.attributes.tags)
+                    )
+                    skipped_by_name[requested_name] = r
+                    result = "SKIP"
+                    continue
+
+                main_logger.log(
+                    "Found pending request %s to run application %s on %s."
+                    % (r.id, r.attributes.application, requested_name)
+                )
+                requests_by_name[requested_name] = r
+            except Exception as e:
+                result = "ERROR"
+                main_logger.log(f"ERROR examining request {r.id}: " + str(e))
+            finally:
+                main_logger.log(f"DONE Examining request {r.id} with result {result}.")
+                if result in ["ERROR"]:
+                    dump_known_requests(state_file, [r], status=result)
+
+        # Find deployments.
+        main_logger.log("Discovering existing app deployments...")
+        if all_requests:
+            deployments = laconic.app_deployments()
+        else:
+            deployments = laconic.app_deployments({"deployer": lrn})
+        deployments_by_request = {}
+        for d in deployments:
+            if d.attributes.request:
+                deployments_by_request[d.attributes.request] = d
+
+        # Find removal requests.
+        main_logger.log("Discovering deployment removal and cancellation requests...")
+        cancellation_requests = {}
+        removal_requests = laconic.app_deployment_removal_requests()
+        for r in removal_requests:
+            if r.attributes.request:
+                cancellation_requests[r.attributes.request] = r
+
+        requests_to_check_for_payment = []
+        for r in requests_by_name.values():
+            if r.id in cancellation_requests and match_owner(
+                cancellation_requests[r.id], r
+            ):
+                main_logger.log(
+                    f"Found deployment cancellation request for {r.id} at {cancellation_requests[r.id].id}"
+                )
+            elif r.id in deployments_by_request:
+                main_logger.log(
+                    f"Found satisfied request for {r.id} at {deployments_by_request[r.id].id}"
+                )
+            else:
+                if (
+                    r.id in previous_requests
+                    and previous_requests[r.id].get("status", "") != "RETRY"
+                ):
+                    main_logger.log(
+                        f"Skipping unsatisfied request {r.id} because we have seen it before."
+                    )
+                else:
+                    main_logger.log(f"Request {r.id} needs to processed.")
+                    requests_to_check_for_payment.append(r)
+
+        requests_to_execute = []
+        for r in requests_to_check_for_payment:
+            if r.attributes.auction:
+                if auction_requests:
+                    if confirm_auction(
+                        laconic,
+                        r,
+                        lrn,
+                        payment_address,
+                        main_logger
+                    ):
+                        main_logger.log(f"{r.id}: Auction confirmed.")
+                        requests_to_execute.append(r)
+                    else:
+                        main_logger.log(
+                            f"Skipping request {r.id}: unable to verify auction."
+                        )
+                        dump_known_requests(state_file, [r], status="SKIP")
+                else:
+                    main_logger.log(
+                        f"Skipping request {r.id}: not handling requests with auction."
+                    )
+                    dump_known_requests(state_file, [r], status="SKIP")
+            elif min_required_payment:
+                main_logger.log(f"{r.id}: Confirming payment...")
+                if confirm_payment(
+                    laconic,
+                    r,
+                    payment_address,
+                    min_required_payment,
+                    main_logger,
+                ):
+                    main_logger.log(f"{r.id}: Payment confirmed.")
+                    requests_to_execute.append(r)
+                else:
+                    main_logger.log(
+                        f"Skipping request {r.id}: unable to verify payment."
+                    )
+                    dump_known_requests(state_file, [r], status="UNPAID")
+            else:
+                requests_to_execute.append(r)
+
+        main_logger.log(
+            "Found %d unsatisfied request(s) to process." % len(requests_to_execute)
+        )
+
+        if not dry_run:
+            for r in requests_to_execute:
+                main_logger.log(f"DEPLOYING {r.id}: BEGIN")
+                dump_known_requests(state_file, [r], "DEPLOYING")
+                status = "ERROR"
+                run_log_file = None
+                run_reg_client = laconic
+                try:
+                    run_id = f"{r.id}-{str(time.time()).split('.')[0]}-{str(uuid.uuid4()).split('-')[0]}"
+                    if log_dir:
+                        run_log_dir = os.path.join(log_dir, r.id)
+                        if not os.path.exists(run_log_dir):
+                            os.mkdir(run_log_dir)
+                        run_log_file_path = os.path.join(run_log_dir, f"{run_id}.log")
+                        main_logger.log(
+                            f"Directing deployment logs to: {run_log_file_path}"
+                        )
+                        run_log_file = open(run_log_file_path, "wt")
+                        run_reg_client = LaconicRegistryClient(
+                            laconic_config, log_file=run_log_file, mutex_lock_file=registry_lock_file
+                        )
+
+                    build_logger = TimedLogger(run_id, run_log_file)
+                    build_logger.log("Processing ...")
+                    process_app_deployment_request(
+                        ctx,
+                        run_reg_client,
+                        r,
+                        record_namespace_deployments,
+                        record_namespace_dns,
+                        dns_suffix,
+                        os.path.abspath(deployment_parent_dir),
+                        kube_config,
+                        image_registry,
+                        force_rebuild,
+                        fqdn_policy,
+                        recreate_on_deploy,
+                        webapp_deployer_record,
+                        gpg,
+                        private_key_passphrase,
+                        config_upload_dir,
+                        build_logger,
+                    )
+                    status = "DEPLOYED"
+                except Exception as e:
+                    main_logger.log(f"ERROR {r.id}:" + str(e))
+                    build_logger.log("ERROR: " + str(e))
+                finally:
+                    main_logger.log(f"DEPLOYING {r.id}: END - {status}")
+                    if build_logger:
+                        build_logger.log(
+                            f"DONE with status {status}",
+                            show_step_time=False,
+                            show_total_time=True,
+                        )
+                    dump_known_requests(state_file, [r], status)
+                    if run_log_file:
+                        run_log_file.close()
+    except Exception as e:
+        main_logger.log("UNCAUGHT ERROR:" + str(e))
+        raise e
+    finally:
+        shutil.rmtree(tempdir, ignore_errors=True)
|
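The run id embedded in the per-deployment log file names above combines the request id, a UNIX-seconds timestamp, and a short UUID prefix. A minimal sketch of the format (the request id below is an illustrative placeholder):

import time
import uuid

request_id = "bafyreq-example"  # placeholder request id
run_id = f"{request_id}-{str(time.time()).split('.')[0]}-{str(uuid.uuid4()).split('-')[0]}"
print(run_id)  # e.g. "bafyreq-example-1712345678-9f8b7a6c"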
220 stack_orchestrator/deploy/webapp/handle_deployment_auction.py Normal file
@@ -0,0 +1,220 @@
# Copyright ©2023 Vulcanize

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import sys
import json

import click

from stack_orchestrator.deploy.webapp.util import (
    AttrDict,
    LaconicRegistryClient,
    TimedLogger,
    load_known_requests,
    AUCTION_KIND_PROVIDER,
    AuctionStatus,
)


def process_app_deployment_auction(
    ctx,
    laconic: LaconicRegistryClient,
    request,
    current_status,
    reveal_file_path,
    bid_amount,
    logger,
):
    # Fetch auction details
    auction_id = request.attributes.auction
    auction = laconic.get_auction(auction_id)
    if not auction:
        raise Exception(f"Unable to locate auction: {auction_id}")

    # Check auction kind
    if auction.kind != AUCTION_KIND_PROVIDER:
        raise Exception(f"Auction kind needs to be {AUCTION_KIND_PROVIDER}, got {auction.kind}")

    if current_status == "PENDING":
        # Skip if pending auction not in commit state
        if auction.status != AuctionStatus.COMMIT:
            logger.log(f"Skipping pending request, auction {auction_id} status: {auction.status}")
            return "SKIP", ""

        # Check max_price
        bid_amount_int = int(bid_amount)
        max_price_int = int(auction.maxPrice.quantity)
        if max_price_int < bid_amount_int:
            logger.log(f"Skipping auction {auction_id} with max_price ({max_price_int}) less than bid_amount ({bid_amount_int})")
            return "SKIP", ""

        # Bid on the auction
        reveal_file_path = laconic.commit_bid(auction_id, bid_amount_int)
        logger.log(f"Committed bid on auction {auction_id} with amount {bid_amount_int}")

        return "COMMIT", reveal_file_path

    if current_status == "COMMIT":
        # Return if auction still in commit state
        if auction.status == AuctionStatus.COMMIT:
            logger.log(f"Auction {auction_id} status: {auction.status}")
            return current_status, reveal_file_path

        # Reveal bid
        if auction.status == AuctionStatus.REVEAL:
            laconic.reveal_bid(auction_id, reveal_file_path)
            logger.log(f"Revealed bid on auction {auction_id}")

            return "REVEAL", reveal_file_path

        raise Exception(f"Unexpected auction {auction_id} status: {auction.status}")

    if current_status == "REVEAL":
        # Return if auction still in reveal state
        if auction.status == AuctionStatus.REVEAL:
            logger.log(f"Auction {auction_id} status: {auction.status}")
            return current_status, reveal_file_path

        # Return if auction is completed
        if auction.status == AuctionStatus.COMPLETED:
            logger.log(f"Auction {auction_id} completed")
            return "COMPLETED", ""

        raise Exception(f"Unexpected auction {auction_id} status: {auction.status}")

    raise Exception(f"Got request with unexpected status: {current_status}")


def dump_known_auction_requests(filename, requests, status="SEEN"):
    if not filename:
        return
    known_requests = load_known_requests(filename)
    for r in requests:
        known_requests[r.id] = {"revealFile": r.revealFile, "status": status}
    with open(filename, "w") as f:
        json.dump(known_requests, f)


@click.command()
@click.option(
    "--laconic-config", help="Provide a config file for laconicd", required=True
)
@click.option(
    "--state-file",
    help="File to store state about previously seen auction requests.",
    required=True,
)
@click.option(
    "--bid-amount",
    help="Bid to place on application deployment auctions (in alnt)",
    required=True,
)
@click.option(
    "--registry-lock-file", help="File path to use for registry mutex lock", default=None
)
@click.option(
    "--dry-run", help="Don't do anything, just report what would be done.", is_flag=True
)
@click.pass_context
def command(
    ctx,
    laconic_config,
    state_file,
    bid_amount,
    registry_lock_file,
    dry_run,
):
    if int(bid_amount) < 0:
        print("--bid-amount cannot be less than 0", file=sys.stderr)
        sys.exit(2)

    logger = TimedLogger(file=sys.stderr)

    try:
        laconic = LaconicRegistryClient(laconic_config, log_file=sys.stderr, mutex_lock_file=registry_lock_file)
        auctions_requests = laconic.app_deployment_auctions()

        previous_requests = {}
        logger.log(f"Loading known auctions from {state_file}...")
        previous_requests = load_known_requests(state_file)

        # Process new requests first
        auctions_requests.sort(key=lambda r: r.createTime)
        auctions_requests.reverse()

        requests_to_execute = []

        for r in auctions_requests:
            logger.log(f"BEGIN: Examining request {r.id}")
            result_status = "PENDING"
            reveal_file_path = ""
            try:
                application = r.attributes.application

                # Handle already seen requests
                if r.id in previous_requests:
                    # If it's not in commit or reveal status, skip the request as we've already seen it
                    current_status = previous_requests[r.id].get("status", "")
                    result_status = current_status
                    if current_status not in ["COMMIT", "REVEAL"]:
                        logger.log(f"Skipping request {r.id}, we've already seen it.")
                        continue

                    reveal_file_path = previous_requests[r.id].get("revealFile", "")
                    logger.log(f"Found existing auction request {r.id} for application {application}, status {current_status}.")
                else:
                    # It's a fresh request, check application record
                    app = laconic.get_record(application)
                    if not app:
                        logger.log(f"Skipping request {r.id}, cannot locate app.")
                        result_status = "ERROR"
                        continue

                    logger.log(f"Found pending auction request {r.id} for application {application}.")

                # Add requests to be processed
                requests_to_execute.append((r, result_status, reveal_file_path))

            except Exception as e:
                result_status = "ERROR"
                logger.log(f"ERROR: examining request {r.id}: " + str(e))
            finally:
                logger.log(f"DONE: Examining request {r.id} with result {result_status}.")
                if result_status in ["ERROR"]:
                    dump_known_auction_requests(state_file, [AttrDict({"id": r.id, "revealFile": reveal_file_path})], result_status)

        logger.log(f"Found {len(requests_to_execute)} request(s) to process.")

        if not dry_run:
            for r, current_status, reveal_file_path in requests_to_execute:
                logger.log(f"Processing {r.id}: BEGIN")
                result_status = "ERROR"
                try:
                    result_status, reveal_file_path = process_app_deployment_auction(
                        ctx,
                        laconic,
                        r,
                        current_status,
                        reveal_file_path,
                        bid_amount,
                        logger,
                    )
                except Exception as e:
                    logger.log(f"ERROR {r.id}: " + str(e))
                finally:
                    logger.log(f"Processing {r.id}: END - {result_status}")
                    dump_known_auction_requests(state_file, [AttrDict({"id": r.id, "revealFile": reveal_file_path})], result_status)
    except Exception as e:
        logger.log("UNCAUGHT ERROR: " + str(e))
        raise e
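For reference, a minimal sketch of the state file that dump_known_auction_requests writes and load_known_requests reads back: a JSON object keyed by request id, holding the reveal file path and last known status (the id and path below are illustrative placeholders):

import json

state = {
    "bafyreq-example": {"revealFile": "/tmp/reveal-bafyreq-example.json", "status": "COMMIT"}
}
with open("auction-state.json", "w") as f:
    json.dump(state, f)

known = json.load(open("auction-state.json", "r"))
assert known["bafyreq-example"]["status"] == "COMMIT"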
124 stack_orchestrator/deploy/webapp/publish_deployment_auction.py Normal file
@@ -0,0 +1,124 @@
# Copyright ©2023 Vulcanize

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import sys

import click
import yaml

from stack_orchestrator.deploy.webapp.util import (
    AUCTION_KIND_PROVIDER,
    TOKEN_DENOM,
    LaconicRegistryClient,
)


def fatal(msg: str):
    print(msg, file=sys.stderr)
    sys.exit(1)


@click.command()
@click.option(
    "--laconic-config", help="Provide a config file for laconicd", required=True
)
@click.option(
    "--app",
    help="The LRN of the application to deploy.",
    required=True,
)
@click.option(
    "--commits-duration",
    help="Auction commits duration (in seconds) (default: 600).",
    default=600,
)
@click.option(
    "--reveals-duration",
    help="Auction reveals duration (in seconds) (default: 600).",
    default=600,
)
@click.option(
    "--commit-fee",
    help="Auction bid commit fee (in alnt) (default: 100000).",
    default=100000,
)
@click.option(
    "--reveal-fee",
    help="Auction bid reveal fee (in alnt) (default: 100000).",
    default=100000,
)
@click.option(
    "--max-price",
    help="Max acceptable bid price (in alnt).",
    required=True,
)
@click.option(
    "--num-providers",
    help="Number of providers to select in the auction.",
    required=True,
)
@click.option(
    "--dry-run",
    help="Don't publish anything, just report what would be done.",
    is_flag=True,
)
@click.pass_context
def command(
    ctx,
    laconic_config,
    app,
    commits_duration,
    reveals_duration,
    commit_fee,
    reveal_fee,
    max_price,
    num_providers,
    dry_run,
):
    laconic = LaconicRegistryClient(laconic_config)

    app_record = laconic.get_record(app)
    if not app_record:
        fatal(f"Unable to locate app: {app}")

    provider_auction_params = {
        "kind": AUCTION_KIND_PROVIDER,
        "commits_duration": commits_duration,
        "reveals_duration": reveals_duration,
        "denom": TOKEN_DENOM,
        "commit_fee": commit_fee,
        "reveal_fee": reveal_fee,
        "max_price": max_price,
        "num_providers": num_providers,
    }
    auction_id = laconic.create_deployment_auction(provider_auction_params)
    if not auction_id:
        fatal("Unable to create a provider auction")
    print("Deployment auction created:", auction_id)

    deployment_auction = {
        "record": {
            "type": "ApplicationDeploymentAuction",
            "application": app,
            "auction": auction_id,
        }
    }

    if dry_run:
        print(yaml.dump(deployment_auction))
        return

    # Publish the deployment auction record
    laconic.publish(deployment_auction)
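A sketch of how the command above could be exercised with click's test runner. The config path, application LRN, and amounts are placeholders; with a real laconicd config this would create the auction and, because of --dry-run, print the record instead of publishing it:

from click.testing import CliRunner

from stack_orchestrator.deploy.webapp.publish_deployment_auction import command

runner = CliRunner()
result = runner.invoke(command, [
    "--laconic-config", "laconic.yml",             # placeholder config path
    "--app", "lrn://laconic/applications/my-app",  # placeholder application LRN
    "--max-price", "1000",
    "--num-providers", "2",
    "--dry-run",
])
print(result.output)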
91 stack_orchestrator/deploy/webapp/publish_webapp_deployer.py Normal file
@@ -0,0 +1,91 @@
# Copyright ©2023 Vulcanize

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import base64
import click
import sys
import yaml

from urllib.parse import urlparse

from stack_orchestrator.deploy.webapp.util import LaconicRegistryClient


@click.command()
@click.option(
    "--laconic-config", help="Provide a config file for laconicd", required=True
)
@click.option("--api-url", help="The API URL of the deployer.", required=True)
@click.option(
    "--public-key-file",
    help="The public key to use. This should be a binary file.",
    required=True,
)
@click.option(
    "--lrn", help="eg, lrn://laconic/deployers/my.deployer.name", required=True
)
@click.option(
    "--payment-address",
    help="The address to which payments should be made. "
    "Default is the current laconic account.",
    default=None,
)
@click.option(
    "--min-required-payment",
    help="The minimum required payment (in alnt) to process a deployment request.",
    default=0,
)
@click.option(
    "--dry-run",
    help="Don't publish anything, just report what would be done.",
    is_flag=True,
)
@click.pass_context
def command(  # noqa: C901
    ctx,
    laconic_config,
    api_url,
    public_key_file,
    lrn,
    payment_address,
    min_required_payment,
    dry_run,
):
    laconic = LaconicRegistryClient(laconic_config)
    if not payment_address:
        payment_address = laconic.whoami().address

    pub_key = base64.b64encode(open(public_key_file, "rb").read()).decode("ASCII")
    hostname = urlparse(api_url).hostname
    webapp_deployer_record = {
        "record": {
            "type": "WebappDeployer",
            "version": "1.0.0",
            "apiUrl": api_url,
            "name": hostname,
            "publicKey": pub_key,
            "paymentAddress": payment_address,
        }
    }

    if min_required_payment:
        webapp_deployer_record["record"][
            "minimumPayment"
        ] = f"{min_required_payment}alnt"

    if dry_run:
        yaml.dump(webapp_deployer_record, sys.stdout)
        return

    laconic.publish(webapp_deployer_record, [lrn])
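One detail worth noting: the record stores minimumPayment as the string "<amount>alnt", and the request-side tooling recovers the integer by stripping the denom. A tiny round-trip sketch (the amount is illustrative):

TOKEN_DENOM = "alnt"  # same constant the util module defines

min_required_payment = 1000  # illustrative amount
encoded = f"{min_required_payment}{TOKEN_DENOM}"  # written by the publisher
decoded = int(encoded.replace(TOKEN_DENOM, ""))   # parsed by the requester
assert decoded == min_required_payment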
77 stack_orchestrator/deploy/webapp/registry_mutex.py Normal file
@@ -0,0 +1,77 @@
from functools import wraps
import os
import time

# Define default file path for the lock
DEFAULT_LOCK_FILE_PATH = "/tmp/registry_mutex_lock_file"
LOCK_TIMEOUT = 30
LOCK_RETRY_INTERVAL = 3


def acquire_lock(client, lock_file_path, timeout):
    # Lock already acquired by the current client
    if client.mutex_lock_acquired:
        return

    while True:
        try:
            # Check if lock file exists and is potentially stale
            if os.path.exists(lock_file_path):
                with open(lock_file_path, 'r') as lock_file:
                    timestamp = float(lock_file.read().strip())

                # If lock is stale, remove the lock file
                if time.time() - timestamp > timeout:
                    print(f"Stale lock detected, removing lock file {lock_file_path}")
                    os.remove(lock_file_path)
                else:
                    print(f"Lock file {lock_file_path} exists and is recent, waiting...")
                    time.sleep(LOCK_RETRY_INTERVAL)
                    continue

            # Try to create a new lock file with the current timestamp
            fd = os.open(lock_file_path, os.O_CREAT | os.O_EXCL | os.O_RDWR)
            with os.fdopen(fd, 'w') as lock_file:
                lock_file.write(str(time.time()))

            client.mutex_lock_acquired = True
            print(f"Registry lock acquired, {lock_file_path}")

            # Lock successfully acquired
            return

        except FileExistsError:
            print(f"Lock file {lock_file_path} exists, waiting...")
            time.sleep(LOCK_RETRY_INTERVAL)


def release_lock(client, lock_file_path):
    try:
        os.remove(lock_file_path)

        client.mutex_lock_acquired = False
        print(f"Registry lock released, {lock_file_path}")
    except FileNotFoundError:
        # Lock file already removed
        pass


def registry_mutex():
    def decorator(func):
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            lock_file_path = DEFAULT_LOCK_FILE_PATH
            if self.mutex_lock_file:
                lock_file_path = self.mutex_lock_file

            # Acquire the lock before running the function
            acquire_lock(self, lock_file_path, LOCK_TIMEOUT)
            try:
                return func(self, *args, **kwargs)
            finally:
                # Release the lock after the function completes
                release_lock(self, lock_file_path)

        return wrapper

    return decorator
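A minimal usage sketch of the decorator above, on a hypothetical client class: any object with mutex_lock_file and mutex_lock_acquired attributes satisfies the contract, which is what LaconicRegistryClient provides.

from stack_orchestrator.deploy.webapp.registry_mutex import registry_mutex


class DemoClient:
    def __init__(self, lock_file=None):
        # None falls back to DEFAULT_LOCK_FILE_PATH inside the decorator
        self.mutex_lock_file = lock_file
        self.mutex_lock_acquired = False

    @registry_mutex()
    def publish(self, record):
        # Only one process holding the lock file executes this body at a time
        print("publishing", record)


DemoClient("/tmp/demo.lock").publish({"type": "WebappDeployer"})

Note that the creation of the lock file itself is atomic (os.O_CREAT | os.O_EXCL), while the stale-lock removal is a best-effort cleanup between cooperating processes.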
262 stack_orchestrator/deploy/webapp/request_webapp_deployment.py Normal file
@@ -0,0 +1,262 @@
# Copyright ©2023 Vulcanize

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import shutil
import sys
import tempfile
from datetime import datetime
import base64

import gnupg
import click
import requests
import yaml

from stack_orchestrator.deploy.webapp.util import (
    AUCTION_KIND_PROVIDER,
    AuctionStatus,
    LaconicRegistryClient,
)
from dotenv import dotenv_values


def fatal(msg: str):
    print(msg, file=sys.stderr)
    sys.exit(1)


@click.command()
@click.option(
    "--laconic-config", help="Provide a config file for laconicd", required=True
)
@click.option(
    "--app",
    help="The LRN of the application to deploy.",
    required=True,
)
@click.option(
    "--auction-id",
    help="Deployment auction id. Can be used instead of deployer and payment.",
)
@click.option(
    "--deployer",
    help="The LRN of the deployer to process this request.",
)
@click.option("--env-file", help="environment file for webapp")
@click.option("--config-ref", help="The ref of an existing config upload to use.")
@click.option(
    "--make-payment",
    help="The payment to make (in alnt). The value should be a number or 'auto' to use the deployer's minimum required payment.",
)
@click.option(
    "--use-payment", help="The TX id of an existing, unused payment", default=None
)
@click.option("--dns", help="the DNS name to request (default is autogenerated)")
@click.option(
    "--dry-run",
    help="Don't publish anything, just report what would be done.",
    is_flag=True,
)
@click.pass_context
def command(  # noqa: C901
    ctx,
    laconic_config,
    app,
    auction_id,
    deployer,
    env_file,
    config_ref,
    make_payment,
    use_payment,
    dns,
    dry_run,
):
    if auction_id and deployer:
        print("Cannot specify both --auction-id and --deployer", file=sys.stderr)
        sys.exit(2)

    if not auction_id and not deployer:
        print("Must specify either --auction-id or --deployer", file=sys.stderr)
        sys.exit(2)

    if auction_id and (make_payment or use_payment):
        print("Cannot specify --auction-id with --make-payment or --use-payment", file=sys.stderr)
        sys.exit(2)

    if env_file and config_ref:
        fatal("Cannot use --env-file and --config-ref at the same time.")

    laconic = LaconicRegistryClient(laconic_config)

    app_record = laconic.get_record(app)
    if not app_record:
        fatal(f"Unable to locate app: {app}")

    # Deployers to send requests to
    deployer_records = []

    auction = None
    auction_winners = None
    if auction_id:
        # Fetch auction record for given auction
        auction_records_by_id = laconic.app_deployment_auctions({"auction": auction_id})
        if len(auction_records_by_id) == 0:
            fatal(f"Unable to locate record for auction: {auction_id}")

        # Cross check app against application in the auction record
        auction_app = auction_records_by_id[0].attributes.application
        if auction_app != app:
            fatal(f"Requested application {app} does not match application from auction record {auction_app}")

        # Fetch auction details
        auction = laconic.get_auction(auction_id)
        if not auction:
            fatal(f"Unable to locate auction: {auction_id}")

        # Check auction owner
        if auction.ownerAddress != laconic.whoami().address:
            fatal(f"Auction {auction_id} owner mismatch")

        # Check auction kind
        if auction.kind != AUCTION_KIND_PROVIDER:
            fatal(f"Auction kind needs to be {AUCTION_KIND_PROVIDER}, got {auction.kind}")

        # Check auction status
        if auction.status != AuctionStatus.COMPLETED:
            fatal(f"Auction {auction_id} not completed yet, status {auction.status}")

        # Check that winner list is not empty
        if len(auction.winnerAddresses) == 0:
            fatal(f"Auction {auction_id} has no winners")

        auction_winners = auction.winnerAddresses

        # Get deployer record for all the auction winners
        for auction_winner in auction_winners:
            # TODO: Match auction winner address with provider address?
            deployer_records_by_owner = laconic.webapp_deployers({"paymentAddress": auction_winner})
            if len(deployer_records_by_owner) == 0:
                print(f"WARNING: Unable to locate deployer for auction winner {auction_winner}")

            # Take first record with name set
            target_deployer_record = deployer_records_by_owner[0]
            for r in deployer_records_by_owner:
                if len(r.names) > 0:
                    target_deployer_record = r
                    break
            deployer_records.append(target_deployer_record)
    else:
        deployer_record = laconic.get_record(deployer)
        if not deployer_record:
            fatal(f"Unable to locate deployer: {deployer}")

        deployer_records.append(deployer_record)

    # Create and send request to each deployer
    deployment_requests = []
    for deployer_record in deployer_records:
        # Upload config to deployers if env_file is passed
        if env_file:
            tempdir = tempfile.mkdtemp()
            try:
                gpg = gnupg.GPG(gnupghome=tempdir)

                # Import the deployer's public key
                result = gpg.import_keys(
                    base64.b64decode(deployer_record.attributes.publicKey)
                )
                if 1 != result.imported:
                    fatal("Failed to import deployer's public key.")

                recip = gpg.list_keys()[0]["uids"][0]

                # Wrap the config
                config = {
                    # Include account (and payment?) details
                    "authorized": [laconic.whoami().address],
                    "config": {"env": dict(dotenv_values(env_file))},
                }
                serialized = yaml.dump(config)

                # Encrypt
                result = gpg.encrypt(serialized, recip, always_trust=True, armor=False)
                if not result.ok:
                    fatal("Failed to encrypt config.")

                # Upload it to the deployer's API
                response = requests.post(
                    f"{deployer_record.attributes.apiUrl}/upload/config",
                    data=result.data,
                    headers={"Content-Type": "application/octet-stream"},
                )
                if not response.ok:
                    response.raise_for_status()

                config_ref = response.json()["id"]
            finally:
                shutil.rmtree(tempdir, ignore_errors=True)

        target_deployer = deployer
        if (not deployer) and len(deployer_record.names):
            target_deployer = deployer_record.names[0]

        deployment_request = {
            "record": {
                "type": "ApplicationDeploymentRequest",
                "application": app,
                "version": "1.0.0",
                "name": f"{app_record.attributes.name}@{app_record.attributes.version}",
                "deployer": target_deployer,
                "meta": {"when": str(datetime.utcnow())},
            }
        }

        if auction_id:
            deployment_request["record"]["auction"] = auction_id

        if config_ref:
            deployment_request["record"]["config"] = {"ref": config_ref}

        if dns:
            deployment_request["record"]["dns"] = dns.lower()

        if make_payment:
            amount = 0
            if dry_run:
                deployment_request["record"]["payment"] = "DRY_RUN"
            elif "auto" == make_payment:
                if "minimumPayment" in deployer_record.attributes:
                    amount = int(
                        deployer_record.attributes.minimumPayment.replace("alnt", "")
                    )
            else:
                amount = make_payment
            if amount:
                receipt = laconic.send_tokens(
                    deployer_record.attributes.paymentAddress, amount
                )
                deployment_request["record"]["payment"] = receipt.tx.hash
                print("Payment TX:", receipt.tx.hash)
        elif use_payment:
            deployment_request["record"]["payment"] = use_payment

        deployment_requests.append(deployment_request)

    # Send all requests
    for deployment_request in deployment_requests:
        if dry_run:
            print(yaml.dump(deployment_request))
            continue

        laconic.publish(deployment_request)
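Before encryption, the env file is wrapped in a small YAML document. A sketch of the cleartext payload (the address and env values are placeholders):

import yaml
from dotenv import dotenv_values

config = {
    "authorized": ["laconic1exampleaddress"],        # requester's account address
    "config": {"env": dict(dotenv_values(".env"))},  # e.g. {"API_KEY": "..."}
}
print(yaml.dump(config))
# This YAML string is what gets GPG-encrypted to the deployer's public key
# and POSTed to <apiUrl>/upload/config.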
106 stack_orchestrator/deploy/webapp/request_webapp_undeployment.py Normal file
@@ -0,0 +1,106 @@
# Copyright ©2023 Vulcanize

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import sys

import click
import yaml

from stack_orchestrator.deploy.webapp.util import (LaconicRegistryClient)


def fatal(msg: str):
    print(msg, file=sys.stderr)
    sys.exit(1)


@click.command()
@click.option(
    "--laconic-config", help="Provide a config file for laconicd", required=True
)
@click.option(
    "--deployer",
    help="The LRN of the deployer to process this request.",
    required=True
)
@click.option(
    "--deployment",
    help="Deployment record (ApplicationDeploymentRecord) id of the deployment to remove.",
    required=True,
)
@click.option(
    "--make-payment",
    help="The payment to make (in alnt). The value should be a number or 'auto' to use the deployer's minimum required payment.",
)
@click.option(
    "--use-payment", help="The TX id of an existing, unused payment", default=None
)
@click.option(
    "--dry-run",
    help="Don't publish anything, just report what would be done.",
    is_flag=True,
)
@click.pass_context
def command(
    ctx,
    laconic_config,
    deployer,
    deployment,
    make_payment,
    use_payment,
    dry_run,
):
    if make_payment and use_payment:
        fatal("Cannot use --make-payment and --use-payment at the same time.")

    laconic = LaconicRegistryClient(laconic_config)

    deployer_record = laconic.get_record(deployer)
    if not deployer_record:
        fatal(f"Unable to locate deployer: {deployer}")

    undeployment_request = {
        "record": {
            "type": "ApplicationDeploymentRemovalRequest",
            "version": "1.0.0",
            "deployer": deployer,
            "deployment": deployment,
        }
    }

    if make_payment:
        amount = 0
        if dry_run:
            undeployment_request["record"]["payment"] = "DRY_RUN"
        elif "auto" == make_payment:
            if "minimumPayment" in deployer_record.attributes:
                amount = int(
                    deployer_record.attributes.minimumPayment.replace("alnt", "")
                )
        else:
            amount = make_payment
        if amount:
            receipt = laconic.send_tokens(
                deployer_record.attributes.paymentAddress, amount
            )
            undeployment_request["record"]["payment"] = receipt.tx.hash
            print("Payment TX:", receipt.tx.hash)
    elif use_payment:
        undeployment_request["record"]["payment"] = use_payment

    if dry_run:
        print(yaml.dump(undeployment_request))
        return

    laconic.publish(undeployment_request)
@@ -20,18 +20,33 @@ import sys
 
 import click
 
-from stack_orchestrator.deploy.webapp.util import LaconicRegistryClient, match_owner, skip_by_tag
+from stack_orchestrator.deploy.webapp.util import (
+    TimedLogger,
+    LaconicRegistryClient,
+    match_owner,
+    skip_by_tag,
+    confirm_payment,
+)
+
+main_logger = TimedLogger(file=sys.stderr)
 
 
-def process_app_removal_request(ctx,
-                                laconic: LaconicRegistryClient,
-                                app_removal_request,
-                                deployment_parent_dir,
-                                delete_volumes,
-                                delete_names):
-    deployment_record = laconic.get_record(app_removal_request.attributes.deployment, require=True)
+def process_app_removal_request(
+    ctx,
+    laconic: LaconicRegistryClient,
+    app_removal_request,
+    deployment_parent_dir,
+    delete_volumes,
+    delete_names,
+    webapp_deployer_record,
+):
+    deployment_record = laconic.get_record(
+        app_removal_request.attributes.deployment, require=True
+    )
     dns_record = laconic.get_record(deployment_record.attributes.dns, require=True)
-    deployment_dir = os.path.join(deployment_parent_dir, dns_record.attributes.name)
+    deployment_dir = os.path.join(
+        deployment_parent_dir, dns_record.attributes.name.lower()
+    )
 
     if not os.path.exists(deployment_dir):
         raise Exception("Deployment directory %s does not exist." % deployment_dir)
@@ -41,13 +56,18 @@ def process_app_removal_request(ctx,
     # Or of the original deployment request.
     if not matched_owner and deployment_record.attributes.request:
-        matched_owner = match_owner(app_removal_request, laconic.get_record(deployment_record.attributes.request, require=True))
+        matched_owner = match_owner(
+            app_removal_request,
+            laconic.get_record(deployment_record.attributes.request, require=True),
+        )
 
     if matched_owner:
-        print("Matched deployment ownership:", matched_owner)
+        main_logger.log("Matched deployment ownership:", matched_owner)
     else:
-        raise Exception("Unable to confirm ownership of deployment %s for removal request %s" %
-                        (deployment_record.id, app_removal_request.id))
+        raise Exception(
+            "Unable to confirm ownership of deployment %s for removal request %s"
+            % (deployment_record.id, app_removal_request.id)
+        )
 
     # TODO(telackey): Call the function directly. The easiest way to build the correct click context is to
     # exec the process, but it would be better to refactor so we could just call down_operation with the
@@ -64,8 +84,13 @@ def process_app_removal_request(ctx,
             "version": "1.0.0",
             "request": app_removal_request.id,
             "deployment": deployment_record.id,
+            "deployer": webapp_deployer_record.names[0],
         }
     }
+
+    if app_removal_request.attributes.payment:
+        removal_record["record"]["payment"] = app_removal_request.attributes.payment
+
     laconic.publish(removal_record)
 
     if delete_names:
@@ -97,22 +122,84 @@ def dump_known_requests(filename, requests):
 
 
 @click.command()
-@click.option("--laconic-config", help="Provide a config file for laconicd", required=True)
-@click.option("--deployment-parent-dir", help="Create deployment directories beneath this directory", required=True)
+@click.option(
+    "--laconic-config", help="Provide a config file for laconicd", required=True
+)
+@click.option(
+    "--deployment-parent-dir",
+    help="Create deployment directories beneath this directory",
+    required=True,
+)
 @click.option("--request-id", help="The ApplicationDeploymentRemovalRequest to process")
-@click.option("--discover", help="Discover and process all pending ApplicationDeploymentRemovalRequests",
-              is_flag=True, default=False)
-@click.option("--state-file", help="File to store state about previously seen requests.")
-@click.option("--only-update-state", help="Only update the state file, don't process any requests.", is_flag=True)
-@click.option("--delete-names/--preserve-names", help="Delete all names associated with removed deployments.", default=True)
-@click.option("--delete-volumes/--preserve-volumes", default=True, help="delete data volumes")
-@click.option("--dry-run", help="Don't do anything, just report what would be done.", is_flag=True)
-@click.option("--include-tags", help="Only include requests with matching tags (comma-separated).", default="")
-@click.option("--exclude-tags", help="Exclude requests with matching tags (comma-separated).", default="")
+@click.option(
+    "--discover",
+    help="Discover and process all pending ApplicationDeploymentRemovalRequests",
+    is_flag=True,
+    default=False,
+)
+@click.option(
+    "--state-file", help="File to store state about previously seen requests."
+)
+@click.option(
+    "--only-update-state",
+    help="Only update the state file, don't process any requests.",
+    is_flag=True,
+)
+@click.option(
+    "--delete-names/--preserve-names",
+    help="Delete all names associated with removed deployments.",
+    default=True,
+)
+@click.option(
+    "--delete-volumes/--preserve-volumes", default=True, help="delete data volumes"
+)
+@click.option(
+    "--dry-run", help="Don't do anything, just report what would be done.", is_flag=True
+)
+@click.option(
+    "--include-tags",
+    help="Only include requests with matching tags (comma-separated).",
+    default="",
+)
+@click.option(
+    "--exclude-tags",
+    help="Exclude requests with matching tags (comma-separated).",
    default="",
+)
+@click.option(
+    "--min-required-payment",
+    help="Requests must have a minimum payment to be processed (in alnt)",
+    default=0,
+)
+@click.option("--lrn", help="The LRN of this deployer.", required=True)
+@click.option(
+    "--all-requests",
+    help="Handle requests addressed to anyone (by default only requests to "
+    "my payment address are examined).",
+    is_flag=True,
+)
+@click.option(
+    "--registry-lock-file", help="File path to use for registry mutex lock", default=None
+)
 @click.pass_context
-def command(ctx, laconic_config, deployment_parent_dir,
-            request_id, discover, state_file, only_update_state,
-            delete_names, delete_volumes, dry_run, include_tags, exclude_tags):
+def command(  # noqa: C901
+    ctx,
+    laconic_config,
+    deployment_parent_dir,
+    request_id,
+    discover,
+    state_file,
+    only_update_state,
+    delete_names,
+    delete_volumes,
+    dry_run,
+    include_tags,
+    exclude_tags,
+    min_required_payment,
+    lrn,
+    all_requests,
+    registry_lock_file,
+):
     if request_id and discover:
         print("Cannot specify both --request-id and --discover", file=sys.stderr)
         sys.exit(2)
@@ -129,34 +216,55 @@ def command(ctx, laconic_config, deployment_parent_dir,
     include_tags = [tag.strip() for tag in include_tags.split(",") if tag]
     exclude_tags = [tag.strip() for tag in exclude_tags.split(",") if tag]
 
-    laconic = LaconicRegistryClient(laconic_config)
+    laconic = LaconicRegistryClient(laconic_config, log_file=sys.stderr, mutex_lock_file=registry_lock_file)
+    deployer_record = laconic.get_record(lrn, require=True)
+    payment_address = deployer_record.attributes.paymentAddress
+    main_logger.log(f"Payment address: {payment_address}")
+
+    if min_required_payment and not payment_address:
+        print(
+            f"Minimum payment required, but no payment address listed for deployer: {lrn}.",
+            file=sys.stderr,
+        )
+        sys.exit(2)
 
     # Find deployment removal requests.
     # single request
     if request_id:
+        main_logger.log(f"Retrieving request {request_id}...")
         requests = [laconic.get_record(request_id, require=True)]
         # TODO: assert record type
     # all requests
     elif discover:
-        requests = laconic.app_deployment_removal_requests()
+        main_logger.log("Discovering removal requests...")
+        if all_requests:
+            requests = laconic.app_deployment_removal_requests()
+        else:
+            requests = laconic.app_deployment_removal_requests({"deployer": lrn})
 
     if only_update_state:
         if not dry_run:
             dump_known_requests(state_file, requests)
         return
 
-    previous_requests = load_known_requests(state_file)
+    previous_requests = {}
+    if state_file:
+        main_logger.log(f"Loading known requests from {state_file}...")
+        previous_requests = load_known_requests(state_file)
 
     requests.sort(key=lambda r: r.createTime)
    requests.reverse()
 
     # Find deployments.
-    deployments = {}
-    for d in laconic.app_deployments(all=True):
-        deployments[d.id] = d
+    named_deployments = {}
+    main_logger.log("Discovering app deployments...")
+    for d in laconic.app_deployments(all=False):
+        named_deployments[d.id] = d
 
     # Find removal requests.
     removals_by_deployment = {}
     removals_by_request = {}
+    main_logger.log("Discovering deployment removals...")
     for r in laconic.app_deployment_removals():
         if r.attributes.deployment:
             # TODO: should we handle CRNs?
@@ -165,33 +273,70 @@ def command(ctx, laconic_config, deployment_parent_dir,
     one_per_deployment = {}
     for r in requests:
         if not r.attributes.deployment:
-            print(f"Skipping removal request {r.id} since it was a cancellation.")
+            main_logger.log(
+                f"Skipping removal request {r.id} since it was a cancellation."
+            )
         elif r.attributes.deployment in one_per_deployment:
-            print(f"Skipping removal request {r.id} since it was superseded.")
+            main_logger.log(f"Skipping removal request {r.id} since it was superseded.")
         else:
             one_per_deployment[r.attributes.deployment] = r
 
-    requests_to_execute = []
+    requests_to_check_for_payment = []
     for r in one_per_deployment.values():
-        if skip_by_tag(r, include_tags, exclude_tags):
-            print("Skipping removal request %s, filtered by tag (include %s, exclude %s, present %s)" % (r.id,
-                                                                                                         include_tags,
-                                                                                                         exclude_tags,
-                                                                                                         r.attributes.tags))
-        elif r.id in removals_by_request:
-            print(f"Found satisfied request for {r.id} at {removals_by_request[r.id].id}")
-        elif r.attributes.deployment in removals_by_deployment:
-            print(
-                f"Found removal record for indicated deployment {r.attributes.deployment} at "
-                f"{removals_by_deployment[r.attributes.deployment].id}")
-        else:
-            if r.id not in previous_requests:
-                print(f"Request {r.id} needs to be processed.")
-                requests_to_execute.append(r)
-            else:
-                print(f"Skipping unsatisfied request {r.id} because we have seen it before.")
+        try:
+            if r.attributes.deployment not in named_deployments:
+                main_logger.log(
+                    f"Skipping removal request {r.id} for {r.attributes.deployment} because it does "
+                    f"not appear to refer to a live, named deployment."
+                )
+            elif skip_by_tag(r, include_tags, exclude_tags):
+                main_logger.log(
+                    "Skipping removal request %s, filtered by tag (include %s, exclude %s, present %s)"
+                    % (r.id, include_tags, exclude_tags, r.attributes.tags)
+                )
+            elif r.id in removals_by_request:
+                main_logger.log(
+                    f"Found satisfied request for {r.id} at {removals_by_request[r.id].id}"
+                )
+            elif r.attributes.deployment in removals_by_deployment:
+                main_logger.log(
+                    f"Found removal record for indicated deployment {r.attributes.deployment} at "
+                    f"{removals_by_deployment[r.attributes.deployment].id}"
+                )
+            else:
+                if r.id not in previous_requests:
+                    main_logger.log(f"Request {r.id} needs to be processed.")
+                    requests_to_check_for_payment.append(r)
+                else:
+                    main_logger.log(
+                        f"Skipping unsatisfied request {r.id} because we have seen it before."
+                    )
+        except Exception as e:
+            main_logger.log(f"ERROR examining {r.id}: {e}")
+
+    requests_to_execute = []
+    # TODO: Handle requests with auction
+    if min_required_payment:
+        for r in requests_to_check_for_payment:
+            main_logger.log(f"{r.id}: Confirming payment...")
+            if confirm_payment(
+                laconic,
+                r,
+                payment_address,
+                min_required_payment,
+                main_logger,
+            ):
+                main_logger.log(f"{r.id}: Payment confirmed.")
+                requests_to_execute.append(r)
+            else:
+                main_logger.log(f"Skipping request {r.id}: unable to verify payment.")
+                dump_known_requests(state_file, [r])
+    else:
+        requests_to_execute = requests_to_check_for_payment
 
-    print("Found %d unsatisfied request(s) to process." % len(requests_to_execute))
+    main_logger.log(
+        "Found %d unsatisfied request(s) to process." % len(requests_to_execute)
+    )
 
     if not dry_run:
         for r in requests_to_execute:
@@ -202,7 +347,10 @@ def command(ctx, laconic_config, deployment_parent_dir,
                     r,
                     os.path.abspath(deployment_parent_dir),
                     delete_volumes,
-                    delete_names
+                    delete_names,
+                    deployer_record,
                 )
+            except Exception as e:
+                main_logger.log(f"ERROR processing removal request {r.id}: {e}")
             finally:
                 dump_known_requests(state_file, [r])
stack_orchestrator/deploy/webapp/util.py
@@ -1,4 +1,4 @@
-# Copyright © 2023 Vulcanize
+# = str(min_required_payment) Copyright © 2023 Vulcanize
 
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License as published by
@@ -22,9 +22,23 @@ import subprocess
 import sys
 import tempfile
 import uuid
+
 import yaml
 
+from enum import Enum
+
+from stack_orchestrator.deploy.webapp.registry_mutex import registry_mutex
+
+
+class AuctionStatus(str, Enum):
+    COMMIT = "commit"
+    REVEAL = "reveal"
+    COMPLETED = "completed"
+    EXPIRED = "expired"
+
+
+TOKEN_DENOM = "alnt"
+AUCTION_KIND_PROVIDER = "provider"
+
+
 class AttrDict(dict):
     def __init__(self, *args, **kwargs):
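Because AuctionStatus subclasses str, its members compare equal to the raw status strings the registry returns, which is what makes checks like auction.status == AuctionStatus.COMMIT work on plain strings. A quick sketch:

from enum import Enum


class AuctionStatus(str, Enum):
    COMMIT = "commit"
    REVEAL = "reveal"
    COMPLETED = "completed"
    EXPIRED = "expired"


assert AuctionStatus.COMMIT == "commit"  # str subclass: equality with plain strings
assert "reveal" == AuctionStatus.REVEAL
print(AuctionStatus("completed"))        # lookup by value also works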
@ -59,6 +73,12 @@ class TimedLogger:
|
|||||||
self.last = datetime.datetime.now()
|
self.last = datetime.datetime.now()
|
||||||
|
|
||||||
|
|
||||||
|
def load_known_requests(filename):
|
||||||
|
if filename and os.path.exists(filename):
|
||||||
|
return json.load(open(filename, "r"))
|
||||||
|
return {}
|
||||||
|
|
||||||
|
|
||||||
def logged_cmd(log_file, *vargs):
|
def logged_cmd(log_file, *vargs):
|
||||||
result = None
|
result = None
|
||||||
try:
|
try:
|
||||||
@ -83,17 +103,114 @@ def match_owner(recordA, *records):
|
|||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def is_lrn(name_or_id: str):
|
||||||
|
if name_or_id:
|
||||||
|
return str(name_or_id).startswith("lrn://")
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def is_id(name_or_id: str):
|
||||||
|
return not is_lrn(name_or_id)
|
||||||
|
|
||||||
|
|
||||||
 class LaconicRegistryClient:
-    def __init__(self, config_file, log_file=None):
+    def __init__(self, config_file, log_file=None, mutex_lock_file=None):
         self.config_file = config_file
         self.log_file = log_file
         self.cache = AttrDict(
             {
                 "name_or_id": {},
+                "accounts": {},
+                "txs": {},
             }
         )
+        self.mutex_lock_file = mutex_lock_file
+        self.mutex_lock_acquired = False

-    def list_records(self, criteria={}, all=False):
+    def whoami(self, refresh=False):
+        if not refresh and "whoami" in self.cache:
+            return self.cache["whoami"]
+
+        args = ["laconic", "-c", self.config_file, "registry", "account", "get"]
+        results = [
+            AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args)) if r
+        ]
+
+        if len(results):
+            self.cache["whoami"] = results[0]
+            return results[0]
+
+        return None
+
+    def get_owner(self, record, require=False):
+        bond = self.get_bond(record.bondId, require)
+        if bond:
+            return bond.owner
+
+        return bond
+
+    def get_account(self, address, refresh=False, require=False):
+        if not refresh and address in self.cache["accounts"]:
+            return self.cache["accounts"][address]
+
+        args = [
+            "laconic",
+            "-c",
+            self.config_file,
+            "registry",
+            "account",
+            "get",
+            "--address",
+            address,
+        ]
+        results = [
+            AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args)) if r
+        ]
+        if len(results):
+            self.cache["accounts"][address] = results[0]
+            return results[0]
+
+        if require:
+            raise Exception("Cannot locate account:", address)
+        return None
+
+    def get_bond(self, id, require=False):
+        if id in self.cache.name_or_id:
+            return self.cache.name_or_id[id]
+
+        args = [
+            "laconic",
+            "-c",
+            self.config_file,
+            "registry",
+            "bond",
+            "get",
+            "--id",
+            id,
+        ]
+        results = [
+            AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args)) if r
+        ]
+        self._add_to_cache(results)
+        if len(results):
+            return results[0]
+
+        if require:
+            raise Exception("Cannot locate bond:", id)
+        return None
+
+    def list_bonds(self):
+        args = ["laconic", "-c", self.config_file, "registry", "bond", "list"]
+        results = [
+            AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args)) if r
+        ]
+        self._add_to_cache(results)
+        return results
+
+    def list_records(self, criteria=None, all=False):
+        if criteria is None:
+            criteria = {}
         args = ["laconic", "-c", self.config_file, "registry", "record", "list"]

         if all:
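The accessors added above (whoami, get_account, get_bond, list_bonds) all shell out to the laconic CLI via logged_cmd and memoize their results in self.cache. A minimal usage sketch, assuming the class is importable from stack_orchestrator.deploy.webapp.util and that a valid laconic CLI config file exists; both file paths below are hypothetical:

    from stack_orchestrator.deploy.webapp.util import LaconicRegistryClient

    # Hypothetical paths, for illustration only.
    client = LaconicRegistryClient(
        "/home/user/.laconic/registry.yml",
        mutex_lock_file="/tmp/laconic-registry.lock",
    )
    account = client.whoami()    # shells out to the laconic CLI once
    account = client.whoami()    # second call is served from self.cache
    bonds = client.list_bonds()  # list results are folded into the cache too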
@@ -104,22 +221,17 @@ class LaconicRegistryClient:
             args.append("--%s" % k)
             args.append(str(v))

-        results = [AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args))]
+        results = [
+            AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args)) if r
+        ]

         # Most recent records first
         results.sort(key=lambda r: r.createTime)
         results.reverse()
+        self._add_to_cache(results)

         return results

-    def is_lrn(self, name_or_id: str):
-        if name_or_id:
-            return str(name_or_id).startswith("lrn://")
-        return False
-
-    def is_id(self, name_or_id: str):
-        return not self.is_lrn(name_or_id)
-
     def _add_to_cache(self, records):
         if not records:
             return
@@ -129,9 +241,10 @@ class LaconicRegistryClient:
             if p.names:
                 for lrn in p.names:
                     self.cache["name_or_id"][lrn] = p
-            if p.attributes.type not in self.cache:
-                self.cache[p.attributes.type] = []
-            self.cache[p.attributes.type].append(p)
+            if p.attributes and p.attributes.type:
+                if p.attributes.type not in self.cache:
+                    self.cache[p.attributes.type] = []
+                self.cache[p.attributes.type].append(p)

     def resolve(self, name):
         if not name:
@@ -142,7 +255,9 @@ class LaconicRegistryClient:

         args = ["laconic", "-c", self.config_file, "registry", "name", "resolve", name]

-        parsed = [AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args))]
+        parsed = [
+            AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args)) if r
+        ]
         if parsed:
             self._add_to_cache(parsed)
             return parsed[0]
@@ -158,7 +273,7 @@ class LaconicRegistryClient:
         if name_or_id in self.cache.name_or_id:
             return self.cache.name_or_id[name_or_id]

-        if self.is_lrn(name_or_id):
+        if is_lrn(name_or_id):
             return self.resolve(name_or_id)

         args = [
@@ -172,7 +287,9 @@ class LaconicRegistryClient:
             name_or_id,
         ]

-        parsed = [AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args))]
+        parsed = [
+            AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args)) if r
+        ]
         if len(parsed):
             self._add_to_cache(parsed)
             return parsed[0]
@@ -181,38 +298,128 @@ class LaconicRegistryClient:
         raise Exception("Cannot locate record:", name_or_id)
         return None

-    def app_deployment_requests(self, all=True):
-        return self.list_records({"type": "ApplicationDeploymentRequest"}, all)
-
-    def app_deployments(self, all=True):
-        return self.list_records({"type": "ApplicationDeploymentRecord"}, all)
-
-    def app_deployment_removal_requests(self, all=True):
-        return self.list_records({"type": "ApplicationDeploymentRemovalRequest"}, all)
-
-    def app_deployment_removals(self, all=True):
-        return self.list_records({"type": "ApplicationDeploymentRemovalRecord"}, all)
-
-    def publish(self, record, names=[]):
+    def get_tx(self, txHash, require=False):
+        if txHash in self.cache["txs"]:
+            return self.cache["txs"][txHash]
+
+        args = [
+            "laconic",
+            "-c",
+            self.config_file,
+            "registry",
+            "tokens",
+            "gettx",
+            "--hash",
+            txHash,
+        ]
+
+        parsed = None
+        try:
+            parsed = AttrDict(json.loads(logged_cmd(self.log_file, *args)))
+        except:  # noqa: E722
+            pass
+
+        if parsed:
+            self.cache["txs"][txHash] = parsed
+            return parsed
+
+        if require:
+            raise Exception("Cannot locate tx:", hash)
+
+    def get_auction(self, auction_id, require=False):
+        args = [
+            "laconic",
+            "-c",
+            self.config_file,
+            "registry",
+            "auction",
+            "get",
+            "--id",
+            auction_id,
+        ]
+
+        results = None
+        try:
+            results = [
+                AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args)) if r
+            ]
+        except:  # noqa: E722
+            pass
+
+        if results and len(results):
+            return results[0]
+
+        if require:
+            raise Exception("Cannot locate auction:", auction_id)
+
+        return None
+
+    def app_deployment_requests(self, criteria=None, all=True):
+        if criteria is None:
+            criteria = {}
+        criteria = criteria.copy()
+        criteria["type"] = "ApplicationDeploymentRequest"
+        return self.list_records(criteria, all)
+
+    def app_deployments(self, criteria=None, all=True):
+        if criteria is None:
+            criteria = {}
+        criteria = criteria.copy()
+        criteria["type"] = "ApplicationDeploymentRecord"
+        return self.list_records(criteria, all)
+
+    def app_deployment_removal_requests(self, criteria=None, all=True):
+        if criteria is None:
+            criteria = {}
+        criteria = criteria.copy()
+        criteria["type"] = "ApplicationDeploymentRemovalRequest"
+        return self.list_records(criteria, all)
+
+    def app_deployment_removals(self, criteria=None, all=True):
+        if criteria is None:
+            criteria = {}
+        criteria = criteria.copy()
+        criteria["type"] = "ApplicationDeploymentRemovalRecord"
+        return self.list_records(criteria, all)
+
+    def webapp_deployers(self, criteria=None, all=True):
+        if criteria is None:
+            criteria = {}
+        criteria = criteria.copy()
+        criteria["type"] = "WebappDeployer"
+        return self.list_records(criteria, all)
+
+    def app_deployment_auctions(self, criteria=None, all=True):
+        if criteria is None:
+            criteria = {}
+        criteria = criteria.copy()
+        criteria["type"] = "ApplicationDeploymentAuction"
+        return self.list_records(criteria, all)
+
+    @registry_mutex()
+    def publish(self, record, names=None):
+        if names is None:
+            names = []
         tmpdir = tempfile.mkdtemp()
         try:
             record_fname = os.path.join(tmpdir, "record.yml")
-            record_file = open(record_fname, 'w')
+            record_file = open(record_fname, "w")
             yaml.dump(record, record_file)
             record_file.close()
-            print(open(record_fname, 'r').read(), file=self.log_file)
+            print(open(record_fname, "r").read(), file=self.log_file)

             new_record_id = json.loads(
                 logged_cmd(
                     self.log_file,
-                    "laconic", "-c",
+                    "laconic",
+                    "-c",
                     self.config_file,
                     "registry",
                     "record",
                     "publish",
                     "--filename",
-                    record_fname
+                    record_fname,
                 )
             )["id"]
             for name in names:
                 self.set_name(name, new_record_id)
@@ -220,11 +427,112 @@ class LaconicRegistryClient:
         finally:
             logged_cmd(self.log_file, "rm", "-rf", tmpdir)

+    @registry_mutex()
     def set_name(self, name, record_id):
-        logged_cmd(self.log_file, "laconic", "-c", self.config_file, "registry", "name", "set", name, record_id)
+        logged_cmd(
+            self.log_file,
+            "laconic",
+            "-c",
+            self.config_file,
+            "registry",
+            "name",
+            "set",
+            name,
+            record_id,
+        )

+    @registry_mutex()
     def delete_name(self, name):
-        logged_cmd(self.log_file, "laconic", "-c", self.config_file, "registry", "name", "delete", name)
+        logged_cmd(
+            self.log_file,
+            "laconic",
+            "-c",
+            self.config_file,
+            "registry",
+            "name",
+            "delete",
+            name,
+        )
+
+    @registry_mutex()
+    def send_tokens(self, address, amount, type="alnt"):
+        args = [
+            "laconic",
+            "-c",
+            self.config_file,
+            "registry",
+            "tokens",
+            "send",
+            "--address",
+            address,
+            "--quantity",
+            str(amount),
+            "--type",
+            type,
+        ]
+
+        return AttrDict(json.loads(logged_cmd(self.log_file, *args)))
+
+    @registry_mutex()
+    def create_deployment_auction(self, auction):
+        args = [
+            "laconic",
+            "-c",
+            self.config_file,
+            "registry",
+            "auction",
+            "create",
+            "--kind",
+            auction["kind"],
+            "--commits-duration",
+            str(auction["commits_duration"]),
+            "--reveals-duration",
+            str(auction["reveals_duration"]),
+            "--denom",
+            auction["denom"],
+            "--commit-fee",
+            str(auction["commit_fee"]),
+            "--reveal-fee",
+            str(auction["reveal_fee"]),
+            "--max-price",
+            str(auction["max_price"]),
+            "--num-providers",
+            str(auction["num_providers"])
+        ]
+
+        return json.loads(logged_cmd(self.log_file, *args))["auctionId"]
+
+    @registry_mutex()
+    def commit_bid(self, auction_id, amount, type="alnt"):
+        args = [
+            "laconic",
+            "-c",
+            self.config_file,
+            "registry",
+            "auction",
+            "bid",
+            "commit",
+            auction_id,
+            str(amount),
+            type,
+        ]
+
+        return json.loads(logged_cmd(self.log_file, *args))["reveal_file"]
+
+    @registry_mutex()
+    def reveal_bid(self, auction_id, reveal_file_path):
+        logged_cmd(
+            self.log_file,
+            "laconic",
+            "-c",
+            self.config_file,
+            "registry",
+            "auction",
+            "bid",
+            "reveal",
+            auction_id,
+            reveal_file_path,
+        )


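The @registry_mutex() decorator now wraps every registry-mutating method above, but its definition is outside this hunk. A file-lock based sketch of what it could look like, using the mutex_lock_file and mutex_lock_acquired fields initialized in __init__; every detail below is an assumption for illustration, not the project's actual implementation:

    import fcntl
    from functools import wraps

    def registry_mutex():
        def decorator(func):
            @wraps(func)
            def wrapper(self, *args, **kwargs):
                # Re-entrant calls (e.g. publish() -> set_name()) skip re-locking.
                if self.mutex_lock_file and not self.mutex_lock_acquired:
                    with open(self.mutex_lock_file, "w") as lock_file:
                        # Block until no other process holds the advisory lock.
                        fcntl.flock(lock_file, fcntl.LOCK_EX)
                        self.mutex_lock_acquired = True
                        try:
                            return func(self, *args, **kwargs)
                        finally:
                            self.mutex_lock_acquired = False
                            fcntl.flock(lock_file, fcntl.LOCK_UN)
                return func(self, *args, **kwargs)
            return wrapper
        return decorator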
 def file_hash(filename):
@@ -248,7 +556,9 @@ def determine_base_container(clone_dir, app_type="webapp"):
     return base_container


-def build_container_image(app_record, tag, extra_build_args=[], logger=None):
+def build_container_image(app_record, tag, extra_build_args=None, logger=None):
+    if extra_build_args is None:
+        extra_build_args = []
     tmpdir = tempfile.mkdtemp()

     # TODO: determine if this code could be calling into the Python git library like setup-repositories
@@ -265,9 +575,15 @@ def build_container_image(app_record, tag, extra_build_args=[], logger=None):
     if github_token:
         logger.log("Github token detected, setting it in the git environment")
         git_config_args = [
-            "git", "config", "--global", f"url.https://{github_token}:@github.com/.insteadOf", "https://github.com/"
+            "git",
+            "config",
+            "--global",
+            f"url.https://{github_token}:@github.com/.insteadOf",
+            "https://github.com/",
         ]
-        result = subprocess.run(git_config_args, stdout=logger.file, stderr=logger.file)
+        result = subprocess.run(
+            git_config_args, stdout=logger.file, stderr=logger.file
+        )
         result.check_returncode()
     if ref:
         # TODO: Determine branch or hash, and use depth 1 if we can.
@@ -275,30 +591,50 @@ def build_container_image(app_record, tag, extra_build_args=[], logger=None):
         # Never prompt
         git_env["GIT_TERMINAL_PROMPT"] = "0"
         try:
-            subprocess.check_call(["git", "clone", repo, clone_dir], env=git_env, stdout=logger.file, stderr=logger.file)
+            subprocess.check_call(
+                ["git", "clone", repo, clone_dir],
+                env=git_env,
+                stdout=logger.file,
+                stderr=logger.file,
+            )
         except Exception as e:
             logger.log(f"git clone failed. Is the repository {repo} private?")
             raise e
         try:
-            subprocess.check_call(["git", "checkout", ref], cwd=clone_dir, env=git_env, stdout=logger.file, stderr=logger.file)
+            subprocess.check_call(
+                ["git", "checkout", ref],
+                cwd=clone_dir,
+                env=git_env,
+                stdout=logger.file,
+                stderr=logger.file,
+            )
         except Exception as e:
             logger.log(f"git checkout failed. Does ref {ref} exist?")
             raise e
     else:
         # TODO: why is this code different vs the branch above (run vs check_call, and no prompt disable)?
-        result = subprocess.run(["git", "clone", "--depth", "1", repo, clone_dir], stdout=logger.file, stderr=logger.file)
+        result = subprocess.run(
+            ["git", "clone", "--depth", "1", repo, clone_dir],
+            stdout=logger.file,
+            stderr=logger.file,
+        )
         result.check_returncode()

-    base_container = determine_base_container(clone_dir, app_record.attributes.app_type)
+    base_container = determine_base_container(
+        clone_dir, app_record.attributes.app_type
+    )

logger.log("Building webapp ...")
|
logger.log("Building webapp ...")
|
||||||
build_command = [
|
build_command = [
|
||||||
sys.argv[0],
|
sys.argv[0],
|
||||||
"--verbose",
|
"--verbose",
|
||||||
"build-webapp",
|
"build-webapp",
|
||||||
"--source-repo", clone_dir,
|
"--source-repo",
|
||||||
"--tag", tag,
|
clone_dir,
|
||||||
"--base-container", base_container
|
"--tag",
|
||||||
|
tag,
|
||||||
|
"--base-container",
|
||||||
|
base_container,
|
||||||
]
|
]
|
||||||
if extra_build_args:
|
if extra_build_args:
|
||||||
build_command.append("--extra-build-args")
|
build_command.append("--extra-build-args")
|
||||||
@@ -312,39 +648,57 @@ def build_container_image(app_record, tag, extra_build_args=[], logger=None):

 def push_container_image(deployment_dir, logger):
     logger.log("Pushing images ...")
-    result = subprocess.run([sys.argv[0], "deployment", "--dir", deployment_dir, "push-images"],
-                            stdout=logger.file, stderr=logger.file)
+    result = subprocess.run(
+        [sys.argv[0], "deployment", "--dir", deployment_dir, "push-images"],
+        stdout=logger.file,
+        stderr=logger.file,
+    )
     result.check_returncode()
     logger.log("Finished pushing images.")


-def deploy_to_k8s(deploy_record, deployment_dir, logger):
-    if not deploy_record:
-        command = "start"
-    else:
-        command = "update"
-
+def deploy_to_k8s(deploy_record, deployment_dir, recreate, logger):
     logger.log("Deploying to k8s ...")
-    logger.log(f"Running {command} command on deployment dir: {deployment_dir}")
-    result = subprocess.run([sys.argv[0], "deployment", "--dir", deployment_dir, command],
-                            stdout=logger.file, stderr=logger.file)
-    result.check_returncode()
+    if recreate:
+        commands_to_run = ["stop", "start"]
+    else:
+        if not deploy_record:
+            commands_to_run = ["start"]
+        else:
+            commands_to_run = ["update"]
+
+    for command in commands_to_run:
+        logger.log(f"Running {command} command on deployment dir: {deployment_dir}")
+        result = subprocess.run(
+            [sys.argv[0], "deployment", "--dir", deployment_dir, command],
+            stdout=logger.file,
+            stderr=logger.file,
+        )
+        result.check_returncode()
+        logger.log(f"Finished {command} command on deployment dir: {deployment_dir}")
+
     logger.log("Finished deploying to k8s.")


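The reworked deploy_to_k8s above runs one or more deployment subcommands depending on the new recreate flag. A pure-function restatement of just that selection logic, for clarity (illustrative only, not code from the diff):

    def commands_for(deploy_record, recreate):
        if recreate:
            return ["stop", "start"]
        return ["start"] if not deploy_record else ["update"]

    assert commands_for(None, recreate=True) == ["stop", "start"]
    assert commands_for(None, recreate=False) == ["start"]
    assert commands_for({"id": "x"}, recreate=False) == ["update"]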
-def publish_deployment(laconic: LaconicRegistryClient,
-                       app_record,
-                       deploy_record,
-                       deployment_lrn,
-                       dns_record,
-                       dns_lrn,
-                       deployment_dir,
-                       app_deployment_request=None,
-                       logger=None):
+def publish_deployment(
+    laconic: LaconicRegistryClient,
+    app_record,
+    deploy_record,
+    deployment_lrn,
+    dns_record,
+    dns_lrn,
+    deployment_dir,
+    app_deployment_request=None,
+    webapp_deployer_record=None,
+    logger=None,
+):
     if not deploy_record:
         deploy_ver = "0.0.1"
     else:
-        deploy_ver = "0.0.%d" % (int(deploy_record.attributes.version.split(".")[-1]) + 1)
+        deploy_ver = "0.0.%d" % (
+            int(deploy_record.attributes.version.split(".")[-1]) + 1
+        )

     if not dns_record:
         dns_ver = "0.0.1"
@@ -362,9 +716,7 @@ def publish_deployment(laconic: LaconicRegistryClient,
             "version": dns_ver,
             "name": fqdn,
             "resource_type": "A",
-            "meta": {
-                "so": uniq.hex
-            },
+            "meta": {"so": uniq.hex},
         }
     }
     if app_deployment_request:
@@ -384,13 +736,23 @@ def publish_deployment(laconic: LaconicRegistryClient,
             "dns": dns_id,
             "meta": {
                 "config": file_hash(os.path.join(deployment_dir, "config.env")),
-                "so": uniq.hex
+                "so": uniq.hex,
             },
         }
     }

     if app_deployment_request:
         new_deployment_record["record"]["request"] = app_deployment_request.id

+        # Set auction or payment id from request
+        if app_deployment_request.attributes.auction:
+            new_deployment_record["record"]["auction"] = app_deployment_request.attributes.auction
+        elif app_deployment_request.attributes.payment:
+            new_deployment_record["record"]["payment"] = app_deployment_request.attributes.payment
+
+    if webapp_deployer_record:
+        new_deployment_record["record"]["deployer"] = webapp_deployer_record.names[0]
+
     if logger:
         logger.log("Publishing ApplicationDeploymentRecord.")
     deployment_id = laconic.publish(new_deployment_record, [deployment_lrn])
@@ -400,7 +762,9 @@ def publish_deployment(laconic: LaconicRegistryClient,
 def hostname_for_deployment_request(app_deployment_request, laconic):
     dns_name = app_deployment_request.attributes.dns
     if not dns_name:
-        app = laconic.get_record(app_deployment_request.attributes.application, require=True)
+        app = laconic.get_record(
+            app_deployment_request.attributes.application, require=True
+        )
         dns_name = generate_hostname_for_app(app)
     elif dns_name.startswith("lrn://"):
         record = laconic.get_record(dns_name, require=True)
@@ -432,3 +796,108 @@ def skip_by_tag(r, include_tags, exclude_tags):
         return True

     return False
+
+
+def confirm_payment(laconic: LaconicRegistryClient, record, payment_address, min_amount, logger):
+    req_owner = laconic.get_owner(record)
+    if req_owner == payment_address:
+        # No need to confirm payment if the sender and recipient are the same account.
+        return True
+
+    if not record.attributes.payment:
+        logger.log(f"{record.id}: no payment tx info")
+        return False
+
+    tx = laconic.get_tx(record.attributes.payment)
+    if not tx:
+        logger.log(f"{record.id}: cannot locate payment tx")
+        return False
+
+    if tx.code != 0:
+        logger.log(
+            f"{record.id}: payment tx {tx.hash} was not successful - code: {tx.code}, log: {tx.log}"
+        )
+        return False
+
+    if tx.sender != req_owner:
+        logger.log(
+            f"{record.id}: payment sender {tx.sender} in tx {tx.hash} does not match deployment "
+            f"request owner {req_owner}"
+        )
+        return False
+
+    if tx.recipient != payment_address:
+        logger.log(
+            f"{record.id}: payment recipient {tx.recipient} in tx {tx.hash} does not match {payment_address}"
+        )
+        return False
+
+    pay_denom = "".join([i for i in tx.amount if not i.isdigit()])
+    if pay_denom != "alnt":
+        logger.log(
+            f"{record.id}: {pay_denom} in tx {tx.hash} is not an expected payment denomination"
+        )
+        return False
+
+    pay_amount = int("".join([i for i in tx.amount if i.isdigit()]))
+    if pay_amount < min_amount:
+        logger.log(
+            f"{record.id}: payment amount {tx.amount} is less than minimum {min_amount}"
+        )
+        return False
+
+    # Check if the payment was already used on a deployment
+    used = laconic.app_deployments(
+        {"deployer": record.attributes.deployer, "payment": tx.hash}, all=True
+    )
+    if len(used):
+        # Fetch the app name from request record
+        used_request = laconic.get_record(used[0].attributes.request, require=True)
+
+        # Check that payment was used for deployment of same application
+        if record.attributes.application != used_request.attributes.application:
+            logger.log(f"{record.id}: payment {tx.hash} already used on a different application deployment {used}")
+            return False
+
+    used = laconic.app_deployment_removals(
+        {"deployer": record.attributes.deployer, "payment": tx.hash}, all=True
+    )
+    if len(used):
+        logger.log(
+            f"{record.id}: payment {tx.hash} already used on deployment removal {used}"
+        )
+        return False
+
+    return True
+
+
+def confirm_auction(laconic: LaconicRegistryClient, record, deployer_lrn, payment_address, logger):
+    auction_id = record.attributes.auction
+    auction = laconic.get_auction(auction_id)
+
+    # Fetch auction record for given auction
+    auction_records_by_id = laconic.app_deployment_auctions({"auction": auction_id})
+    if len(auction_records_by_id) == 0:
+        logger.log(f"{record.id}: unable to locate record for auction {auction_id}")
+        return False
+
+    # Cross check app against application in the auction record
+    requested_app = laconic.get_record(record.attributes.application, require=True)
+    auction_app = laconic.get_record(auction_records_by_id[0].attributes.application, require=True)
+    if requested_app.id != auction_app.id:
+        logger.log(
+            f"{record.id}: requested application {record.attributes.application} does not match application from "
+            f"auction record {auction_records_by_id[0].attributes.application}"
+        )
+        return False
+
+    if not auction:
+        logger.log(f"{record.id}: unable to locate auction {auction_id}")
+        return False
+
+    # Check if the deployer payment address is in auction winners list
+    if payment_address not in auction.winnerAddresses:
+        logger.log(f"{record.id}: deployer payment address not in auction winners.")
+        return False
+
+    return True
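confirm_payment splits a Cosmos-style amount string into denomination and quantity with two character-filter comprehensions. A quick worked example of that parsing, assuming an amount string such as "10000alnt":

    tx_amount = "10000alnt"  # example value only
    pay_denom = "".join([i for i in tx_amount if not i.isdigit()])
    pay_amount = int("".join([i for i in tx_amount if i.isdigit()]))
    assert pay_denom == "alnt"
    assert pay_amount == 10000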
@@ -24,7 +24,12 @@ from stack_orchestrator.build import build_webapp
 from stack_orchestrator.deploy.webapp import (run_webapp,
                                               deploy_webapp,
                                               deploy_webapp_from_registry,
-                                              undeploy_webapp_from_registry)
+                                              undeploy_webapp_from_registry,
+                                              publish_webapp_deployer,
+                                              publish_deployment_auction,
+                                              handle_deployment_auction,
+                                              request_webapp_deployment,
+                                              request_webapp_undeployment)
 from stack_orchestrator.deploy import deploy
 from stack_orchestrator import version
 from stack_orchestrator.deploy import deployment
@@ -61,6 +66,11 @@ cli.add_command(run_webapp.command, "run-webapp")
 cli.add_command(deploy_webapp.command, "deploy-webapp")
 cli.add_command(deploy_webapp_from_registry.command, "deploy-webapp-from-registry")
 cli.add_command(undeploy_webapp_from_registry.command, "undeploy-webapp-from-registry")
+cli.add_command(publish_webapp_deployer.command, "publish-deployer-to-registry")
+cli.add_command(publish_deployment_auction.command, "publish-deployment-auction")
+cli.add_command(handle_deployment_auction.command, "handle-deployment-auction")
+cli.add_command(request_webapp_deployment.command, "request-webapp-deployment")
+cli.add_command(request_webapp_undeployment.command, "request-webapp-undeployment")
 cli.add_command(deploy.command, "deploy") # deploy is an alias for deploy-system
 cli.add_command(deploy.command, "deploy-system")
 cli.add_command(deployment.command, "deployment")
@@ -1,44 +0,0 @@
#!/usr/bin/env bash
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
    set -x
fi
set -e
echo "Running stack-orchestrator Ethereum plugeth fixturenet test"
# Bit of a hack, test the most recent package
TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
CERC_STACK_NAME=fixturenet-plugeth-tx
# Set a new unique repo dir
export CERC_REPO_BASE_DIR=$(mktemp -d stack-orchestrator-fixturenet-eth-test.XXXXXXXXXX)
echo "Testing this package: $TEST_TARGET_SO"
echo "Test version command"
reported_version_string=$( $TEST_TARGET_SO version )
echo "Version reported is: ${reported_version_string}"
echo "Cloning repositories into: $CERC_REPO_BASE_DIR"
$TEST_TARGET_SO --stack $CERC_STACK_NAME setup-repositories
echo "Building containers"
$TEST_TARGET_SO --stack $CERC_STACK_NAME build-containers
echo "Images in registry:"
docker image ls
echo "Deploying the cluster"
$TEST_TARGET_SO --stack $CERC_STACK_NAME deploy up
# Verify that the fixturenet is up and running
$TEST_TARGET_SO --stack $CERC_STACK_NAME deploy ps
$TEST_TARGET_SO --stack $CERC_STACK_NAME deploy exec fixturenet-eth-bootnode-lighthouse /scripts/status-internal.sh
initial_block_number=$($TEST_TARGET_SO --stack fixturenet-plugeth-tx deploy exec foundry "cast block-number")
# Check that the block number increases some time later
sleep 12
subsequent_block_number=$($TEST_TARGET_SO --stack $CERC_STACK_NAME deploy exec foundry "cast block-number")
block_number_difference=$((subsequent_block_number - initial_block_number))
# Block height difference should be between 1 and some small number
if [[ $block_number_difference -gt 1 && $block_number_difference -lt 10 ]]; then
    echo "Test passed"
    test_result=0
else
    echo "Test failed: block numbers were ${initial_block_number} and ${subsequent_block_number}"
    test_result=1
fi
$TEST_TARGET_SO --stack $CERC_STACK_NAME deploy down
echo "Removing cloned repositories"
rm -rf $CERC_REPO_BASE_DIR
exit $test_result
@@ -1,80 +0,0 @@
#!/usr/bin/env bash
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
    set -x
fi

echo "$(date +"%Y-%m-%d %T"): Running stack-orchestrator Ethereum fixturenet test"
# Bit of a hack, test the most recent package
TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
# Set a new unique repo dir
export CERC_REPO_BASE_DIR=$(mktemp -d stack-orchestrator-fixturenet-eth-test.XXXXXXXXXX)
echo "$(date +"%Y-%m-%d %T"): Testing this package: $TEST_TARGET_SO"
echo "$(date +"%Y-%m-%d %T"): Test version command"
reported_version_string=$( $TEST_TARGET_SO version )
echo "$(date +"%Y-%m-%d %T"): Version reported is: ${reported_version_string}"
echo "$(date +"%Y-%m-%d %T"): Cloning repositories into: $CERC_REPO_BASE_DIR"
$TEST_TARGET_SO --stack fixturenet-eth setup-repositories
echo "$(date +"%Y-%m-%d %T"): Building containers"
$TEST_TARGET_SO --stack fixturenet-eth build-containers
echo "$(date +"%Y-%m-%d %T"): Starting stack"
$TEST_TARGET_SO --stack fixturenet-eth deploy up
echo "$(date +"%Y-%m-%d %T"): Stack started"
# Verify that the fixturenet is up and running
$TEST_TARGET_SO --stack fixturenet-eth deploy ps
# echo "$(date +"%Y-%m-%d %T"): Getting stack status"
# $TEST_TARGET_SO --stack fixturenet-eth deploy exec fixturenet-eth-bootnode-lighthouse /scripts/status-internal.sh

timeout=900 # 15 minutes
echo "$(date +"%Y-%m-%d %T"): Getting initial block number. Timeout set to $timeout seconds"
start_time=$(date +%s)
elapsed_time=0
initial_block_number=0
while [ "$initial_block_number" -eq 0 ] && [ $elapsed_time -lt $timeout ]; do
    sleep 10
    echo "$(date +"%Y-%m-%d %T"): Waiting for initial block..."
    initial_block_number=$($TEST_TARGET_SO --stack fixturenet-eth deploy exec foundry "cast block-number")
    current_time=$(date +%s)
    elapsed_time=$((current_time - start_time))
done

subsequent_block_number=$initial_block_number

# if initial block was 0 after timeout, assume chain did not start successfully and skip finding subsequent block
if [[ $initial_block_number -gt 0 ]]; then
    timeout=300
    echo "$(date +"%Y-%m-%d %T"): Getting subsequent block number. Timeout set to $timeout seconds"
    start_time=$(date +%s)
    elapsed_time=0
    # wait for 5 blocks or timeout
    while [ "$subsequent_block_number" -le $((initial_block_number + 5)) ] && [ $elapsed_time -lt $timeout ]; do
        sleep 10
        echo "$(date +"%Y-%m-%d %T"): Waiting for five blocks or $timeout seconds..."
        subsequent_block_number=$($TEST_TARGET_SO --stack fixturenet-eth deploy exec foundry "cast block-number")
        current_time=$(date +%s)
        elapsed_time=$((current_time - start_time))
    done
fi

# will return 0 if either of the above loops timed out
block_number_difference=$((subsequent_block_number - initial_block_number))

echo "$(date +"%Y-%m-%d %T"): Results of block height queries:"
echo "Initial block height: $initial_block_number"
echo "Subsequent block height: $subsequent_block_number"

# Block height difference should be between 1 and some small number
if [[ $block_number_difference -gt 1 && $block_number_difference -lt 100 ]]; then
    echo "Test passed"
    test_result=0
else
    echo "Test failed: block numbers were ${initial_block_number} and ${subsequent_block_number}"
    echo "Logs from stack:"
    $TEST_TARGET_SO --stack fixturenet-eth deploy logs
    test_result=1
fi
$TEST_TARGET_SO --stack fixturenet-eth deploy down --delete-volumes
echo "$(date +"%Y-%m-%d %T"): Removing cloned repositories"
rm -rf $CERC_REPO_BASE_DIR
echo "$(date +"%Y-%m-%d %T"): Test finished"
exit $test_result
tests/k8s-deployment-control/run-test.sh (new executable file, 222 lines)
@@ -0,0 +1,222 @@
#!/usr/bin/env bash
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
    set -x
    # Dump environment variables for debugging
    echo "Environment variables:"
    env
fi

if [ "$1" == "from-path" ]; then
    TEST_TARGET_SO="laconic-so"
else
    TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
fi

# Helper functions: TODO move into a separate file
wait_for_pods_started () {
    for i in {1..50}
    do
        local ps_output=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir ps )

        if [[ "$ps_output" == *"Running containers:"* ]]; then
            # if ready, return
            return
        else
            # if not ready, wait
            sleep 5
        fi
    done
    # Timed out, error exit
    echo "waiting for pods to start: FAILED"
    delete_cluster_exit
}

wait_for_log_output () {
    for i in {1..50}
    do

        local log_output=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )

        if [[ ! -z "$log_output" ]]; then
            # if ready, return
            return
        else
            # if not ready, wait
            sleep 5
        fi
    done
    # Timed out, error exit
    echo "waiting for pods log content: FAILED"
    delete_cluster_exit
}

delete_cluster_exit () {
    $TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes
    exit 1
}

# Set a non-default repo dir
export CERC_REPO_BASE_DIR=~/stack-orchestrator-test/repo-base-dir
echo "Testing this package: $TEST_TARGET_SO"
echo "Test version command"
reported_version_string=$( $TEST_TARGET_SO version )
echo "Version reported is: ${reported_version_string}"
echo "Cloning repositories into: $CERC_REPO_BASE_DIR"
rm -rf $CERC_REPO_BASE_DIR
mkdir -p $CERC_REPO_BASE_DIR
$TEST_TARGET_SO --stack test setup-repositories
$TEST_TARGET_SO --stack test build-containers
# Test basic stack-orchestrator deploy to k8s
test_deployment_dir=$CERC_REPO_BASE_DIR/test-deployment-dir
test_deployment_spec=$CERC_REPO_BASE_DIR/test-deployment-spec.yml

# Create a deployment that we can use to check our test cases
$TEST_TARGET_SO --stack test deploy --deploy-to k8s-kind init --output $test_deployment_spec
# Check the file now exists
if [ ! -f "$test_deployment_spec" ]; then
    echo "deploy init test: spec file not present"
    echo "deploy init test: FAILED"
    exit 1
fi
echo "deploy init test: passed"

$TEST_TARGET_SO --stack test deploy create --spec-file $test_deployment_spec --deployment-dir $test_deployment_dir
# Check the deployment dir exists
if [ ! -d "$test_deployment_dir" ]; then
    echo "deploy create test: deployment directory not present"
    echo "deploy create test: FAILED"
    exit 1
fi
echo "deploy create test: passed"
# Check the file written by the create command in the stack now exists
if [ ! -f "$test_deployment_dir/create-file" ]; then
    echo "deploy create test: create output file not present"
    echo "deploy create test: FAILED"
    exit 1
fi
echo "deploy create output file test: passed"

# At this point the deployment's kind-config.yml will look like this:
# kind: Cluster
# apiVersion: kind.x-k8s.io/v1alpha4
# nodes:
# - role: control-plane
#   kubeadmConfigPatches:
#     - |
#       kind: InitConfiguration
#       nodeRegistration:
#         kubeletExtraArgs:
#           node-labels: "ingress-ready=true"
#   extraPortMappings:
#     - containerPort: 80
#       hostPort: 80

# We need to change it to this:
# Note we also turn up the log level on the scheduler in order to diagnose placement errors
# See logs like: kubectl -n kube-system logs kube-scheduler-laconic-f185cd245d8dba98-control-plane
kind_config_file=${test_deployment_dir}/kind-config.yml
cat << EOF > ${kind_config_file}
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
kubeadmConfigPatches:
  - |
    kind: ClusterConfiguration
    scheduler:
      extraArgs:
        v: "3"
nodes:
- role: control-plane
  kubeadmConfigPatches:
    - |
      kind: InitConfiguration
      nodeRegistration:
        kubeletExtraArgs:
          node-labels: "ingress-ready=true"
  extraPortMappings:
    - containerPort: 80
      hostPort: 80
- role: worker
  labels:
    nodetype: a
- role: worker
  labels:
    nodetype: b
- role: worker
  labels:
    nodetype: c
  kubeadmConfigPatches:
    - |
      kind: JoinConfiguration
      nodeRegistration:
        taints:
          - key: "nodeavoid"
            value: "c"
            effect: "NoSchedule"
EOF

# At this point we should have 4 nodes, three labeled like this:
# $ kubectl get nodes --show-labels=true
# NAME                                     STATUS   ROLES           AGE     VERSION   LABELS
# laconic-3af549a3ba0e3a3c-control-plane   Ready    control-plane   2m37s   v1.30.0   ...,ingress-ready=true
# laconic-3af549a3ba0e3a3c-worker          Ready    <none>          2m18s   v1.30.0   ...,nodetype=a
# laconic-3af549a3ba0e3a3c-worker2         Ready    <none>          2m18s   v1.30.0   ...,nodetype=b
# laconic-3af549a3ba0e3a3c-worker3         Ready    <none>          2m18s   v1.30.0   ...,nodetype=c

# And with taints like this:
# $ kubectl get nodes -o custom-columns=NAME:.metadata.name,TAINTS:.spec.taints --no-headers
# laconic-3af549a3ba0e3a3c-control-plane   [map[effect:NoSchedule key:node-role.kubernetes.io/control-plane]]
# laconic-3af549a3ba0e3a3c-worker          <none>
# laconic-3af549a3ba0e3a3c-worker2         <none>
# laconic-3af549a3ba0e3a3c-worker3        [map[effect:NoSchedule key:nodeavoid value:c]]

# We can now modify the deployment spec file to require a set of affinity and/or taint combinations
# then bring up the deployment and check that the pod is scheduled to an expected node.

# Add a requirement to schedule on a node labeled nodetype=c and
# a toleration such that no other pods schedule on that node
deployment_spec_file=${test_deployment_dir}/spec.yml
cat << EOF >> ${deployment_spec_file}
node-affinities:
  - label: nodetype
    value: c
node-tolerations:
  - key: nodeavoid
    value: c
EOF

# Get the deployment ID so we can generate low level kubectl commands later
deployment_id=$(cat ${test_deployment_dir}/deployment.yml | cut -d ' ' -f 2)

# Try to start the deployment
$TEST_TARGET_SO deployment --dir $test_deployment_dir start
wait_for_pods_started
# Check logs command works
wait_for_log_output
sleep 1
log_output_1=$( $TEST_TARGET_SO deployment --dir $test_deployment_dir logs )
if [[ "$log_output_1" == *"filesystem is fresh"* ]]; then
    echo "deployment of pod test: passed"
else
    echo "deployment pod test: FAILED"
    echo $log_output_1
    delete_cluster_exit
fi

# The deployment's pod should be scheduled onto node: worker3
# Check that's what happened
# Get the node onto which the stack pod has been deployed
deployment_node=$(kubectl get pods -l app=${deployment_id} -o=jsonpath='{.items..spec.nodeName}')
expected_node=${deployment_id}-worker3
echo "Stack pod deployed to node: ${deployment_node}"
if [[ ${deployment_node} == ${expected_node} ]]; then
    echo "deployment of pod test: passed"
else
    echo "deployment pod test: FAILED"
    echo "Stack pod deployed to node: ${deployment_node}, expected node: ${expected_node}"
    delete_cluster_exit
fi

# Stop and clean up
$TEST_TARGET_SO deployment --dir $test_deployment_dir stop --delete-volumes
echo "Test passed"
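The scheduling assertion at the end of this new test shells out to kubectl; the same check could also be written against the official Kubernetes Python client, as in this sketch (assumes the kubernetes package is installed, the kind cluster's kubeconfig is active, and the deployment ID shown is only an example taken from the comments above):

    from kubernetes import client, config

    config.load_kube_config()
    v1 = client.CoreV1Api()
    deployment_id = "laconic-3af549a3ba0e3a3c"  # example ID, not a real deployment
    pods = v1.list_namespaced_pod("default", label_selector=f"app={deployment_id}")
    for pod in pods.items:
        # Every stack pod should have landed on the tainted, labeled worker3 node.
        assert pod.spec.node_name == f"{deployment_id}-worker3"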
@@ -4,6 +4,10 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
     set -x
 fi

+set_ownership () {
+    sudo chown $USER: -R $1
+}
+
 node_count=4
 node_dir_prefix="laconic-network-dir"
 chain_id="laconic_81337-6"
@@ -15,7 +19,7 @@ do
     node_network_dir=${node_dir_prefix}${i}
     if [[ -d $node_network_dir ]]; then
         echo "Deleting ${node_network_dir}"
-        rm -rf ${node_network_dir}
+        sudo rm -rf ${node_network_dir}
     fi
 done
 echo "Deleting any existing deployments..."
@@ -39,6 +43,7 @@ do
     node_network_dir=${node_dir_prefix}${i}
     node_moniker=${node_moniker_prefix}${i}
     laconic-so --stack mainnet-laconic deploy setup --network-dir ${node_network_dir} --initialize-network --chain-id ${chain_id} --node-moniker ${node_moniker}
+    set_ownership ${node_network_dir}
 done

 echo "Joining ${node_count} nodes to the network..."
@@ -47,6 +52,7 @@ do
     node_network_dir=${node_dir_prefix}${i}
     node_moniker=${node_moniker_prefix}${i}
     laconic-so --stack mainnet-laconic deploy setup --network-dir ${node_network_dir} --join-network --key-name ${node_moniker}
+    set_ownership ${node_network_dir}
 done

 echo "Merging ${node_count} nodes genesis txns..."
@@ -57,11 +63,15 @@ for (( i=2 ; i<=$node_count ; i++ ));
 do
     node_network_dir=${node_dir_prefix}${i}
     node_gentx_file=$(ls ${node_network_dir}/config/gentx/*.json)
+    node_gentx_address=$(grep address ${node_network_dir}/config/genesis.json | head -1 | cut -d '"' -f 4)
     gentx_files+=${delimeter}${node_gentx_file}
+    gentx_addresses+=${delimeter}${node_gentx_address}
     delimeter=","
 done
+echo "gentx files:"
+echo ${gentx_files}
 # Generate the genesis file on node 1
-laconic-so --stack mainnet-laconic deploy setup --network-dir ${node_dir_prefix}1 --create-network --gentx-files ${gentx_files}
+laconic-so --stack mainnet-laconic deploy setup --network-dir ${node_dir_prefix}1 --create-network --gentx-files ${gentx_files} --gentx-addresses ${gentx_addresses}
 genesis_file=${node_dir_prefix}1/config/genesis.json
 # Now import the genesis file to the other nodes
 for (( i=2 ; i<=$node_count ; i++ ));
@@ -69,6 +79,7 @@ do
     echo "Importing genesis.json into node ${i}"
     node_network_dir=${node_dir_prefix}${i}
     laconic-so --stack mainnet-laconic deploy setup --network-dir ${node_network_dir} --create-network --genesis-file ${genesis_file}
+    set_ownership ${node_network_dir}
 done

 # Create deployments
@ -20,7 +20,7 @@ echo "Cloning repositories into: $CERC_REPO_BASE_DIR"
|
|||||||
rm -rf $CERC_REPO_BASE_DIR
|
rm -rf $CERC_REPO_BASE_DIR
|
||||||
mkdir -p $CERC_REPO_BASE_DIR
|
mkdir -p $CERC_REPO_BASE_DIR
|
||||||
# Pull an example small public repo to test we can pull a repo
|
# Pull an example small public repo to test we can pull a repo
|
||||||
$TEST_TARGET_SO setup-repositories --include cerc-io/laconic-sdk
|
$TEST_TARGET_SO setup-repositories --include cerc-io/registry-sdk
|
||||||
# Test pulling a stack
|
# Test pulling a stack
|
||||||
$TEST_TARGET_SO --stack test setup-repositories
|
$TEST_TARGET_SO --stack test setup-repositories
|
||||||
# Test building the a stack container
|
# Test building the a stack container
|
||||||