Add ops instructions to setup snowball frontend and backend (#32)
Part of [Service provider auctions for web deployments](https://www.notion.so/Service-provider-auctions-for-web-deployments-104a6b22d47280dbad51d28aa3a91d75)

- Update instructions for nitro node deployments

Co-authored-by: Adw8 <adwaitgharpure@gmail.com>
Co-authored-by: Shreerang Kale <shreerangkale@gmail.com>
Co-authored-by: Neeraj <neeraj.rtly@gmail.com>
Reviewed-on: #32
Co-authored-by: Prathamesh Musale <prathamesh.musale0@gmail.com>
Co-committed-by: Prathamesh Musale <prathamesh.musale0@gmail.com>
ops/configs/backend-config.toml (new file, 40 lines)
@@ -0,0 +1,40 @@
[server]
host = "0.0.0.0"
port = 8000
gqlPath = "/graphql"
[server.session]
secret = "<redacted>"
# Frontend webapp URL origin
appOriginUrl = "https://deploy.apps.vaasl.io"
# Set to true if server running behind proxy
trustProxy = true
# Backend URL hostname
domain = "deploy-backend.apps.vaasl.io"

[database]
dbPath = "/data/db/deploy-backend"

[gitHub]
webhookUrl = "https://deploy-backend.apps.vaasl.io"
[gitHub.oAuth]
clientId = "<redacted>"
clientSecret = "<redacted>"

[registryConfig]
fetchDeploymentRecordDelay = 5000
checkAuctionStatusDelay = 5000
restEndpoint = "https://laconicd-sapo.laconic.com"
gqlEndpoint = "https://laconicd-sapo.laconic.com/api"
chainId = "laconic-testnet-2"
privateKey = "<redacted>"
bondId = "<redacted>"
authority = "vaasl"
[registryConfig.fee]
gasPrice = "0.001alnt"

[auction]
commitFee = "1000"
commitsDuration = "60s"
revealFee = "1000"
revealsDuration = "60s"
denom = "alnt"
@@ -191,10 +191,12 @@
initial_token_supply: "129600"
```

* Edit the `setup-vars.yml` to update the target directory:

```bash
sed -i 's|^nitro_directory:.*|nitro_directory: /srv/bridge|' setup-vars.yml
...
nitro_directory: /srv/bridge
...

# Will create deployment at /srv/bridge/nitro-contracts-deployment
```
@@ -343,10 +345,12 @@
ca_address: ""
```

* Edit the `setup-vars.yml` to update the target directory:

```bash
sed -i 's|^nitro_directory:.*|nitro_directory: /srv/bridge|' setup-vars.yml
...
nitro_directory: /srv/bridge
...

# Will create deployment at /srv/bridge/nitro-contracts-deployment and /srv/bridge/bridge-deployment
```
@@ -1234,6 +1238,301 @@

</details>

<details open>
<summary>deploy-backend</summary>

## Deploy Backend

* Stack: <https://git.vdb.to/cerc-io/snowballtools-base-api-stack/src/branch/main/stack-orchestrator/stacks/snowballtools-base-backend>

* Source repo: <https://git.vdb.to/cerc-io/snowballtools-base>

* Target dir: `/srv/deploy-backend/backend-deployment`

* Cleanup an existing deployment if required:

```bash
cd /srv/deploy-backend

# Stop the deployment
laconic-so deployment --dir backend-deployment stop --delete-volumes

# Remove the deployment dir
sudo rm -rf backend-deployment

# Remove the existing spec file
rm backend-deployment-spec.yml
```

### Setup

* Clone the stack repo:

```bash
laconic-so fetch-stack git.vdb.to/cerc-io/snowballtools-base-api-stack --pull

# This should clone the snowballtools-base-api-stack repo at `/home/dev/cerc/snowballtools-base-api-stack`
```

* Clone required repositories:

```bash
laconic-so --stack ~/cerc/snowballtools-base-api-stack/stack-orchestrator/stacks/snowballtools-base-backend setup-repositories --git-ssh --pull

# This should clone the snowballtools-base repo at `/home/dev/cerc/snowballtools-base`
```

* Build the container images:

```bash
laconic-so --stack ~/cerc/snowballtools-base-api-stack/stack-orchestrator/stacks/snowballtools-base-backend build-containers --force-rebuild

# This should create the Docker images: "cerc/snowballtools-base-backend" and "cerc/snowballtools-base-backend-base"
```

* Push the images to the container registry. The container registry will be set up while setting up a service provider

```bash
laconic-so deployment --dir backend-deployment push-images
```

### Deployment

* Create a spec file for the deployment:

```bash
cd /srv/deploy-backend

laconic-so --stack ~/cerc/snowballtools-base-api-stack/stack-orchestrator/stacks/snowballtools-base-backend deploy init --output backend-deployment-spec.yml --config SNOWBALL_BACKEND_CONFIG_FILE_PATH=/config/prod.toml
```

* Edit the spec file to deploy the stack to k8s:

```bash
stack:
  /home/dev/cerc/snowballtools-base-api-stack/stack-orchestrator/stacks/snowballtools-base-backend
deploy-to: k8s
kube-config: /home/dev/.kube/config-vs-narwhal.yaml
image-registry: container-registry.apps.vaasl.io/laconic-registry
config:
  SNOWBALL_BACKEND_CONFIG_FILE_PATH: /config/prod.toml
network:
  ports:
    deploy-backend:
      - '8000'
  http-proxy:
    - host-name: deploy-backend.apps.vaasl.io
      routes:
        - path: '/'
          proxy-to: deploy-backend:8000
volumes:
  data:
configmaps:
  config: ./configmaps/config
```

* Create a deployment from the spec file:

```bash
laconic-so --stack ~/cerc/snowballtools-base-api-stack/stack-orchestrator/stacks/snowballtools-base-backend deploy create --deployment-dir backend-deployment --spec-file backend-deployment-spec.yml
# This should create the deployment directory at `/srv/deploy-backend/backend-deployment`
```

* Modify file `backend-deployment/kubeconfig.yml` if required
```
apiVersion: v1
...
contexts:
- context:
    cluster: ***
    user: ***
  name: default
...
```
NOTE: `context.name` must be `default` to use with SO
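If the fetched kubeconfig uses a different context name, it can be renamed in place; a minimal sketch using `yq` (already used elsewhere in these docs), assuming a single context entry:

```bash
# Rename the first context to "default" and point current-context at it
# (hypothetical one-liner; adjust the index if your kubeconfig has multiple contexts)
yq eval -i '.contexts[0].name = "default" | .["current-context"] = "default"' backend-deployment/kubeconfig.yml
```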

* Fetch the config template file for the snowball backend:

```bash
# Place in snowball deployment directory
wget -O /srv/deploy-backend/backend-deployment/configmaps/config/prod.toml https://git.vdb.to/cerc-io/testnet-laconicd-stack/raw/branch/main/ops/configs/backend-deployment.toml
```

* Setup private key and bond. If not already setup, execute the following commands in the directory containing `stage2-deployment`

* Create a new account and fetch the private key

```bash
laconic-so deployment --dir stage2-deployment exec laconicd "laconicd keys add deploy"
# - address: laconic1yr758d5vkg28text073vlzdjdgd7ud6w729tww
...
export deployKey=$(laconic-so deployment --dir stage2-deployment exec laconicd "echo y | laconicd keys export deploy --keyring-backend test --unarmored-hex --unsafe")
# ...
# txhash: 262D380259AC06024F87C909EB0BF7814CEC26CDF527B003C4C10631E1DB5893
```

* Send tokens to this account

```bash
laconic-so deployment --dir stage2-deployment exec laconicd "laconicd tx bank send alice laconic1yr758d5vkg28text073vlzdjdgd7ud6w729tww 1000000000000000000alnt --from alice --fees 200000alnt -y"
```

* Create a bond using this account

```bash
laconic-so deployment --dir laconic-console-testnet2-deployment exec cli "laconic registry bond create --type alnt --quantity 1000000000000 --user-key $deployKey" | jq -r '.bondId'
# 15e5bc37c40f67adc9ab498fa3fa50b090770f9bb56b27d71714a99138df9a22
```

* Set bond id

```bash
export bondId=15e5bc37c40f67adc9ab498fa3fa50b090770f9bb56b27d71714a99138df9a22
```
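Alternatively, the bond ID can be captured directly when the bond is created; a sketch reusing the same `bond create` invocation shown above:

```bash
# Create the bond and export its ID in one step
export bondId=$(laconic-so deployment --dir laconic-console-testnet2-deployment exec cli "laconic registry bond create --type alnt --quantity 1000000000000 --user-key $deployKey" | jq -r '.bondId')
echo $bondId
```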

* Register authority. Execute the following commands in the directory containing `laconic-console-testnet2-deployment`

* Reserve an authority

```bash
laconic-so deployment --dir laconic-console-testnet2-deployment exec cli "laconic registry authority reserve deploy-vaasl --txKey $deployKey"
```

* Obtain the auction ID

```bash
laconic-so deployment --dir laconic-console-testnet2-deployment exec cli "laconic registry authority whois deploy-vaasl --txKey $deployKey"
# "auction": {
#   "id": "73e0b082a198c396009ce748804a9060c674a10045365d262c1584f99d2771c1"
```

* Commit a bid using the auction ID. A reveal file will be generated

```bash
laconic-so deployment --dir laconic-console-testnet2-deployment exec cli "laconic registry auction bid commit 73e0b082a198c396009ce748804a9060c674a10045365d262c1584f99d2771c1 5000000 alnt --chain-id laconic-testnet-2 --txKey $deployKey"

# {"reveal_file":"/app/out/bafyreiewi4osqyvrnljwwcb36fn6sr5iidfpuznqkz52gxc5ztt3jt4zmy.json"}
```

* Reveal a bid using the auction ID and the reveal file generated from the bid commit

```bash
laconic-so deployment --dir laconic-console-testnet2-deployment exec cli "laconic registry auction bid reveal 73e0b082a198c396009ce748804a9060c674a10045365d262c1584f99d2771c1 /app/out/bafyreiewi4osqyvrnljwwcb36fn6sr5iidfpuznqkz52gxc5ztt3jt4zmy.json --chain-id laconic-testnet-2 --txKey $deployKey"
# {"success": true}
```

* Verify status after the auction ends. It should list a completed status and a winner

```bash
laconic-so deployment --dir laconic-console-testnet2-deployment exec cli "laconic registry auction get 73e0b082a198c396009ce748804a9060c674a10045365d262c1584f99d2771c1 --txKey $deployKey"
```

* Set the authority using a bond ID.

```bash
laconic-so deployment --dir laconic-console-testnet2-deployment exec cli "laconic registry authority bond set deploy-vaasl $bondId --txKey $deployKey"
# {"success": true}
```

* Verify the authority has been registered.

```bash
laconic-so deployment --dir laconic-console-testnet2-deployment exec cli "laconic registry authority whois deploy-vaasl --txKey $deployKey"
```

* Update the backend config fetched above (`/srv/deploy-backend/backend-deployment/configmaps/config/prod.toml`). Replace each `<redacted>` value with your credentials, and use the `userKey` (the `deploy` account's private key), `bondId` and `authority` that you set up
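For example, the registry placeholders can be filled in with `sed`; a sketch that assumes the config was fetched to the path above and that the keys appear unindented as in the template:

```bash
CONFIG=/srv/deploy-backend/backend-deployment/configmaps/config/prod.toml

# Fill in the registry credentials set up in the previous steps
sed -i "s|^privateKey = .*|privateKey = \"$deployKey\"|" $CONFIG
sed -i "s|^bondId = .*|bondId = \"$bondId\"|" $CONFIG
sed -i "s|^authority = .*|authority = \"vaasl\"|" $CONFIG
```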

### Start

* Start the deployment:

```bash
laconic-so deployment --dir backend-deployment start
```

* Check status:

```bash
# Follow logs for snowball container
laconic-so deployment --dir backend-deployment logs snowballtools-base-backend -f
```

</details>

<details open>
<summary>deploy-frontend</summary>

## Deploy Frontend

* Source repo: <https://git.vdb.to/cerc-io/snowballtools-base>

### Prerequisites

* Node.js

* Yarn

### Setup

* On your local machine, clone the `snowballtools-base` repo:

```bash
git clone git@git.vdb.to:cerc-io/snowballtools-base.git
```

* Install dependencies:

```bash
cd snowballtools-base
yarn install
```

* In the deployer package, create the required env file:

```bash
cd packages/deployer
cp .env.example .env
```

Set the required variables:

```bash
REGISTRY_BOND_ID=<bond-id>
DEPLOYER_LRN=lrn://vaasl-provider/deployers/webapp-deployer-api.apps.vaasl.io
AUTHORITY=vaasl
```
Note: The bond ID should be the one associated with the `vaasl` authority
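To double-check which bond is associated with the authority, the `whois` command from the backend setup can be reused (a sketch; run it wherever the laconic console CLI deployment is available):

```bash
# Output should show the authority owner and the bond ID you intend to use
laconic-so deployment --dir laconic-console-testnet2-deployment exec cli "laconic registry authority whois vaasl --txKey $deployKey"
```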

* Update required laconic config. You can use the same `userKey` and `bondId` used for deploying backend:

```bash
# Replace <user-pk> and <bond-id>
cat <<EOF > config.yml
services:
  registry:
    rpcEndpoint: https://laconicd-sapo.laconic.com
    gqlEndpoint: https://laconicd-sapo.laconic.com/api
    userKey: <user-pk>
    bondId: <bond-id>
    chainId: laconic-testnet-2
    gasPrice: 0.001alnt
EOF
```
Note: The `userKey` account should own the authority `vaasl`

### Run

* Run frontend deployment script:

```bash
./deploy-frontend.sh
```

Follow deployment logs on the deployer UI

</details>

## Domains / Port Mappings
@@ -1,6 +1,8 @@
# Nitro Token Ops

## Deploy and transfer custom tokens

### Setup

* Go to the directory where `nitro-contracts-deployment` is present:

@@ -8,7 +10,7 @@
cd /srv/bridge
```

### Deploy new token

* To deploy another token:

@@ -48,7 +50,7 @@

* Check in the generated file at location `ops/stage2/assets.json` within this repository

### Transfer deployed tokens to given address

* To transfer a token to an account:

@@ -57,7 +59,25 @@
export TOKEN_NAME="<name-of-token-to-be-transferred>"
export ASSET_ADDRESS=$(laconic-so deployment --dir nitro-contracts-deployment exec nitro-contracts "jq -r '.\"$GETH_CHAIN_ID\"[0].contracts.$TOKEN_NAME.address' /app/deployment/nitro-addresses.json")
export ACCOUNT="<target-account-address>"
export AMOUNT="<transfer-amount>"

laconic-so deployment --dir nitro-contracts-deployment exec nitro-contracts "cd packages/nitro-protocol && yarn hardhat transfer --contract $ASSET_ADDRESS --to $ACCOUNT --amount 100 --network geth"
```
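To verify the transfer, the recipient's token balance can be read back with an `eth_call` to the token's `balanceOf` method; a sketch assuming the fixturenet RPC endpoint used elsewhere in these docs (the account is passed without its `0x` prefix):

```bash
export GETH_CHAIN_URL="https://fixturenet-eth.laconic.com"

# balanceOf(address) selector 0x70a08231 + account padded to 32 bytes
curl -s -X POST $GETH_CHAIN_URL \
  -H "Content-Type: application/json" \
  -d '{
    "jsonrpc":"2.0",
    "method":"eth_call",
    "params":[{
      "to": "'"$ASSET_ADDRESS"'",
      "data": "0x70a08231000000000000000000000000'"${ACCOUNT#0x}"'"
    }, "latest"],
    "id":1
  }'
```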

## Transfer ETH

* Go to the directory where `fixturenet-eth-deployment` is present:

```bash
cd /srv/fixturenet-eth
```

* To transfer ETH to an account:

```bash
export FUNDED_ADDRESS="0xe6CE22afe802CAf5fF7d3845cec8c736ecc8d61F"
export FUNDED_PK="888814df89c4358d7ddb3fa4b0213e7331239a80e1f013eaa7b2deca2a41a218"

export TO_ADDRESS="<target-account-address>"

laconic-so deployment --dir fixturenet-eth-deployment exec foundry "cast send $TO_ADDRESS --value 1ether --from $FUNDED_ADDRESS --private-key $FUNDED_PK"
```

@@ -14,6 +14,8 @@

* On deployment machine:

* User with passwordless sudo: see [setup](https://git.vdb.to/cerc-io/testnet-ops/src/branch/main/user-setup/README.md#user-setup)

* laconic-so: see [installation](https://git.vdb.to/cerc-io/testnet-ops/src/branch/main/stack-orchestrator-setup/README.md#setup-stack-orchestrator)
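A quick sanity check for both prerequisites on the deployment machine (a sketch; assumes a recent stack-orchestrator install that provides `laconic-so version`):

```bash
# Should succeed without a password prompt
sudo -n true && echo "passwordless sudo OK"

# Should print the installed stack orchestrator version
laconic-so version
```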

## Setup

@@ -28,43 +30,68 @@

```bash
wget -O nitro-vars.yml https://git.vdb.to/cerc-io/testnet-laconicd-stack/raw/branch/main/ops/stage2/nitro-node-config.yml

# Expected variables in the fetched config file:

# nitro_chain_url: ""
# na_address: ""
# ca_address: ""
# vpa_address: ""
# bridge_nitro_address: ""
# nitro_l1_bridge_multiaddr: ""
# nitro_l2_bridge_multiaddr: ""
```

* Fetch required asset addresses:

```bash
wget -O assets.json https://git.vdb.to/cerc-io/testnet-laconicd-stack/raw/branch/main/ops/stage2/assets.json

# Example output:
# {
#   "1212": [
#     {
#       "name": "geth",
#       "chainId": "1212",
#       "contracts": {
#         "TestToken": {
#           "address": "0xCf7Ed3AccA5a467e9e704C703E8D87F634fB0Fc9"
#         },
#         "TestToken2": {
#           "address": "0xDc64a140Aa3E981100a9becA4E685f962f0cF6C9"
#         }
#       }
#     }
#   ]
# }
```

* Ask testnet operator to send L1 tokens and ETH to your chain address

* [README for transferring tokens](./ops/nitro-token-ops.md#transfer-deployed-tokens-to-given-address)

* [README for transferring ETH](./ops/nitro-token-ops.md#transfer-eth)

* Check balance of your tokens once they are transferred:

```bash
# Note: Account address should be without "0x"
export ACCOUNT_ADDRESS="<account-address>"

export GETH_CHAIN_ID="1212"
export GETH_CHAIN_URL="https://fixturenet-eth.laconic.com"

export ASSET_ADDRESS_1=$(jq -r --arg chainId "$GETH_CHAIN_ID" '.[$chainId][0].contracts.TestToken.address' assets.json)
export ASSET_ADDRESS_2=$(jq -r --arg chainId "$GETH_CHAIN_ID" '.[$chainId][0].contracts.TestToken2.address' assets.json)

# Check balance of eth account
curl -X POST $GETH_CHAIN_URL \
  -H "Content-Type: application/json" \
  -d '{
    "jsonrpc":"2.0",
    "method":"eth_getBalance",
    "params":["'"$ACCOUNT_ADDRESS"'", "latest"],
    "id":1
  }'

# Check balance of first asset address
curl -X POST $GETH_CHAIN_URL \
  -H "Content-Type: application/json" \
  -d '{
    "jsonrpc":"2.0",
    "method":"eth_call",
    "params":[{
      "to": "'"$ASSET_ADDRESS_1"'",
      "data": "0x70a08231000000000000000000000000'"$ACCOUNT_ADDRESS"'"
    }, "latest"],
    "id":1
  }'

# Check balance of second asset address
curl -X POST $GETH_CHAIN_URL \
  -H "Content-Type: application/json" \
  -d '{
    "jsonrpc":"2.0",
    "method":"eth_call",
    "params":[{
      "to": "'"$ASSET_ADDRESS_2"'",
      "data": "0x70a08231000000000000000000000000'"$ACCOUNT_ADDRESS"'"
    }, "latest"],
    "id":1
  }'
```

* Edit `nitro-vars.yml` and add the following variables:

@@ -90,36 +117,27 @@
nitro_l2_ext_multiaddr: ""
```

* Edit the `setup-vars.yml` to update the target directory:

```bash
# Set absolute path to desired deployments directory (under your user)
# Example: /home/dev/nitro-node-deployments
...
nitro_directory: <path-to-deployments-dir>
...

# Will create deployments at <path-to-deployments-dir>/l1-nitro-deployment and <path-to-deployments-dir>/l2-nitro-deployment
```

## Run Nitro Nodes

Nitro nodes can be run using Ansible either locally or on a remote machine; follow corresponding steps for your setup

### On Local Host

* Setup and run a Nitro node (L1+L2) by executing the `run-nitro-nodes.yml` Ansible playbook:

```bash
LANG=en_US.utf8 ansible-playbook -i localhost, --connection=local run-nitro-nodes.yml --extra-vars='{ "target_host": "localhost"}' --user $USER
```

### On Remote Host

Nitro nodes can be set up on a target machine using Ansible:

* In `testnet-ops/nitro-nodes-setup`, create a new `hosts.ini` file:

```bash
cp ../hosts.example.ini hosts.ini
```

* Edit the [`hosts.ini`](./hosts.ini) file to run the playbook on a remote machine:

@@ -131,12 +149,12 @@ Nitro nodes can be run using Ansible either locally or on a remote machine; foll
* Replace `<deployment_host>` with `nitro_host`
* Replace `<host_name>` with the alias of your choice
* Replace `<target_ip>` with the IP address or hostname of the target machine
* Replace `<ssh_user>` with the username of the user that you set up on target machine (e.g. dev, ubuntu)
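For example, a filled-in `hosts.ini` might look like the following (a sketch with hypothetical values; it assumes the single-group layout of `hosts.example.ini`):

```bash
# Hypothetical host alias, IP and SSH user; substitute your own
cat <<EOF > hosts.ini
[nitro_host]
my-nitro-host ansible_host=192.0.2.10 ansible_user=dev
EOF
```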

* Verify that you are able to connect to the host using the following command

```bash
ansible all -m ping -i hosts.ini

# If using password based authentication, enter the ssh password on prompt; otherwise, leave it blank

@@ -151,13 +169,10 @@ Nitro nodes can be run using Ansible either locally or on a remote machine; foll
# }
```

* Execute the `run-nitro-nodes.yml` Ansible playbook to setup and run a Nitro node (L1+L2):

```bash
LANG=en_US.utf8 ansible-playbook -i hosts.ini run-nitro-nodes.yml --extra-vars='{ "target_host": "nitro_host"}' --user $USER
```

### Check Deployment Status

@@ -165,9 +180,7 @@ Nitro nodes can be run using Ansible either locally or on a remote machine; foll
* Run the following commands on deployment machine:

```bash
cd <path-to-deployments-dir>

# Check the logs, ensure that the nodes are running
laconic-so deployment --dir l1-nitro-deployment logs nitro-node -f
@@ -182,7 +195,7 @@ Nitro nodes can be run using Ansible either locally or on a remote machine; foll
* Get your Nitro node's info:

```bash
laconic-so deployment --dir l2-nitro-deployment exec nitro-rpc-client "nitro-rpc-client get-node-info -p 4005 -h nitro-node"

# Expected output:
# {
@@ -202,18 +215,18 @@ Create a ledger channel with the bridge on L1 which is mirrored on L2
* Set required variables:

```bash
cd <path-to-deployments-dir>

export BRIDGE_NITRO_ADDRESS=$(yq eval '.bridge_nitro_address' nitro-node-config.yml)

export GETH_CHAIN_ID="1212"

# Get asset addresses from assets.json file
export ASSET_ADDRESS_1=$(jq -r --arg chainId "$GETH_CHAIN_ID" '.[$chainId][0].contracts.TestToken.address' assets.json)
export ASSET_ADDRESS_2=$(jq -r --arg chainId "$GETH_CHAIN_ID" '.[$chainId][0].contracts.TestToken2.address' assets.json)
```

* Check that you have no existing channels on L1 or L2:

```bash
laconic-so deployment --dir l1-nitro-deployment exec nitro-rpc-client "nitro-rpc-client get-all-ledger-channels -p 4005 -h nitro-node"
@@ -223,6 +236,8 @@ Create a ledger channel with the bridge on L1 which is mirrored on L2
# []
```

* Ensure that your account has enough balance of tokens from `assets.json`

* Create a ledger channel between your L1 Nitro node and Bridge with custom asset:

```bash
@@ -325,9 +340,7 @@ Perform payments using a virtual payment channel created with another Nitro node
* Switch to the `nitro-node` directory:

```bash
cd <path-to-deployments-dir>
```

* Check status of the mirrored channel on L2:

@@ -366,9 +379,6 @@ Perform payments using a virtual payment channel created with another Nitro node
```bash
export BRIDGE_NITRO_ADDRESS=$(yq eval '.bridge_nitro_address' nitro-node-config.yml)

# Mirrored channel on L2
export L2_CHANNEL_ID=<l2-channel-id>

@@ -376,6 +386,21 @@ Perform payments using a virtual payment channel created with another Nitro node
export PAYMENT_CHANNEL_AMOUNT=500
```

* Set counterparty address

```bash
export COUNTER_PARTY_ADDRESS=<counterparty-nitro-address>
```

* Get the nitro address of the counterparty's node with whom you want to create the payment channel

* To get the nitro address of your node:

```bash
laconic-so deployment --dir l2-nitro-deployment exec nitro-rpc-client "nitro-rpc-client get-node-info -p 4005 -h nitro-node"
# `SCAddress` -> nitro address
```

* Check for existing payment channels for the L2 channel:

```bash
@@ -447,6 +472,8 @@ Perform payments using a virtual payment channel created with another Nitro node

* Check L2 mirrored channel's status after the virtual payment channel is closed:

* This can be checked by both nodes

```bash
laconic-so deployment --dir l2-nitro-deployment exec nitro-rpc-client "nitro-rpc-client get-all-ledger-channels -p 4005 -h nitro-node"

@@ -489,9 +516,7 @@ Perform swaps using a swap channel created with another Nitro node over the mirr
* Switch to the `nitro-node` directory:

```bash
cd <path-to-deployments-dir>
```

* Check status of the mirrored channel on L2:

@@ -530,14 +555,28 @@ Perform swaps using a swap channel created with another Nitro node over the mirr
```bash
export BRIDGE_NITRO_ADDRESS=$(yq eval '.bridge_nitro_address' nitro-node-config.yml)

export GETH_CHAIN_ID="1212"

# Get asset addresses from assets.json file
export ASSET_ADDRESS_1=$(jq -r --arg chainId "$GETH_CHAIN_ID" '.[$chainId][0].contracts.TestToken.address' assets.json)
export ASSET_ADDRESS_2=$(jq -r --arg chainId "$GETH_CHAIN_ID" '.[$chainId][0].contracts.TestToken2.address' assets.json)
```

* Set counterparty address

```bash
export COUNTER_PARTY_ADDRESS=<counterparty-nitro-address>
```

* Get the nitro address of the counterparty's node with whom you want to create the swap channel

* To get the nitro address of your node:

```bash
laconic-so deployment --dir l2-nitro-deployment exec nitro-rpc-client "nitro-rpc-client get-node-info -p 4005 -h nitro-node"
# `SCAddress` -> nitro address
```

* Create swap channel:

```bash
@@ -584,6 +623,90 @@ Perform swaps using a swap channel created with another Nitro node over the mirr

### Performing swaps

* Ensure that environment variables for asset addresses are set (should be done by both parties):

```bash
export GETH_CHAIN_ID="1212"

# Get asset addresses from assets.json file
export ASSET_ADDRESS_1=$(jq -r --arg chainId "$GETH_CHAIN_ID" '.[$chainId][0].contracts.TestToken.address' assets.json)
export ASSET_ADDRESS_2=$(jq -r --arg chainId "$GETH_CHAIN_ID" '.[$chainId][0].contracts.TestToken2.address' assets.json)
```

* Get all active swap channels for a specific mirrored ledger channel (should be done by both parties)

* To get mirrored ledger channels:

```bash
laconic-so deployment --dir l2-nitro-deployment exec nitro-rpc-client "nitro-rpc-client get-all-ledger-channels -p 4005 -h nitro-node"

# Example output:
# [
#   {
#     "ID": "0xb34210b763d4fdd534190ba11886ad1daa1e411c87be6fd20cff74cd25077c46",
#     "Status": "Open",
#     "Balances": [
#       {
#         "AssetAddress": "0xa4351114dae1abeb2d552d441c9733c72682a45d",
#         "Me": "0x075400039e303b3fb46c0cff0404c5fa61947c05",
#         "Them": "0xf0e6a85c6d23aca9ff1b83477d426ed26f218185",
#         "MyBalance": 1000n,
#         "TheirBalance": 1000n
#       },
#       {
#         "AssetAddress": "0x314e43f9825b10961859c2a62c2de6a765c1c1f1",
#         "Me": "0x075400039e303b3fb46c0cff0404c5fa61947c05",
#         "Them": "0xf0e6a85c6d23aca9ff1b83477d426ed26f218185",
#         "MyBalance": 1000n,
#         "TheirBalance": 1000n
#       }
#     ],
#     "ChannelMode": "Open"
#   }
# ]
```

* Export ledger channel ID:

```bash
export LEDGER_CHANNEL_ID=
```

* To get swap channels for a ledger channel:

```bash
laconic-so deployment --dir l2-nitro-deployment exec nitro-rpc-client "nitro-rpc-client get-swap-channels-by-ledger $LEDGER_CHANNEL_ID -p 4005 -h nitro-node"
# Example Output:
# [
#   {
#     ID: '0x1dbd58d314f123f4b0f4147eee7fd92fa523ba7082d8a75b846f6d1189e2f0e9',
#     Status: 'Open',
#     Balances: [
#       {
#         AssetAddress: '0xa4351114dae1abeb2d552d441c9733c72682a45d',
#         Me: '0x075400039e303b3fb46c0cff0404c5fa61947c05',
#         Them: '0xd0ea8b27591b1d070cccd4d30b8d408fe794fdfc',
#         MyBalance: 100,
#         TheirBalance: 100n
#       },
#       {
#         AssetAddress: '0x314e43f9825b10961859c2a62c2de6a765c1c1f1',
#         Me: '0x075400039e303b3fb46c0cff0404c5fa61947c05',
#         Them: '0xd0ea8b27591b1d070cccd4d30b8d408fe794fdfc',
#         MyBalance: 100,
#         TheirBalance: 100
#       }
#     ]
#   }
# ]
```

* Export swap channel ID:

```bash
export SWAP_CHANNEL_ID=
```

* One of the participants can initiate the swap and the other one will either accept it or reject it

* For initiating the swap:
@@ -733,7 +856,7 @@ Perform swaps using a swap channel created with another Nitro node over the mirr
* Switch to deployments dir:

```bash
cd <path-to-deployments-dir>
```

* Stop all Nitro services running in the background:

@@ -775,7 +898,7 @@ Perform swaps using a swap channel created with another Nitro node over the mirr
* Stop the deployment:

```bash
cd <path-to-deployments-dir>

laconic-so deployment --dir l1-nitro-deployment stop
```