Merge branch 'main' into dboreham/add-console

commit c5cf8dda79
Author: David Boreham
Date: 2023-03-29 21:25:57 -06:00
8 changed files with 60 additions and 29 deletions


@@ -8,7 +8,7 @@ services:
condition: service_healthy
image: cerc/go-ethereum-foundry:local
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "8545"]
test: ["CMD", "nc", "-vz", "localhost", "8545"]
interval: 30s
timeout: 3s
retries: 10
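The fix here is nc's `-z` flag: in port-scan mode nc only checks that something is listening on the port and then exits, so the probe returns promptly instead of sitting on an open connection. A quick manual check of the same probe, run from a shell inside the container (port taken from the healthcheck above):

```bash
# Zero-I/O scan: exit status 0 if the port is listening, non-zero otherwise.
nc -vz localhost 8545
echo "healthcheck exit status: $?"
```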


@@ -39,7 +39,7 @@ services:
- "0.0.0.0:3002:3001"
- "0.0.0.0:9002:9001"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "3002"]
test: ["CMD", "nc", "-vz", "localhost", "3001"]
interval: 20s
timeout: 5s
retries: 15
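Besides `-z`, this hunk also switches the probe to port 3001: the healthcheck runs inside the container, so it has to target the container-internal port, while 3002 is only the host-side mapping from the `ports:` entries above. A hedged way to verify both sides (the service name is a placeholder, not taken from the compose file):

```bash
# From the host, the published port should answer:
nc -vz 127.0.0.1 3002
# Inside the container, the healthcheck sees the internal port:
docker compose exec <service-name> nc -vz localhost 3001
```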


@@ -17,12 +17,16 @@ ARG NPM_GLOBAL=/usr/local/share/npm-global
# Add NPM global to PATH.
ENV PATH=${NPM_GLOBAL}/bin:${PATH}
SHELL ["/bin/bash", "-c"]
RUN \
if [ ${CERC_HOST_GID} -ne 1000 ] ; then \
groupmod -g ${CERC_HOST_GID} ${USERNAME} ; \
# Don't switch container uid/gid if the host uid/gid is 1000 (which means it's already correct),
# or root (which won't work anyway) or <= 100 (which also won't work).
if [[ ${CERC_HOST_GID} -ne 1000 && ${CERC_HOST_GID} -ne 0 && ${CERC_HOST_GID} -gt 100 ]]; then \
groupmod -g ${CERC_HOST_GID} ${USERNAME}; \
fi \
&& if [ ${CERC_HOST_UID} -ne 1000 ] ; then \
usermod -u ${CERC_HOST_UID} -g ${CERC_HOST_GID} ${USERNAME} && chown ${CERC_HOST_UID}:${CERC_HOST_GID} /home/${USERNAME} ; \
&& if [[ ${CERC_HOST_UID} -ne 1000 && ${CERC_HOST_UID} -ne 0 && ${CERC_HOST_UID} -gt 100 ]]; then \
usermod -u ${CERC_HOST_UID} -g ${CERC_HOST_GID} ${USERNAME} && chown ${CERC_HOST_UID}:${CERC_HOST_GID} /home/${USERNAME}; \
fi
RUN \
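The rewritten guard skips the remap when the host uid/gid is 1000 (already matches the image's user), 0 (root), or any system id of 100 or below, where remapping would be pointless or would break the image. A sketch of how the host ids would typically be passed in at build time, assuming `CERC_HOST_UID` and `CERC_HOST_GID` are declared as build args in this Dockerfile (the image tag is illustrative):

```bash
# Pass the invoking user's uid/gid so the container user is remapped to match.
docker build \
  --build-arg CERC_HOST_UID="$(id -u)" \
  --build-arg CERC_HOST_GID="$(id -g)" \
  -t cerc/example-container:local .
```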


@@ -1,7 +1,9 @@
# Note: cerc/foundry is Debian based
FROM cerc/foundry:local
RUN apk update ; apk add --no-cache --allow-untrusted ca-certificates curl bash git jq
RUN apk add --no-cache --upgrade grep
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install --no-install-recommends jq curl netcat
WORKDIR /root
ARG GENESIS_FILE_PATH=genesis.json
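Because the base image is Debian rather than Alpine, the package installs move from `apk` to `apt-get`, with `DEBIAN_FRONTEND=noninteractive` exported so the install never prompts. A hedged one-liner to confirm the base distribution of the local image:

```bash
# Print the OS identification from inside the image; expect a Debian ID here.
docker run --rm --entrypoint cat cerc/foundry:local /etc/os-release
```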


@@ -10,6 +10,9 @@ Clone required repositories:
laconic-so --stack mobymask-v2 setup-repositories
```
NOTE: If the repositories already exist and are checked out to different versions, the `setup-repositories` command will throw an error.
To get around this, remove the repositories mentioned below and then re-run the command.
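A sketch of that workaround; the `~/cerc/<repo-name>` path assumes laconic-so's default checkout location and is not taken from this commit, so adjust it to wherever your repositories were actually cloned:

```bash
# Remove the conflicting checkout, then re-run the setup step.
rm -rf ~/cerc/<repo-name>
laconic-so --stack mobymask-v2 setup-repositories
```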
Check out the required versions and branches in the repos
```bash


@@ -6,14 +6,16 @@
laconic-so --stack mobymask-v2 deploy-system --include watcher-mobymask-v2 logs mobymask
```
NOTE: Clear the browser cache (local storage) for http://127.0.0.1:3002 to remove old invitations
The invite link appears at the end of the logs
Example:
```
laconic-bfb01caf98b1b8f7c8db4d33f11b905a-mobymask-1 | http://127.0.0.1:3002/#/members?invitation=%7B%22v%22%3A1%2C%22signedDelegations%22%3A%5B%7B%22signature%22%3A%220x7559bd412f02677d60820e38243acf61547f79339395a34f7d4e1630e645aeb30535fc219f79b6fbd3af0ce3bd05132ad46d2b274a9fbc4c36bc71edd09850891b%22%2C%22delegation%22%3A%7B%22delegate%22%3A%220xc0838c92B2b71756E0eAD5B3C1e1F186baeEAAac%22%2C%22authority%22%3A%220x0000000000000000000000000000000000000000000000000000000000000000%22%2C%22caveats%22%3A%5B%7B%22enforcer%22%3A%220x558024C7d593B840E1BfD83E9B287a5CDad4db15%22%2C%22terms%22%3A%220x0000000000000000000000000000000000000000000000000000000000000000%22%7D%5D%7D%7D%5D%2C%22key%22%3A%220x98da9805821f1802196443e578fd32af567bababa0a249c07c82df01ecaa7d8d%22%7D
```
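Rather than scrolling through the output, the link can usually be pulled straight out of the logs; a hedged example using the logs command from above, which simply greps for the `invitation=` URL shown in the sample line:

```bash
laconic-so --stack mobymask-v2 deploy-system --include watcher-mobymask-v2 logs mobymask \
  | grep -o 'http://127.0.0.1:3002/#/members?invitation=[^ ]*' \
  | tail -1
```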
* Open the invite link in a browser to use the mobymask-app.
NOTE: Before opening the invite link, clear the browser cache (local storage) for http://127.0.0.1:3002 to remove old invitations
* In the debug panel, check that it is connected to the p2p network (it should be connected to at least one other peer for pubsub to work).
* Create an invite link in the app by clicking the `Create new invite link` button.


@@ -1,2 +1,2 @@
# This file should be re-generated by running the scripts/update-version-file.sh script
v1.0.30-3d03b10
v1.0.35-df23476


@@ -292,21 +292,41 @@ def _orchestrate_cluster_config(ctx, cluster_config, docker, container_exec_env)
directive
)
if ctx.verbose:
print(f"Setting {pd.destination_container}.{pd.destination_variable} = {pd.source_container}.{pd.source_variable}")
# TODO: fix the script paths so they're consistent between containers
source_value = docker.compose.execute(pd.source_container,
["sh", "-c",
f"sh /docker-entrypoint-scripts.d/export-{pd.source_variable}.sh"],
tty=False,
envs=container_exec_env)
# TODO: handle the case that the value is not yet available
if ctx.debug:
print(f"fetched source value: {source_value}")
destination_output = docker.compose.execute(pd.destination_container,
["sh", "-c",
f"sh /scripts/import-{pd.destination_variable}.sh {source_value}"],
tty=False,
envs=container_exec_env)
if ctx.debug:
print(f"destination output: {destination_output}")
# TODO: detect errors here
print(f"Setting {pd.destination_container}.{pd.destination_variable}"
f" = {pd.source_container}.{pd.source_variable}")
# TODO: add a timeout
waiting_for_data = True
while waiting_for_data:
# TODO: fix the script paths so they're consistent between containers
source_value = None
try:
source_value = docker.compose.execute(pd.source_container,
["sh", "-c",
"sh /docker-entrypoint-scripts.d/export-"
f"{pd.source_variable}.sh"],
tty=False,
envs=container_exec_env)
except DockerException as error:
if ctx.debug:
print(f"Docker exception reading config source: {error}")
# If the script executed failed for some reason, we get:
# "It returned with code 1"
if "It returned with code 1" in str(error):
if ctx.verbose:
print("Config export script returned an error, re-trying")
# If the script failed to execute (e.g. the file is not there) then we get:
# "It returned with code 2"
if "It returned with code 2" in str(error):
print(f"Fatal error reading config source: {error}")
if source_value:
if ctx.debug:
print(f"fetched source value: {source_value}")
destination_output = docker.compose.execute(pd.destination_container,
["sh", "-c",
f"sh /scripts/import-{pd.destination_variable}.sh"
f" {source_value}"],
tty=False,
envs=container_exec_env)
waiting_for_data = False
if ctx.debug:
print(f"destination output: {destination_output}")