From 62f7ce649db4f64ea7cbc2ab9a4ec504a7aff811 Mon Sep 17 00:00:00 2001 From: Thomas E Lackey Date: Wed, 6 Mar 2024 18:38:30 +0000 Subject: [PATCH 01/13] Exit non-0 if docker build fails. (#778) Make sure to check the exit code of the docker build and bubble it back up to laconic-so. Reviewed-on: https://git.vdb.to/cerc-io/stack-orchestrator/pulls/778 Co-authored-by: Thomas E Lackey Co-committed-by: Thomas E Lackey --- .../data/container-build/cerc-nextjs-base/build.sh | 8 +++++++- .../data/container-build/cerc-webapp-base/build.sh | 8 +++++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/stack_orchestrator/data/container-build/cerc-nextjs-base/build.sh b/stack_orchestrator/data/container-build/cerc-nextjs-base/build.sh index cca8d64b..bb3983cf 100755 --- a/stack_orchestrator/data/container-build/cerc-nextjs-base/build.sh +++ b/stack_orchestrator/data/container-build/cerc-nextjs-base/build.sh @@ -11,8 +11,14 @@ CERC_CONTAINER_BUILD_DOCKERFILE=${CERC_CONTAINER_BUILD_DOCKERFILE:-$SCRIPT_DIR/D CERC_CONTAINER_BUILD_TAG=${CERC_CONTAINER_BUILD_TAG:-cerc/nextjs-base:local} docker build -t $CERC_CONTAINER_BUILD_TAG ${build_command_args} -f $CERC_CONTAINER_BUILD_DOCKERFILE $CERC_CONTAINER_BUILD_WORK_DIR +rc=$? -if [ $? -eq 0 ] && [ "$CERC_CONTAINER_BUILD_TAG" != "cerc/nextjs-base:local" ]; then +if [ $rc -ne 0 ]; then + echo "BUILD FAILED" 1>&2 + exit $rc +fi + +if [ "$CERC_CONTAINER_BUILD_TAG" != "cerc/nextjs-base:local" ]; then cat <&2 + exit $rc +fi + +if [ "$CERC_CONTAINER_BUILD_TAG" != "cerc/webapp-base:local" ]; then cat < Date: Thu, 7 Mar 2024 17:38:36 +0000 Subject: [PATCH 02/13] Auto-detect which certificate to use (including wildcards). (#779) Rather than always requesting a certificate, attempt to re-use an existing certificate if it already exists in the k8s cluster. This includes matching to a wildcard certificate. 
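[Editor's note] In essence, a host name is considered covered by an existing cert-manager Certificate when it appears in the certificate's dnsNames either verbatim or via a wildcard for its parent domain; the patch below additionally checks the notBefore/notAfter validity window and the Ready condition before reusing a certificate. A minimal sketch of just the name-matching rule, with an illustrative function name (not part of the patch):

```python
def host_matches_dns_names(host_name: str, dns_names: list) -> bool:
    # Exact match: "app.example.com" is listed directly in the certificate.
    if host_name in dns_names:
        return True
    # Wildcard match: "app.example.com" also matches a "*.example.com" entry.
    parts = host_name.split(".", 1)
    return len(parts) == 2 and f"*.{parts[1]}" in dns_names
```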
Reviewed-on: https://git.vdb.to/cerc-io/stack-orchestrator/pulls/779 Co-authored-by: Thomas E Lackey Co-committed-by: Thomas E Lackey --- stack_orchestrator/deploy/k8s/cluster_info.py | 18 ++++--- stack_orchestrator/deploy/k8s/deploy_k8s.py | 48 +++++++++++++++++-- stack_orchestrator/deploy/spec.py | 46 +++++++++--------- 3 files changed, 78 insertions(+), 34 deletions(-) diff --git a/stack_orchestrator/deploy/k8s/cluster_info.py b/stack_orchestrator/deploy/k8s/cluster_info.py index 55393bbf..dbf7c907 100644 --- a/stack_orchestrator/deploy/k8s/cluster_info.py +++ b/stack_orchestrator/deploy/k8s/cluster_info.py @@ -101,7 +101,7 @@ class ClusterInfo: ) return service - def get_ingress(self, use_tls=False): + def get_ingress(self, use_tls=False, certificate=None, cluster_issuer="letsencrypt-prod"): # No ingress for a deployment that has no http-proxy defined, for now http_proxy_info_list = self.spec.get_http_proxy() ingress = None @@ -114,8 +114,8 @@ class ClusterInfo: host_name = http_proxy_info["host-name"] rules = [] tls = [client.V1IngressTLS( - hosts=[host_name], - secret_name=f"{self.app_name}-tls" + hosts=certificate["spec"]["dnsNames"] if certificate else [host_name], + secret_name=certificate["spec"]["secretName"] if certificate else f"{self.app_name}-tls" )] if use_tls else None paths = [] for route in http_proxy_info["routes"]: @@ -147,13 +147,17 @@ class ClusterInfo: tls=tls, rules=rules ) + + ingress_annotations = { + "kubernetes.io/ingress.class": "nginx", + } + if not certificate: + ingress_annotations["cert-manager.io/cluster-issuer"] = cluster_issuer + ingress = client.V1Ingress( metadata=client.V1ObjectMeta( name=f"{self.app_name}-ingress", - annotations={ - "kubernetes.io/ingress.class": "nginx", - "cert-manager.io/cluster-issuer": "letsencrypt-prod" - } + annotations=ingress_annotations ), spec=spec ) diff --git a/stack_orchestrator/deploy/k8s/deploy_k8s.py b/stack_orchestrator/deploy/k8s/deploy_k8s.py index a3855fee..5781cd26 100644 --- a/stack_orchestrator/deploy/k8s/deploy_k8s.py +++ b/stack_orchestrator/deploy/k8s/deploy_k8s.py @@ -169,6 +169,39 @@ class K8sDeployer(Deployer): print("Service created:") print(f"{service_resp}") + def _find_certificate_for_host_name(self, host_name): + all_certificates = self.custom_obj_api.list_namespaced_custom_object( + group="cert-manager.io", + version="v1", + namespace=self.k8s_namespace, + plural="certificates" + ) + + host_parts = host_name.split(".", 1) + host_as_wild = None + if len(host_parts) == 2: + host_as_wild = f"*.{host_parts[1]}" + + now = datetime.utcnow().replace(tzinfo=timezone.utc) + fmt = "%Y-%m-%dT%H:%M:%S%z" + + # Walk over all the configured certificates. + for cert in all_certificates["items"]: + dns = cert["spec"]["dnsNames"] + # Check for an exact hostname match or a wildcard match. + if host_name in dns or host_as_wild in dns: + status = cert.get("status", {}) + # Check the certificate date. 
+ if "notAfter" in status and "notBefore" in status: + before = datetime.strptime(status["notBefore"], fmt) + after = datetime.strptime(status["notAfter"], fmt) + if before < now < after: + # Check the status is Ready + for condition in status.get("conditions", []): + if "True" == condition.get("status") and "Ready" == condition.get("type"): + return cert + return None + def up(self, detach, services): if not opts.o.dry_run: if self.is_kind(): @@ -189,8 +222,15 @@ class K8sDeployer(Deployer): self._create_volume_data() self._create_deployment() + http_proxy_info = self.cluster_info.spec.get_http_proxy() # Note: at present we don't support tls for kind (and enabling tls causes errors) - ingress: client.V1Ingress = self.cluster_info.get_ingress(use_tls=not self.is_kind()) + use_tls = http_proxy_info and not self.is_kind() + certificate = self._find_certificate_for_host_name(http_proxy_info[0]["host-name"]) if use_tls else None + if opts.o.debug: + if certificate: + print(f"Using existing certificate: {certificate}") + + ingress: client.V1Ingress = self.cluster_info.get_ingress(use_tls=use_tls, certificate=certificate) if ingress: if opts.o.debug: print(f"Sending this ingress: {ingress}") @@ -350,9 +390,11 @@ class K8sDeployer(Deployer): name=ingress.spec.tls[0].secret_name ) - hostname = ingress.spec.tls[0].hosts[0] + hostname = ingress.spec.rules[0].host ip = ingress.status.load_balancer.ingress[0].ip - tls = "notBefore: %s, notAfter: %s" % (cert["status"]["notBefore"], cert["status"]["notAfter"]) + tls = "notBefore: %s; notAfter: %s; names: %s" % ( + cert["status"]["notBefore"], cert["status"]["notAfter"], ingress.spec.tls[0].hosts + ) except: # noqa: E722 pass diff --git a/stack_orchestrator/deploy/spec.py b/stack_orchestrator/deploy/spec.py index ab452fe3..cbec8ae5 100644 --- a/stack_orchestrator/deploy/spec.py +++ b/stack_orchestrator/deploy/spec.py @@ -27,7 +27,9 @@ class ResourceLimits: memory: int = None storage: int = None - def __init__(self, obj={}): + def __init__(self, obj=None): + if obj is None: + obj = {} if "cpus" in obj: self.cpus = float(obj["cpus"]) if "memory" in obj: @@ -50,7 +52,9 @@ class Resources: limits: ResourceLimits = None reservations: ResourceLimits = None - def __init__(self, obj={}): + def __init__(self, obj=None): + if obj is None: + obj = {} if "reservations" in obj: self.reservations = ResourceLimits(obj["reservations"]) if "limits" in obj: @@ -72,7 +76,9 @@ class Spec: obj: typing.Any file_path: Path - def __init__(self, file_path: Path = None, obj={}) -> None: + def __init__(self, file_path: Path = None, obj=None) -> None: + if obj is None: + obj = {} self.file_path = file_path self.obj = obj @@ -91,49 +97,41 @@ class Spec: self.file_path = file_path def get_image_registry(self): - return (self.obj[constants.image_registry_key] - if self.obj and constants.image_registry_key in self.obj - else None) + return self.obj.get(constants.image_registry_key) def get_volumes(self): - return (self.obj["volumes"] - if self.obj and "volumes" in self.obj - else {}) + return self.obj.get(constants.volumes_key, {}) def get_configmaps(self): - return (self.obj["configmaps"] - if self.obj and "configmaps" in self.obj - else {}) + return self.obj.get(constants.configmaps_key, {}) def get_container_resources(self): - return Resources(self.obj.get("resources", {}).get("containers", {})) + return Resources(self.obj.get(constants.resources_key, {}).get("containers", {})) def get_volume_resources(self): - return Resources(self.obj.get("resources", {}).get("volumes", {})) + 
return Resources(self.obj.get(constants.resources_key, {}).get(constants.volumes_key, {})) def get_http_proxy(self): - return (self.obj[constants.network_key][constants.http_proxy_key] - if self.obj and constants.network_key in self.obj - and constants.http_proxy_key in self.obj[constants.network_key] - else None) + return self.obj.get(constants.network_key, {}).get(constants.http_proxy_key, []) def get_annotations(self): - return self.obj.get("annotations", {}) + return self.obj.get(constants.annotations_key, {}) def get_labels(self): - return self.obj.get("labels", {}) + return self.obj.get(constants.labels_key, {}) def get_privileged(self): - return "true" == str(self.obj.get("security", {}).get("privileged", "false")).lower() + return "true" == str(self.obj.get(constants.security_key, {}).get("privileged", "false")).lower() def get_capabilities(self): - return self.obj.get("security", {}).get("capabilities", []) + return self.obj.get(constants.security_key, {}).get("capabilities", []) def get_deployment_type(self): - return self.obj[constants.deploy_to_key] + return self.obj.get(constants.deploy_to_key) def is_kubernetes_deployment(self): - return self.get_deployment_type() in [constants.k8s_kind_deploy_type, constants.k8s_deploy_type] + return self.get_deployment_type() in [constants.k8s_kind_deploy_type, + constants.k8s_deploy_type] def is_kind_deployment(self): return self.get_deployment_type() in [constants.k8s_kind_deploy_type] From 17e860d6e44ec0fac822018c8a15fc251ce72ddb Mon Sep 17 00:00:00 2001 From: Prathamesh Musale Date: Tue, 12 Mar 2024 05:32:55 +0000 Subject: [PATCH 03/13] Update subgraph watcher versions and instructions to use deployments (#775) Part of https://www.notion.so/Setup-watchers-on-sandman-34b5514a10634c6fbf3ec338967c871c Reviewed-on: https://git.vdb.to/cerc-io/stack-orchestrator/pulls/775 Co-authored-by: Prathamesh Musale Co-committed-by: Prathamesh Musale --- .../data/stacks/merkl-sushiswap-v3/README.md | 75 ++++++++++----- .../data/stacks/merkl-sushiswap-v3/stack.yml | 2 +- .../data/stacks/sushiswap-subgraph/README.md | 2 +- .../data/stacks/sushiswap-v3/README.md | 92 +++++++++++++++---- .../data/stacks/sushiswap-v3/stack.yml | 2 +- 5 files changed, 129 insertions(+), 44 deletions(-) diff --git a/stack_orchestrator/data/stacks/merkl-sushiswap-v3/README.md b/stack_orchestrator/data/stacks/merkl-sushiswap-v3/README.md index 4284c2ad..ddd8ecf6 100644 --- a/stack_orchestrator/data/stacks/merkl-sushiswap-v3/README.md +++ b/stack_orchestrator/data/stacks/merkl-sushiswap-v3/README.md @@ -16,26 +16,55 @@ laconic-so --stack merkl-sushiswap-v3 build-containers ## Deploy -### Configuration - -Create and update an env file to be used in the next step: - - ```bash - # External Filecoin (ETH RPC) endpoint to point the watcher - CERC_ETH_RPC_ENDPOINT= - ``` - -### Deploy the stack +Create a spec file for the deployment: ```bash -laconic-so --stack merkl-sushiswap-v3 deploy --cluster merkl_sushiswap_v3 --env-file up +laconic-so --stack merkl-sushiswap-v3 deploy init --output merkl-sushiswap-v3-spec.yml +``` + +### Ports + +Edit `network` in the spec file to map container ports to host ports as required: + +``` +... 
+network: + ports: + merkl-sushiswap-v3-watcher-db: + - '5432' + merkl-sushiswap-v3-watcher-job-runner: + - 9002:9000 + merkl-sushiswap-v3-watcher-server: + - 127.0.0.1:3007:3008 + - 9003:9001 +``` + +### Create a deployment + +Create a deployment from the spec file: + +```bash +laconic-so --stack merkl-sushiswap-v3 deploy create --spec-file merkl-sushiswap-v3-spec.yml --deployment-dir merkl-sushiswap-v3-deployment +``` + +### Configuration + +Inside deployment directory, open the `config.env` file and set following env variables: + +```bash +# External Filecoin (ETH RPC) endpoint to point the watcher to +CERC_ETH_RPC_ENDPOINT=https://example-lotus-endpoint/rpc/v1 +``` + +### Start the deployment + +```bash +laconic-so deployment --dir merkl-sushiswap-v3-deployment start ``` * To list down and monitor the running containers: ```bash - laconic-so --stack merkl-sushiswap-v3 deploy --cluster merkl_sushiswap_v3 ps - # With status docker ps -a @@ -46,6 +75,7 @@ laconic-so --stack merkl-sushiswap-v3 deploy --cluster merkl_sushiswap_v3 --env- * Open the GQL playground at http://localhost:3007/graphql ```graphql + # Example query { _meta { block { @@ -54,7 +84,7 @@ laconic-so --stack merkl-sushiswap-v3 deploy --cluster merkl_sushiswap_v3 --env- } hasIndexingErrors } - + factories { id poolCount @@ -64,18 +94,21 @@ laconic-so --stack merkl-sushiswap-v3 deploy --cluster merkl_sushiswap_v3 --env- ## Clean up -Stop all the services running in background: +Stop all the merkl-sushiswap-v3 services running in background: ```bash -laconic-so --stack merkl-sushiswap-v3 deploy --cluster merkl_sushiswap_v3 down +# Only stop the docker containers +laconic-so deployment --dir merkl-sushiswap-v3-deployment stop + +# Run 'start' to restart the deployment ``` -Clear volumes created by this stack: +To stop all the merkl-sushiswap-v3 services and also delete data: ```bash -# List all relevant volumes -docker volume ls -q --filter "name=merkl_sushiswap_v3" +# Stop the docker containers +laconic-so deployment --dir merkl-sushiswap-v3-deployment stop --delete-volumes -# Remove all the listed volumes -docker volume rm $(docker volume ls -q --filter "name=merkl_sushiswap_v3") +# Remove deployment directory (deployment will have to be recreated for a re-run) +rm -r merkl-sushiswap-v3-deployment ``` diff --git a/stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml b/stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml index 847df33c..c080d324 100644 --- a/stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml +++ b/stack_orchestrator/data/stacks/merkl-sushiswap-v3/stack.yml @@ -2,7 +2,7 @@ version: "1.0" name: merkl-sushiswap-v3 description: "SushiSwap v3 watcher stack" repos: - - github.com/cerc-io/merkl-sushiswap-v3-watcher-ts@v0.1.6 + - github.com/cerc-io/merkl-sushiswap-v3-watcher-ts@v0.1.7 containers: - cerc/watcher-merkl-sushiswap-v3 pods: diff --git a/stack_orchestrator/data/stacks/sushiswap-subgraph/README.md b/stack_orchestrator/data/stacks/sushiswap-subgraph/README.md index 52433a9c..fbbd3215 100644 --- a/stack_orchestrator/data/stacks/sushiswap-subgraph/README.md +++ b/stack_orchestrator/data/stacks/sushiswap-subgraph/README.md @@ -55,7 +55,7 @@ ports: Create deployment: ```bash -laconic-so deploy create --spec-file sushiswap-subgraph-spec.yml --deployment-dir sushiswap-subgraph-deployment +laconic-so --stack sushiswap-subgraph deploy create --spec-file sushiswap-subgraph-spec.yml --deployment-dir sushiswap-subgraph-deployment ``` ## Start the stack diff --git 
a/stack_orchestrator/data/stacks/sushiswap-v3/README.md b/stack_orchestrator/data/stacks/sushiswap-v3/README.md index 7116a6d9..6bcbb54c 100644 --- a/stack_orchestrator/data/stacks/sushiswap-v3/README.md +++ b/stack_orchestrator/data/stacks/sushiswap-v3/README.md @@ -16,26 +16,55 @@ laconic-so --stack sushiswap-v3 build-containers ## Deploy -### Configuration - -Create and update an env file to be used in the next step: - - ```bash - # External Filecoin (ETH RPC) endpoint to point the watcher - CERC_ETH_RPC_ENDPOINT= - ``` - -### Deploy the stack +Create a spec file for the deployment: ```bash -laconic-so --stack sushiswap-v3 deploy --cluster sushiswap_v3 --env-file up +laconic-so --stack sushiswap-v3 deploy init --output sushiswap-v3-spec.yml +``` + +### Ports + +Edit `network` in the spec file to map container ports to host ports as required: + +``` +... +network: + ports: + sushiswap-v3-watcher-db: + - '5432' + sushiswap-v3-watcher-job-runner: + - 9000:9000 + sushiswap-v3-watcher-server: + - 127.0.0.1:3008:3008 + - 9001:9001 +``` + +### Create a deployment + +Create a deployment from the spec file: + +```bash +laconic-so --stack sushiswap-v3 deploy create --spec-file sushiswap-v3-spec.yml --deployment-dir sushiswap-v3-deployment +``` + +### Configuration + +Inside deployment directory, open the `config.env` file and set following env variables: + +```bash +# External Filecoin (ETH RPC) endpoint to point the watcher to +CERC_ETH_RPC_ENDPOINT=https://example-lotus-endpoint/rpc/v1 +``` + +### Start the deployment + +```bash +laconic-so deployment --dir sushiswap-v3-deployment start ``` * To list down and monitor the running containers: ```bash - laconic-so --stack sushiswap-v3 deploy --cluster sushiswap_v3 ps - # With status docker ps -a @@ -43,20 +72,43 @@ laconic-so --stack sushiswap-v3 deploy --cluster sushiswap_v3 --env-file ``` +* Open the GQL playground at http://localhost:3008/graphql + + ```graphql + # Example query + { + _meta { + block { + number + timestamp + } + hasIndexingErrors + } + + factories { + id + poolCount + } + } + ``` + ## Clean up -Stop all the services running in background: +Stop all the sushiswap-v3 services running in background: ```bash -laconic-so --stack sushiswap-v3 deploy --cluster sushiswap_v3 down +# Only stop the docker containers +laconic-so deployment --dir sushiswap-v3-deployment stop + +# Run 'start' to restart the deployment ``` -Clear volumes created by this stack: +To stop all the sushiswap-v3 services and also delete data: ```bash -# List all relevant volumes -docker volume ls -q --filter "name=sushiswap_v3" +# Stop the docker containers +laconic-so deployment --dir sushiswap-v3-deployment stop --delete-volumes -# Remove all the listed volumes -docker volume rm $(docker volume ls -q --filter "name=sushiswap_v3") +# Remove deployment directory (deployment will have to be recreated for a re-run) +rm -r sushiswap-v3-deployment ``` diff --git a/stack_orchestrator/data/stacks/sushiswap-v3/stack.yml b/stack_orchestrator/data/stacks/sushiswap-v3/stack.yml index 12103d62..248cb381 100644 --- a/stack_orchestrator/data/stacks/sushiswap-v3/stack.yml +++ b/stack_orchestrator/data/stacks/sushiswap-v3/stack.yml @@ -2,7 +2,7 @@ version: "1.0" name: sushiswap-v3 description: "SushiSwap v3 watcher stack" repos: - - github.com/cerc-io/sushiswap-v3-watcher-ts@v0.1.6 + - github.com/cerc-io/sushiswap-v3-watcher-ts@v0.1.7 containers: - cerc/watcher-sushiswap-v3 pods: From aeddc82ebc1d8afd62e1fff848492aabab4b8f23 Mon Sep 17 00:00:00 2001 From: Prathamesh Musale Date: 
Wed, 13 Mar 2024 07:16:15 +0000 Subject: [PATCH 04/13] Remove latest indexed block value from watcher alerts data (#780) Part of https://www.notion.so/Setup-grafana-SO-stack-for-monitoring-watchers-7e23042c296c4de6b8676f1f604aa03c Reviewed-on: https://git.vdb.to/cerc-io/stack-orchestrator/pulls/780 Co-authored-by: Prathamesh Musale Co-committed-by: Prathamesh Musale --- .../config/monitoring/watcher-alert-rules.yml | 160 ------------------ .../data/stacks/monitoring/stack.yml | 2 +- 2 files changed, 1 insertion(+), 161 deletions(-) diff --git a/stack_orchestrator/data/config/monitoring/watcher-alert-rules.yml b/stack_orchestrator/data/config/monitoring/watcher-alert-rules.yml index 9df9472f..a190a1fd 100644 --- a/stack_orchestrator/data/config/monitoring/watcher-alert-rules.yml +++ b/stack_orchestrator/data/config/monitoring/watcher-alert-rules.yml @@ -50,22 +50,6 @@ groups: legendFormat: __auto range: false refId: latest_external - - refId: latest_indexed - relativeTimeRange: - from: 600 - to: 0 - datasourceUid: PBFA97CFB590B2093 - model: - datasource: - type: prometheus - uid: PBFA97CFB590B2093 - editorMode: code - expr: sync_status_block_number{job="azimuth", instance="azimuth", kind="latest_indexed"} - hide: false - instant: true - legendFormat: __auto - range: false - refId: latest_indexed - refId: condition relativeTimeRange: from: 600 @@ -142,22 +126,6 @@ groups: legendFormat: __auto range: false refId: latest_external - - refId: latest_indexed - relativeTimeRange: - from: 600 - to: 0 - datasourceUid: PBFA97CFB590B2093 - model: - datasource: - type: prometheus - uid: PBFA97CFB590B2093 - editorMode: code - expr: sync_status_block_number{job="azimuth", instance="censures", kind="latest_indexed"} - hide: false - instant: true - legendFormat: __auto - range: false - refId: latest_indexed - refId: condition relativeTimeRange: from: 600 @@ -234,22 +202,6 @@ groups: legendFormat: __auto range: false refId: latest_external - - refId: latest_indexed - relativeTimeRange: - from: 600 - to: 0 - datasourceUid: PBFA97CFB590B2093 - model: - datasource: - type: prometheus - uid: PBFA97CFB590B2093 - editorMode: code - expr: sync_status_block_number{job="azimuth", instance="claims", kind="latest_indexed"} - hide: false - instant: true - legendFormat: __auto - range: false - refId: latest_indexed - refId: condition relativeTimeRange: from: 600 @@ -326,22 +278,6 @@ groups: legendFormat: __auto range: false refId: latest_external - - refId: latest_indexed - relativeTimeRange: - from: 600 - to: 0 - datasourceUid: PBFA97CFB590B2093 - model: - datasource: - type: prometheus - uid: PBFA97CFB590B2093 - editorMode: code - expr: sync_status_block_number{job="azimuth", instance="conditional_star_release", kind="latest_indexed"} - hide: false - instant: true - legendFormat: __auto - range: false - refId: latest_indexed - refId: condition relativeTimeRange: from: 600 @@ -418,22 +354,6 @@ groups: legendFormat: __auto range: false refId: latest_external - - refId: latest_indexed - relativeTimeRange: - from: 600 - to: 0 - datasourceUid: PBFA97CFB590B2093 - model: - datasource: - type: prometheus - uid: PBFA97CFB590B2093 - editorMode: code - expr: sync_status_block_number{job="azimuth", instance="delegated_sending", kind="latest_indexed"} - hide: false - instant: true - legendFormat: __auto - range: false - refId: latest_indexed - refId: condition relativeTimeRange: from: 600 @@ -510,22 +430,6 @@ groups: legendFormat: __auto range: false refId: latest_external - - refId: latest_indexed - relativeTimeRange: - from: 
600 - to: 0 - datasourceUid: PBFA97CFB590B2093 - model: - datasource: - type: prometheus - uid: PBFA97CFB590B2093 - editorMode: code - expr: sync_status_block_number{job="azimuth", instance="ecliptic", kind="latest_indexed"} - hide: false - instant: true - legendFormat: __auto - range: false - refId: latest_indexed - refId: condition relativeTimeRange: from: 600 @@ -602,22 +506,6 @@ groups: legendFormat: __auto range: false refId: latest_external - - refId: latest_indexed - relativeTimeRange: - from: 600 - to: 0 - datasourceUid: PBFA97CFB590B2093 - model: - datasource: - type: prometheus - uid: PBFA97CFB590B2093 - editorMode: code - expr: sync_status_block_number{job="azimuth", instance="azimuth", kind="latest_indexed"} - hide: false - instant: true - legendFormat: __auto - range: false - refId: latest_indexed - refId: condition relativeTimeRange: from: 600 @@ -694,22 +582,6 @@ groups: legendFormat: __auto range: false refId: latest_external - - refId: latest_indexed - relativeTimeRange: - from: 600 - to: 0 - datasourceUid: PBFA97CFB590B2093 - model: - datasource: - type: prometheus - uid: PBFA97CFB590B2093 - editorMode: code - expr: sync_status_block_number{job="azimuth", instance="polls", kind="latest_indexed"} - hide: false - instant: true - legendFormat: __auto - range: false - refId: latest_indexed - refId: condition relativeTimeRange: from: 600 @@ -788,22 +660,6 @@ groups: legendFormat: __auto range: false refId: latest_external - - refId: latest_indexed - relativeTimeRange: - from: 600 - to: 0 - datasourceUid: PBFA97CFB590B2093 - model: - datasource: - type: prometheus - uid: PBFA97CFB590B2093 - editorMode: code - expr: sync_status_block_number{job="sushi", instance="sushiswap", kind="latest_indexed"} - hide: false - instant: true - legendFormat: __auto - range: false - refId: latest_indexed - refId: condition relativeTimeRange: from: 600 @@ -880,22 +736,6 @@ groups: legendFormat: __auto range: false refId: latest_external - - refId: latest_indexed - relativeTimeRange: - from: 600 - to: 0 - datasourceUid: PBFA97CFB590B2093 - model: - datasource: - type: prometheus - uid: PBFA97CFB590B2093 - editorMode: code - expr: sync_status_block_number{job="sushi", instance="merkl_sushiswap", kind="latest_indexed"} - hide: false - instant: true - legendFormat: __auto - range: false - refId: latest_indexed - refId: condition relativeTimeRange: from: 600 diff --git a/stack_orchestrator/data/stacks/monitoring/stack.yml b/stack_orchestrator/data/stacks/monitoring/stack.yml index 058ca971..48605dc3 100644 --- a/stack_orchestrator/data/stacks/monitoring/stack.yml +++ b/stack_orchestrator/data/stacks/monitoring/stack.yml @@ -1,7 +1,7 @@ version: "0.1" name: monitoring repos: - - github.com/cerc-io/watcher-ts@v0.2.79 + - github.com/cerc-io/watcher-ts@v0.2.81 containers: - cerc/watcher-ts pods: From cd508320387409fc41df5e8d227a57c48ef5cb35 Mon Sep 17 00:00:00 2001 From: Prathamesh Musale Date: Thu, 21 Mar 2024 07:17:01 +0000 Subject: [PATCH 05/13] Add a Ajna watcher stack (#781) Part of https://www.notion.so/Generate-ajna-finance-subgraph-watcher-with-codegen-5b80ac149b3f449fb138f5d92cc5485e Reviewed-on: https://git.vdb.to/cerc-io/stack-orchestrator/pulls/781 Co-authored-by: Prathamesh Musale Co-committed-by: Prathamesh Musale --- .../compose/docker-compose-watcher-ajna.yml | 76 +++++++++++ .../config/watcher-ajna/start-job-runner.sh | 20 +++ .../data/config/watcher-ajna/start-server.sh | 20 +++ .../watcher-ajna/watcher-config-template.toml | 98 +++++++++++++++ .../cerc-watcher-ajna/Dockerfile | 10 ++ 
.../cerc-watcher-ajna/build.sh | 9 ++ stack_orchestrator/data/stacks/ajna/README.md | 118 ++++++++++++++++++ stack_orchestrator/data/stacks/ajna/stack.yml | 9 ++ 8 files changed, 360 insertions(+) create mode 100644 stack_orchestrator/data/compose/docker-compose-watcher-ajna.yml create mode 100755 stack_orchestrator/data/config/watcher-ajna/start-job-runner.sh create mode 100755 stack_orchestrator/data/config/watcher-ajna/start-server.sh create mode 100644 stack_orchestrator/data/config/watcher-ajna/watcher-config-template.toml create mode 100644 stack_orchestrator/data/container-build/cerc-watcher-ajna/Dockerfile create mode 100755 stack_orchestrator/data/container-build/cerc-watcher-ajna/build.sh create mode 100644 stack_orchestrator/data/stacks/ajna/README.md create mode 100644 stack_orchestrator/data/stacks/ajna/stack.yml diff --git a/stack_orchestrator/data/compose/docker-compose-watcher-ajna.yml b/stack_orchestrator/data/compose/docker-compose-watcher-ajna.yml new file mode 100644 index 00000000..b3fcaab5 --- /dev/null +++ b/stack_orchestrator/data/compose/docker-compose-watcher-ajna.yml @@ -0,0 +1,76 @@ +version: '3.2' + +services: + ajna-watcher-db: + restart: unless-stopped + image: postgres:14-alpine + environment: + - POSTGRES_USER=vdbm + - POSTGRES_MULTIPLE_DATABASES=ajna-watcher,ajna-watcher-job-queue + - POSTGRES_EXTENSION=ajna-watcher-job-queue:pgcrypto + - POSTGRES_PASSWORD=password + volumes: + - ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh + - ajna_watcher_db_data:/var/lib/postgresql/data + ports: + - "5432" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "5432"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 10s + + ajna-watcher-job-runner: + restart: unless-stopped + depends_on: + ajna-watcher-db: + condition: service_healthy + image: cerc/watcher-ajna:local + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT} + command: ["bash", "./start-job-runner.sh"] + volumes: + - ../config/watcher-ajna/watcher-config-template.toml:/app/environments/watcher-config-template.toml + - ../config/watcher-ajna/start-job-runner.sh:/app/start-job-runner.sh + ports: + - "9000" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "9000"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" + + ajna-watcher-server: + restart: unless-stopped + depends_on: + ajna-watcher-db: + condition: service_healthy + ajna-watcher-job-runner: + condition: service_healthy + image: cerc/watcher-ajna:local + environment: + CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG} + CERC_ETH_RPC_ENDPOINT: ${CERC_ETH_RPC_ENDPOINT} + command: ["bash", "./start-server.sh"] + volumes: + - ../config/watcher-ajna/watcher-config-template.toml:/app/environments/watcher-config-template.toml + - ../config/watcher-ajna/start-server.sh:/app/start-server.sh + ports: + - "3008" + - "9001" + healthcheck: + test: ["CMD", "nc", "-v", "localhost", "3008"] + interval: 20s + timeout: 5s + retries: 15 + start_period: 5s + extra_hosts: + - "host.docker.internal:host-gateway" + +volumes: + ajna_watcher_db_data: diff --git a/stack_orchestrator/data/config/watcher-ajna/start-job-runner.sh b/stack_orchestrator/data/config/watcher-ajna/start-job-runner.sh new file mode 100755 index 00000000..819b1096 --- /dev/null +++ b/stack_orchestrator/data/config/watcher-ajna/start-job-runner.sh @@ -0,0 +1,20 @@ +#!/bin/sh + +set -e +if [ -n 
"$CERC_SCRIPT_DEBUG" ]; then + set -x +fi +set -u + +echo "Using ETH RPC endpoint ${CERC_ETH_RPC_ENDPOINT}" + +# Read in the config template TOML file and modify it +WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml) +WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \ + sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINT|${CERC_ETH_RPC_ENDPOINT}| ") + +# Write the modified content to a new file +echo "$WATCHER_CONFIG" > environments/local.toml + +echo "Running job-runner..." +DEBUG=vulcanize:* exec node --enable-source-maps dist/job-runner.js diff --git a/stack_orchestrator/data/config/watcher-ajna/start-server.sh b/stack_orchestrator/data/config/watcher-ajna/start-server.sh new file mode 100755 index 00000000..e2bbdaad --- /dev/null +++ b/stack_orchestrator/data/config/watcher-ajna/start-server.sh @@ -0,0 +1,20 @@ +#!/bin/sh + +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi +set -u + +echo "Using ETH RPC endpoint ${CERC_ETH_RPC_ENDPOINT}" + +# Read in the config template TOML file and modify it +WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml) +WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \ + sed -E "s|REPLACE_WITH_CERC_ETH_RPC_ENDPOINT|${CERC_ETH_RPC_ENDPOINT}| ") + +# Write the modified content to a new file +echo "$WATCHER_CONFIG" > environments/local.toml + +echo "Running server..." +DEBUG=vulcanize:* exec node --enable-source-maps dist/server.js diff --git a/stack_orchestrator/data/config/watcher-ajna/watcher-config-template.toml b/stack_orchestrator/data/config/watcher-ajna/watcher-config-template.toml new file mode 100644 index 00000000..b2b54c69 --- /dev/null +++ b/stack_orchestrator/data/config/watcher-ajna/watcher-config-template.toml @@ -0,0 +1,98 @@ +[server] + host = "0.0.0.0" + port = 3008 + kind = "active" + gqlPath = "/graphql" + + # Checkpointing state. + checkpointing = true + + # Checkpoint interval in number of blocks. + checkpointInterval = 2000 + + # Enable state creation + # CAUTION: Disable only if state creation is not desired or can be filled subsequently + enableState = false + + subgraphPath = "./subgraph-build" + + # Interval to restart wasm instance periodically + wasmRestartBlocksInterval = 20 + + # Interval in number of blocks at which to clear entities cache. + clearEntitiesCacheInterval = 1000 + + # Max block range for which to return events in eventsInRange GQL query. + # Use -1 for skipping check on block range. 
+ maxEventsBlockRange = 1000 + + # Flag to specify whether RPC endpoint supports block hash as block tag parameter + rpcSupportsBlockHashParam = false + + # GQL cache settings + [server.gqlCache] + enabled = true + + # Max in-memory cache size (in bytes) (default 8 MB) + # maxCacheSize + + # GQL cache-control max-age settings (in seconds) + maxAge = 15 + timeTravelMaxAge = 86400 # 1 day + +[metrics] + host = "0.0.0.0" + port = 9000 + [metrics.gql] + port = 9001 + +[database] + type = "postgres" + host = "ajna-watcher-db" + port = 5432 + database = "ajna-watcher" + username = "vdbm" + password = "password" + synchronize = true + logging = false + +[upstream] + [upstream.ethServer] + rpcProviderEndpoint = "REPLACE_WITH_CERC_ETH_RPC_ENDPOINT" + + # Boolean flag to specify if rpc-eth-client should be used for RPC endpoint instead of ipld-eth-client (ipld-eth-server GQL client) + rpcClient = true + + # Boolean flag to specify if rpcProviderEndpoint is an FEVM RPC endpoint + isFEVM = true + + # Boolean flag to filter event logs by contracts + filterLogsByAddresses = true + # Boolean flag to filter event logs by topics + filterLogsByTopics = true + + [upstream.cache] + name = "requests" + enabled = false + deleteOnStart = false + +[jobQueue] + dbConnectionString = "postgres://vdbm:password@ajna-watcher-db/ajna-watcher-job-queue" + maxCompletionLagInSecs = 300 + jobDelayInMilliSecs = 100 + eventsInBatch = 50 + subgraphEventsOrder = true + # Filecoin block time: https://docs.filecoin.io/basics/the-blockchain/blocks-and-tipsets#blocktime + blockDelayInMilliSecs = 30000 + + # Boolean to switch between modes of processing events when starting the server. + # Setting to true will fetch filtered events and required blocks in a range of blocks and then process them. + # Setting to false will fetch blocks consecutively with its events and then process them (Behaviour is followed in realtime processing near head). + useBlockRanges = true + + # Block range in which logs are fetched during historical blocks processing + historicalLogsBlockRange = 2000 + + # Max block range of historical processing after which it waits for completion of events processing + # If set to -1 historical processing does not wait for events processing and completes till latest canonical block + historicalMaxFetchAhead = 10000 diff --git a/stack_orchestrator/data/container-build/cerc-watcher-ajna/Dockerfile b/stack_orchestrator/data/container-build/cerc-watcher-ajna/Dockerfile new file mode 100644 index 00000000..4369d50a --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-watcher-ajna/Dockerfile @@ -0,0 +1,10 @@ +FROM node:18.17.1-alpine3.18 + +RUN apk --update --no-cache add git python3 alpine-sdk bash curl jq + +WORKDIR /app + +COPY . . 
+ +RUN echo "Installing dependencies and building ajna-watcher-ts" && \ + yarn && yarn build diff --git a/stack_orchestrator/data/container-build/cerc-watcher-ajna/build.sh b/stack_orchestrator/data/container-build/cerc-watcher-ajna/build.sh new file mode 100755 index 00000000..4feede03 --- /dev/null +++ b/stack_orchestrator/data/container-build/cerc-watcher-ajna/build.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# Build cerc/watcher-ajna + +source ${CERC_CONTAINER_BASE_DIR}/build-base.sh + +# See: https://stackoverflow.com/a/246128/1701505 +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +docker build -t cerc/watcher-ajna:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/ajna-watcher-ts diff --git a/stack_orchestrator/data/stacks/ajna/README.md b/stack_orchestrator/data/stacks/ajna/README.md new file mode 100644 index 00000000..6f88ec0a --- /dev/null +++ b/stack_orchestrator/data/stacks/ajna/README.md @@ -0,0 +1,118 @@ +# Ajna Watcher + +## Setup + +Clone required repositories: + +```bash +laconic-so --stack ajna setup-repositories --git-ssh --pull +``` + +Build the container images: + +```bash +laconic-so --stack ajna build-containers +``` + +## Deploy + +Create a spec file for the deployment: + +```bash +laconic-so --stack ajna deploy init --output ajna-spec.yml +``` + +### Ports + +Edit `network` in the spec file to map container ports to host ports as required: + +```yml +... +network: + ports: + ajna-watcher-db: + - 15432:5432 + ajna-watcher-job-runner: + - 9000:9000 + ajna-watcher-server: + - 3008:3008 + - 9001:9001 +``` + +### Create a deployment + +Create a deployment from the spec file: + +```bash +laconic-so --stack ajna deploy create --spec-file ajna-spec.yml --deployment-dir ajna-deployment +``` + +### Configuration + +Inside deployment directory, open the `config.env` file and set following env variables: + +```bash +# External Filecoin (ETH RPC) endpoint to point the watcher to +CERC_ETH_RPC_ENDPOINT=https://example-lotus-endpoint/rpc/v1 +``` + +### Start the deployment + +```bash +laconic-so deployment --dir ajna-deployment start +``` + +* To list down and monitor the running containers: + + ```bash + # With status + docker ps -a + + # Check logs for a container + docker logs -f + ``` + +* Open the GQL playground at + + ```graphql + # Example query + query { + _meta { + block { + hash + number + timestamp + } + deployment + hasIndexingErrors + } + + accounts { + id + txCount + tokensDelegated + rewardsClaimed + } + } + ``` + +## Clean up + +Stop all the ajna services running in background: + +```bash +# Only stop the docker containers +laconic-so deployment --dir ajna-deployment stop + +# Run 'start' to restart the deployment +``` + +To stop all the ajna services and also delete data: + +```bash +# Stop the docker containers +laconic-so deployment --dir ajna-deployment stop --delete-volumes + +# Remove deployment directory (deployment will have to be recreated for a re-run) +rm -r ajna-deployment +``` diff --git a/stack_orchestrator/data/stacks/ajna/stack.yml b/stack_orchestrator/data/stacks/ajna/stack.yml new file mode 100644 index 00000000..6956d90d --- /dev/null +++ b/stack_orchestrator/data/stacks/ajna/stack.yml @@ -0,0 +1,9 @@ +version: "1.0" +name: ajna +description: "Ajna watcher stack" +repos: + - git.vdb.to/cerc-io/ajna-watcher-ts +containers: + - cerc/watcher-ajna +pods: + - watcher-ajna From 5308ab1e4e60f0b2b272295f04537b62884c5755 Mon Sep 17 00:00:00 2001 From: Thomas E Lackey Date: Mon, 25 Mar 2024 
19:09:26 +0000 Subject: [PATCH 06/13] Blind commit to fix laconic CLI calls after rename. (#784) `laconic cns` got renamed to `laconic registry` which breaks all the scripts and commands that use it. Reviewed-on: https://git.vdb.to/cerc-io/stack-orchestrator/pulls/784 Co-authored-by: Thomas E Lackey Co-committed-by: Thomas E Lackey --- .../create-demo-records.sh | 2 +- .../data/stacks/fixturenet-laconic-loaded/README.md | 2 +- .../data/stacks/fixturenet-laconicd/README.md | 2 +- stack_orchestrator/deploy/webapp/util.py | 13 +++++++------ 4 files changed, 10 insertions(+), 9 deletions(-) diff --git a/stack_orchestrator/data/container-build/cerc-laconic-registry-cli/create-demo-records.sh b/stack_orchestrator/data/container-build/cerc-laconic-registry-cli/create-demo-records.sh index 5a5dc34c..a95d73bd 100755 --- a/stack_orchestrator/data/container-build/cerc-laconic-registry-cli/create-demo-records.sh +++ b/stack_orchestrator/data/container-build/cerc-laconic-registry-cli/create-demo-records.sh @@ -5,7 +5,7 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then set -x fi -registry_command="laconic cns" +registry_command="laconic registry" demo_records_dir="scripts/demo-records" # Check we have funds diff --git a/stack_orchestrator/data/stacks/fixturenet-laconic-loaded/README.md b/stack_orchestrator/data/stacks/fixturenet-laconic-loaded/README.md index 94f9eb36..6d0f82a6 100644 --- a/stack_orchestrator/data/stacks/fixturenet-laconic-loaded/README.md +++ b/stack_orchestrator/data/stacks/fixturenet-laconic-loaded/README.md @@ -49,7 +49,7 @@ $ laconic-so --stack fixturenet-laconic-loaded deploy logs ``` ## 6. Test with the Registry CLI ``` -$ laconic-so --stack fixturenet-laconic-loaded deploy exec cli "laconic cns status" +$ laconic-so --stack fixturenet-laconic-loaded deploy exec cli "laconic registry status" ``` ## 7. View the laconic console Get the URL for the console web app with this command (the port number will be different for each deployment): diff --git a/stack_orchestrator/data/stacks/fixturenet-laconicd/README.md b/stack_orchestrator/data/stacks/fixturenet-laconicd/README.md index 1b04b875..d939d0a4 100644 --- a/stack_orchestrator/data/stacks/fixturenet-laconicd/README.md +++ b/stack_orchestrator/data/stacks/fixturenet-laconicd/README.md @@ -44,5 +44,5 @@ $ laconic-so --stack fixturenet-laconicd deploy logs ``` ## 6. 
Test with the Registry CLI ``` -$ laconic-so --stack fixturenet-laconicd deploy exec cli "laconic cns status" +$ laconic-so --stack fixturenet-laconicd deploy exec cli "laconic registry status" ``` diff --git a/stack_orchestrator/deploy/webapp/util.py b/stack_orchestrator/deploy/webapp/util.py index 5c484ed1..822eca4e 100644 --- a/stack_orchestrator/deploy/webapp/util.py +++ b/stack_orchestrator/deploy/webapp/util.py @@ -94,7 +94,7 @@ class LaconicRegistryClient: ) def list_records(self, criteria={}, all=False): - args = ["laconic", "-c", self.config_file, "cns", "record", "list"] + args = ["laconic", "-c", self.config_file, "registry", "record", "list"] if all: args.append("--all") @@ -140,7 +140,7 @@ class LaconicRegistryClient: if name in self.cache.name_or_id: return self.cache.name_or_id[name] - args = ["laconic", "-c", self.config_file, "cns", "name", "resolve", name] + args = ["laconic", "-c", self.config_file, "registry", "name", "resolve", name] parsed = [AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args))] if parsed: @@ -165,7 +165,7 @@ class LaconicRegistryClient: "laconic", "-c", self.config_file, - "cns", + "registry", "record", "get", "--id", @@ -203,7 +203,8 @@ class LaconicRegistryClient: print(open(record_fname, 'r').read(), file=self.log_file) new_record_id = json.loads( - logged_cmd(self.log_file, "laconic", "-c", self.config_file, "cns", "record", "publish", "--filename", record_fname) + logged_cmd(self.log_file, "laconic", "-c", self.config_file, "registry", + "record", "publish", "--filename", record_fname) )["id"] for name in names: self.set_name(name, new_record_id) @@ -212,10 +213,10 @@ class LaconicRegistryClient: logged_cmd(self.log_file, "rm", "-rf", tmpdir) def set_name(self, name, record_id): - logged_cmd(self.log_file, "laconic", "-c", self.config_file, "cns", "name", "set", name, record_id) + logged_cmd(self.log_file, "laconic", "-c", self.config_file, "registry", "name", "set", name, record_id) def delete_name(self, name): - logged_cmd(self.log_file, "laconic", "-c", self.config_file, "cns", "name", "delete", name) + logged_cmd(self.log_file, "laconic", "-c", self.config_file, "registry", "name", "delete", name) def file_hash(filename): From 18b006468d80102b4282e70a50aac0028678532e Mon Sep 17 00:00:00 2001 From: Nabarun Date: Tue, 26 Mar 2024 11:50:05 +0000 Subject: [PATCH 07/13] Update GQL path for ajna subgraph watcher server (#785) Part of https://www.notion.so/Run-ajna-finance-subgraph-watcher-87748d78cd7a471b8d71f50d5fdc2657 Reviewed-on: https://git.vdb.to/cerc-io/stack-orchestrator/pulls/785 Co-authored-by: Nabarun Co-committed-by: Nabarun --- .../data/config/watcher-ajna/watcher-config-template.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack_orchestrator/data/config/watcher-ajna/watcher-config-template.toml b/stack_orchestrator/data/config/watcher-ajna/watcher-config-template.toml index b2b54c69..70fc0466 100644 --- a/stack_orchestrator/data/config/watcher-ajna/watcher-config-template.toml +++ b/stack_orchestrator/data/config/watcher-ajna/watcher-config-template.toml @@ -2,7 +2,7 @@ host = "0.0.0.0" port = 3008 kind = "active" - gqlPath = "/graphql" + gqlPath = "/" # Checkpointing state. 
checkpointing = true From 44faf36837b52df6c3678750be2d2aaf61c52cec Mon Sep 17 00:00:00 2001 From: Nabarun Date: Tue, 26 Mar 2024 14:52:57 +0000 Subject: [PATCH 08/13] Update ajna-watcher-ts version for using new subgraph (#786) Part of https://www.notion.so/Run-ajna-finance-subgraph-watcher-87748d78cd7a471b8d71f50d5fdc2657 Reviewed-on: https://git.vdb.to/cerc-io/stack-orchestrator/pulls/786 Co-authored-by: Nabarun Co-committed-by: Nabarun --- stack_orchestrator/data/stacks/ajna/stack.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack_orchestrator/data/stacks/ajna/stack.yml b/stack_orchestrator/data/stacks/ajna/stack.yml index 6956d90d..4d64559e 100644 --- a/stack_orchestrator/data/stacks/ajna/stack.yml +++ b/stack_orchestrator/data/stacks/ajna/stack.yml @@ -2,7 +2,7 @@ version: "1.0" name: ajna description: "Ajna watcher stack" repos: - - git.vdb.to/cerc-io/ajna-watcher-ts + - git.vdb.to/cerc-io/ajna-watcher-ts@v0.1.1 containers: - cerc/watcher-ajna pods: From d2442bcc9bf48ec0079ac704e33e1e4872fff0e7 Mon Sep 17 00:00:00 2001 From: Thomas E Lackey Date: Wed, 27 Mar 2024 20:55:03 +0000 Subject: [PATCH 09/13] revert 5308ab1e4e60f0b2b272295f04537b62884c5755 (#788) revert Blind commit to fix laconic CLI calls after rename. (#784) `laconic cns` got renamed to `laconic registry` which breaks all the scripts and commands that use it. Reviewed-on: https://git.vdb.to/cerc-io/stack-orchestrator/pulls/784 Co-authored-by: Thomas E Lackey Co-committed-by: Thomas E Lackey Reviewed-on: https://git.vdb.to/cerc-io/stack-orchestrator/pulls/788 --- .../create-demo-records.sh | 2 +- .../data/stacks/fixturenet-laconic-loaded/README.md | 2 +- .../data/stacks/fixturenet-laconicd/README.md | 2 +- stack_orchestrator/deploy/webapp/util.py | 13 ++++++------- 4 files changed, 9 insertions(+), 10 deletions(-) diff --git a/stack_orchestrator/data/container-build/cerc-laconic-registry-cli/create-demo-records.sh b/stack_orchestrator/data/container-build/cerc-laconic-registry-cli/create-demo-records.sh index a95d73bd..5a5dc34c 100755 --- a/stack_orchestrator/data/container-build/cerc-laconic-registry-cli/create-demo-records.sh +++ b/stack_orchestrator/data/container-build/cerc-laconic-registry-cli/create-demo-records.sh @@ -5,7 +5,7 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then set -x fi -registry_command="laconic registry" +registry_command="laconic cns" demo_records_dir="scripts/demo-records" # Check we have funds diff --git a/stack_orchestrator/data/stacks/fixturenet-laconic-loaded/README.md b/stack_orchestrator/data/stacks/fixturenet-laconic-loaded/README.md index 6d0f82a6..94f9eb36 100644 --- a/stack_orchestrator/data/stacks/fixturenet-laconic-loaded/README.md +++ b/stack_orchestrator/data/stacks/fixturenet-laconic-loaded/README.md @@ -49,7 +49,7 @@ $ laconic-so --stack fixturenet-laconic-loaded deploy logs ``` ## 6. Test with the Registry CLI ``` -$ laconic-so --stack fixturenet-laconic-loaded deploy exec cli "laconic registry status" +$ laconic-so --stack fixturenet-laconic-loaded deploy exec cli "laconic cns status" ``` ## 7. 
View the laconic console Get the URL for the console web app with this command (the port number will be different for each deployment): diff --git a/stack_orchestrator/data/stacks/fixturenet-laconicd/README.md b/stack_orchestrator/data/stacks/fixturenet-laconicd/README.md index d939d0a4..1b04b875 100644 --- a/stack_orchestrator/data/stacks/fixturenet-laconicd/README.md +++ b/stack_orchestrator/data/stacks/fixturenet-laconicd/README.md @@ -44,5 +44,5 @@ $ laconic-so --stack fixturenet-laconicd deploy logs ``` ## 6. Test with the Registry CLI ``` -$ laconic-so --stack fixturenet-laconicd deploy exec cli "laconic registry status" +$ laconic-so --stack fixturenet-laconicd deploy exec cli "laconic cns status" ``` diff --git a/stack_orchestrator/deploy/webapp/util.py b/stack_orchestrator/deploy/webapp/util.py index 822eca4e..5c484ed1 100644 --- a/stack_orchestrator/deploy/webapp/util.py +++ b/stack_orchestrator/deploy/webapp/util.py @@ -94,7 +94,7 @@ class LaconicRegistryClient: ) def list_records(self, criteria={}, all=False): - args = ["laconic", "-c", self.config_file, "registry", "record", "list"] + args = ["laconic", "-c", self.config_file, "cns", "record", "list"] if all: args.append("--all") @@ -140,7 +140,7 @@ class LaconicRegistryClient: if name in self.cache.name_or_id: return self.cache.name_or_id[name] - args = ["laconic", "-c", self.config_file, "registry", "name", "resolve", name] + args = ["laconic", "-c", self.config_file, "cns", "name", "resolve", name] parsed = [AttrDict(r) for r in json.loads(logged_cmd(self.log_file, *args))] if parsed: @@ -165,7 +165,7 @@ class LaconicRegistryClient: "laconic", "-c", self.config_file, - "registry", + "cns", "record", "get", "--id", @@ -203,8 +203,7 @@ class LaconicRegistryClient: print(open(record_fname, 'r').read(), file=self.log_file) new_record_id = json.loads( - logged_cmd(self.log_file, "laconic", "-c", self.config_file, "registry", - "record", "publish", "--filename", record_fname) + logged_cmd(self.log_file, "laconic", "-c", self.config_file, "cns", "record", "publish", "--filename", record_fname) )["id"] for name in names: self.set_name(name, new_record_id) @@ -213,10 +212,10 @@ class LaconicRegistryClient: logged_cmd(self.log_file, "rm", "-rf", tmpdir) def set_name(self, name, record_id): - logged_cmd(self.log_file, "laconic", "-c", self.config_file, "registry", "name", "set", name, record_id) + logged_cmd(self.log_file, "laconic", "-c", self.config_file, "cns", "name", "set", name, record_id) def delete_name(self, name): - logged_cmd(self.log_file, "laconic", "-c", self.config_file, "registry", "name", "delete", name) + logged_cmd(self.log_file, "laconic", "-c", self.config_file, "cns", "name", "delete", name) def file_hash(filename): From 105805cb9b5fd9aab2fd37b45cf85fd0f53b0da4 Mon Sep 17 00:00:00 2001 From: Prathamesh Musale Date: Thu, 4 Apr 2024 07:16:46 +0000 Subject: [PATCH 10/13] Run registry CLI tests as part of laconicd fixturenet tests (#791) Part of https://www.notion.so/Test-registry-cli-in-SO-fixturenet-laconicd-CI-ef1f497678264362931bd12643ba8a17 Co-authored-by: neeraj Reviewed-on: https://git.vdb.to/cerc-io/stack-orchestrator/pulls/791 Co-authored-by: Prathamesh Musale Co-committed-by: Prathamesh Musale --- .gitea/workflows/fixturenet-laconicd-test.yml | 4 +- .../triggers/fixturenet-laconicd-test | 1 + .../docker-compose-fixturenet-laconicd.yml | 4 ++ tests/fixturenet-laconicd/run-cli-test.sh | 37 +++++++++++++++++++ tests/webapp-test/run-webapp-test.sh | 4 +- 5 files changed, 47 insertions(+), 3 deletions(-) create 
mode 100755 tests/fixturenet-laconicd/run-cli-test.sh diff --git a/.gitea/workflows/fixturenet-laconicd-test.yml b/.gitea/workflows/fixturenet-laconicd-test.yml index 67f3797e..c8c3991e 100644 --- a/.gitea/workflows/fixturenet-laconicd-test.yml +++ b/.gitea/workflows/fixturenet-laconicd-test.yml @@ -11,7 +11,7 @@ on: jobs: test: - name: "Run an Laconicd fixturenet test" + name: "Run Laconicd fixturenet and Laconic CLI tests" runs-on: ubuntu-latest steps: - name: 'Update' @@ -46,3 +46,5 @@ jobs: run: ./scripts/build_shiv_package.sh - name: "Run fixturenet-laconicd tests" run: ./tests/fixturenet-laconicd/run-test.sh + - name: "Run laconic CLI tests" + run: ./tests/fixturenet-laconicd/run-cli-test.sh diff --git a/.gitea/workflows/triggers/fixturenet-laconicd-test b/.gitea/workflows/triggers/fixturenet-laconicd-test index ad09df27..59af2ab6 100644 --- a/.gitea/workflows/triggers/fixturenet-laconicd-test +++ b/.gitea/workflows/triggers/fixturenet-laconicd-test @@ -1,3 +1,4 @@ Change this file to trigger running the fixturenet-laconicd-test CI job Trigger Trigger +Trigger diff --git a/stack_orchestrator/data/compose/docker-compose-fixturenet-laconicd.yml b/stack_orchestrator/data/compose/docker-compose-fixturenet-laconicd.yml index 7b48f60d..38b8d442 100644 --- a/stack_orchestrator/data/compose/docker-compose-fixturenet-laconicd.yml +++ b/stack_orchestrator/data/compose/docker-compose-fixturenet-laconicd.yml @@ -3,6 +3,9 @@ services: restart: unless-stopped image: cerc/laconicd:local command: ["sh", "/docker-entrypoint-scripts.d/create-fixturenet.sh"] + environment: + TEST_AUCTION_ENABLED: ${TEST_AUCTION_ENABLED} + TEST_REGISTRY_EXPIRY: ${TEST_REGISTRY_EXPIRY} volumes: # The cosmos-sdk node's database directory: - laconicd-data:/root/.laconicd @@ -25,6 +28,7 @@ services: image: cerc/laconic-registry-cli:local volumes: - ../config/fixturenet-laconicd/registry-cli-config-template.yml:/registry-cli-config-template.yml + - ${BASE_DIR:-~/cerc}/laconic-registry-cli:/laconic-registry-cli volumes: laconicd-data: diff --git a/tests/fixturenet-laconicd/run-cli-test.sh b/tests/fixturenet-laconicd/run-cli-test.sh new file mode 100755 index 00000000..877d0104 --- /dev/null +++ b/tests/fixturenet-laconicd/run-cli-test.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash + +set -e +if [ -n "$CERC_SCRIPT_DEBUG" ]; then + set -x +fi + +echo "$(date +"%Y-%m-%d %T"): Running stack-orchestrator Laconic registry CLI tests" +env +cat /etc/hosts +# Bit of a hack, test the most recent package +TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 ) + +echo "$(date +"%Y-%m-%d %T"): Starting stack" +TEST_AUCTION_ENABLED=true BASE_DIR=~/cerc $TEST_TARGET_SO --stack fixturenet-laconicd deploy --cluster laconicd up +echo "$(date +"%Y-%m-%d %T"): Stack started" + +# Verify that the fixturenet is up and running +$TEST_TARGET_SO --stack fixturenet-laconicd deploy --cluster laconicd ps + +# Get the fixturenet account address +laconicd_account_address=$(docker exec laconicd-laconicd-1 laconicd keys list | awk '/- address:/ {print $3}') + +# Copy over config +docker exec laconicd-cli-1 cp config.yml laconic-registry-cli/ + +# Wait for the laconid endpoint to come up +echo "Waiting for the RPC endpoint to come up" +docker exec laconicd-laconicd-1 sh -c "curl --retry 20 --retry-delay 3 --retry-connrefused http://127.0.0.1:9473/api" + +# Run the tests +echo "Running the tests" +docker exec -e TEST_ACCOUNT=$laconicd_account_address laconicd-cli-1 sh -c 'cd laconic-registry-cli && yarn && yarn test' + +# Clean up +$TEST_TARGET_SO --stack 
fixturenet-laconicd deploy --cluster laconicd down --delete-volumes +echo "$(date +"%Y-%m-%d %T"): Test finished" diff --git a/tests/webapp-test/run-webapp-test.sh b/tests/webapp-test/run-webapp-test.sh index 8cae4828..39b08c3b 100755 --- a/tests/webapp-test/run-webapp-test.sh +++ b/tests/webapp-test/run-webapp-test.sh @@ -32,14 +32,14 @@ set +e CONTAINER_ID=$(docker run -p 3000:80 -d -e CERC_SCRIPT_DEBUG=$CERC_SCRIPT_DEBUG cerc/test-progressive-web-app:local) sleep 3 -wget -t 7 -O test.before -m http://localhost:3000 +wget --tries 20 --retry-connrefused --waitretry=3 -O test.before -m http://localhost:3000 docker logs $CONTAINER_ID docker remove -f $CONTAINER_ID CONTAINER_ID=$(docker run -p 3000:80 -e CERC_WEBAPP_DEBUG=$CHECK -e CERC_SCRIPT_DEBUG=$CERC_SCRIPT_DEBUG -d cerc/test-progressive-web-app:local) sleep 3 -wget -t 7 -O test.after -m http://localhost:3000 +wget --tries 20 --retry-connrefused --waitretry=3 -O test.after -m http://localhost:3000 docker logs $CONTAINER_ID docker remove -f $CONTAINER_ID From 515f6d16f560b9e38e510206dbcef844746ea7a0 Mon Sep 17 00:00:00 2001 From: Prathamesh Musale Date: Thu, 4 Apr 2024 10:50:02 +0000 Subject: [PATCH 11/13] Fix laconic registry CLI tests (#792) Part of https://www.notion.so/Test-registry-cli-in-SO-fixturenet-laconicd-CI-ef1f497678264362931bd12643ba8a17 Reviewed-on: https://git.vdb.to/cerc-io/stack-orchestrator/pulls/792 Co-authored-by: Prathamesh Musale Co-committed-by: Prathamesh Musale --- .gitea/workflows/triggers/fixturenet-laconicd-test | 1 + tests/fixturenet-laconicd/run-cli-test.sh | 8 +++++++- tests/fixturenet-laconicd/run-test.sh | 1 + 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/.gitea/workflows/triggers/fixturenet-laconicd-test b/.gitea/workflows/triggers/fixturenet-laconicd-test index 59af2ab6..f0cb9305 100644 --- a/.gitea/workflows/triggers/fixturenet-laconicd-test +++ b/.gitea/workflows/triggers/fixturenet-laconicd-test @@ -2,3 +2,4 @@ Change this file to trigger running the fixturenet-laconicd-test CI job Trigger Trigger Trigger +Trigger diff --git a/tests/fixturenet-laconicd/run-cli-test.sh b/tests/fixturenet-laconicd/run-cli-test.sh index 877d0104..8a5dcb42 100755 --- a/tests/fixturenet-laconicd/run-cli-test.sh +++ b/tests/fixturenet-laconicd/run-cli-test.sh @@ -11,8 +11,12 @@ cat /etc/hosts # Bit of a hack, test the most recent package TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 ) +export CERC_REPO_BASE_DIR=$(mktemp -d $(pwd)/stack-orchestrator-fixturenet-laconicd-test.XXXXXXXXXX) +echo "$(date +"%Y-%m-%d %T"): Cloning laconic-registry-cli repository into: $CERC_REPO_BASE_DIR" +$TEST_TARGET_SO --stack fixturenet-laconicd setup-repositories --include git.vdb.to/cerc-io/laconic-registry-cli + echo "$(date +"%Y-%m-%d %T"): Starting stack" -TEST_AUCTION_ENABLED=true BASE_DIR=~/cerc $TEST_TARGET_SO --stack fixturenet-laconicd deploy --cluster laconicd up +TEST_AUCTION_ENABLED=true BASE_DIR=${CERC_REPO_BASE_DIR} $TEST_TARGET_SO --stack fixturenet-laconicd deploy --cluster laconicd up echo "$(date +"%Y-%m-%d %T"): Stack started" # Verify that the fixturenet is up and running @@ -34,4 +38,6 @@ docker exec -e TEST_ACCOUNT=$laconicd_account_address laconicd-cli-1 sh -c 'cd l # Clean up $TEST_TARGET_SO --stack fixturenet-laconicd deploy --cluster laconicd down --delete-volumes +echo "$(date +"%Y-%m-%d %T"): Removing cloned repositories" +rm -rf $CERC_REPO_BASE_DIR echo "$(date +"%Y-%m-%d %T"): Test finished" diff --git a/tests/fixturenet-laconicd/run-test.sh b/tests/fixturenet-laconicd/run-test.sh index 
8dad9917..1b3f1f34 100755 --- a/tests/fixturenet-laconicd/run-test.sh +++ b/tests/fixturenet-laconicd/run-test.sh @@ -12,6 +12,7 @@ cat /etc/hosts TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 ) # Set a new unique repo dir export CERC_REPO_BASE_DIR=$(mktemp -d $(pwd)/stack-orchestrator-fixturenet-laconicd-test.XXXXXXXXXX) + echo "$(date +"%Y-%m-%d %T"): Testing this package: $TEST_TARGET_SO" echo "$(date +"%Y-%m-%d %T"): Test version command" reported_version_string=$( $TEST_TARGET_SO version ) From 9cd34ffebbddb8112204972caf5de9fc6670b6e4 Mon Sep 17 00:00:00 2001 From: Prathamesh Musale Date: Fri, 5 Apr 2024 08:27:46 +0000 Subject: [PATCH 12/13] Add Slack alerts for failures on CI workflows (#793) Part of https://www.notion.so/Alerting-for-failing-CI-jobs-d0183b65453947aeab11dbddf989d9c0 Reviewed-on: https://git.vdb.to/cerc-io/stack-orchestrator/pulls/793 Co-authored-by: Prathamesh Musale Co-committed-by: Prathamesh Musale --- .../fixturenet-eth-plugeth-arm-test.yml | 16 ++++++++++++++++ .../workflows/fixturenet-eth-plugeth-test.yml | 16 ++++++++++++++++ .gitea/workflows/fixturenet-eth-test.yml | 17 ++++++++++++++++- .gitea/workflows/fixturenet-laconicd-test.yml | 16 ++++++++++++++++ .gitea/workflows/lint.yml | 16 ++++++++++++++++ .gitea/workflows/publish.yml | 16 ++++++++++++++++ .gitea/workflows/test-container-registry.yml | 17 ++++++++++++++++- .gitea/workflows/test-database.yml | 17 ++++++++++++++++- .gitea/workflows/test-deploy.yml | 16 ++++++++++++++++ .gitea/workflows/test-k8s-deploy.yml | 17 ++++++++++++++++- .gitea/workflows/test-webapp.yml | 16 ++++++++++++++++ .gitea/workflows/test.yml | 18 ++++++++++++++++-- .../triggers/fixturenet-laconicd-test | 1 + 13 files changed, 193 insertions(+), 6 deletions(-) diff --git a/.gitea/workflows/fixturenet-eth-plugeth-arm-test.yml b/.gitea/workflows/fixturenet-eth-plugeth-arm-test.yml index 92ddd5dc..9f12854d 100644 --- a/.gitea/workflows/fixturenet-eth-plugeth-arm-test.yml +++ b/.gitea/workflows/fixturenet-eth-plugeth-arm-test.yml @@ -43,3 +43,19 @@ jobs: run: ./scripts/build_shiv_package.sh - name: "Run fixturenet-eth tests" run: ./tests/fixturenet-eth-plugeth/run-test.sh + - name: Notify Vulcanize Slack on CI failure + if: always() + uses: ravsamhq/notify-slack-action@v2 + with: + status: ${{ job.status }} + notify_when: 'failure' + env: + SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }} + - name: Notify DeepStack Slack on CI failure + if: always() + uses: ravsamhq/notify-slack-action@v2 + with: + status: ${{ job.status }} + notify_when: 'failure' + env: + SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }} diff --git a/.gitea/workflows/fixturenet-eth-plugeth-test.yml b/.gitea/workflows/fixturenet-eth-plugeth-test.yml index 11bca169..8bb4ff8d 100644 --- a/.gitea/workflows/fixturenet-eth-plugeth-test.yml +++ b/.gitea/workflows/fixturenet-eth-plugeth-test.yml @@ -47,3 +47,19 @@ jobs: sleep 5 - name: "Run fixturenet-eth tests" run: ./tests/fixturenet-eth-plugeth/run-test.sh + - name: Notify Vulcanize Slack on CI failure + if: always() + uses: ravsamhq/notify-slack-action@v2 + with: + status: ${{ job.status }} + notify_when: 'failure' + env: + SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }} + - name: Notify DeepStack Slack on CI failure + if: always() + uses: ravsamhq/notify-slack-action@v2 + with: + status: ${{ job.status }} + notify_when: 'failure' + env: + SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }} diff --git a/.gitea/workflows/fixturenet-eth-test.yml 
b/.gitea/workflows/fixturenet-eth-test.yml index 73804df5..e5f7a24f 100644 --- a/.gitea/workflows/fixturenet-eth-test.yml +++ b/.gitea/workflows/fixturenet-eth-test.yml @@ -45,4 +45,19 @@ jobs: sleep 5 - name: "Run fixturenet-eth tests" run: ./tests/fixturenet-eth/run-test.sh - + - name: Notify Vulcanize Slack on CI failure + if: always() + uses: ravsamhq/notify-slack-action@v2 + with: + status: ${{ job.status }} + notify_when: 'failure' + env: + SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }} + - name: Notify DeepStack Slack on CI failure + if: always() + uses: ravsamhq/notify-slack-action@v2 + with: + status: ${{ job.status }} + notify_when: 'failure' + env: + SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }} diff --git a/.gitea/workflows/fixturenet-laconicd-test.yml b/.gitea/workflows/fixturenet-laconicd-test.yml index c8c3991e..50012b1d 100644 --- a/.gitea/workflows/fixturenet-laconicd-test.yml +++ b/.gitea/workflows/fixturenet-laconicd-test.yml @@ -48,3 +48,19 @@ jobs: run: ./tests/fixturenet-laconicd/run-test.sh - name: "Run laconic CLI tests" run: ./tests/fixturenet-laconicd/run-cli-test.sh + - name: Notify Vulcanize Slack on CI failure + if: always() + uses: ravsamhq/notify-slack-action@v2 + with: + status: ${{ job.status }} + notify_when: 'failure' + env: + SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }} + - name: Notify DeepStack Slack on CI failure + if: always() + uses: ravsamhq/notify-slack-action@v2 + with: + status: ${{ job.status }} + notify_when: 'failure' + env: + SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }} diff --git a/.gitea/workflows/lint.yml b/.gitea/workflows/lint.yml index f9be1e6b..d7011b46 100644 --- a/.gitea/workflows/lint.yml +++ b/.gitea/workflows/lint.yml @@ -19,3 +19,19 @@ jobs: python-version: '3.8' - name : "Run flake8" uses: py-actions/flake8@v2 + - name: Notify Vulcanize Slack on CI failure + if: always() + uses: ravsamhq/notify-slack-action@v2 + with: + status: ${{ job.status }} + notify_when: 'failure' + env: + SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }} + - name: Notify DeepStack Slack on CI failure + if: always() + uses: ravsamhq/notify-slack-action@v2 + with: + status: ${{ job.status }} + notify_when: 'failure' + env: + SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }} diff --git a/.gitea/workflows/publish.yml b/.gitea/workflows/publish.yml index ffbb9937..53bce1c2 100644 --- a/.gitea/workflows/publish.yml +++ b/.gitea/workflows/publish.yml @@ -54,3 +54,19 @@ jobs: # Hack using endsWith to workaround Gitea sometimes sending "publish-test" vs "refs/heads/publish-test" draft: ${{ endsWith('publish-test', github.ref ) }} files: ./laconic-so + - name: Notify Vulcanize Slack on CI failure + if: always() + uses: ravsamhq/notify-slack-action@v2 + with: + status: ${{ job.status }} + notify_when: 'failure' + env: + SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }} + - name: Notify DeepStack Slack on CI failure + if: always() + uses: ravsamhq/notify-slack-action@v2 + with: + status: ${{ job.status }} + notify_when: 'failure' + env: + SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }} diff --git a/.gitea/workflows/test-container-registry.yml b/.gitea/workflows/test-container-registry.yml index 3358aa09..5cc1a0cc 100644 --- a/.gitea/workflows/test-container-registry.yml +++ b/.gitea/workflows/test-container-registry.yml @@ -51,4 +51,19 @@ jobs: source /opt/bash-utils/cgroup-helper.sh join_cgroup ./tests/container-registry/run-test.sh - + - name: Notify Vulcanize Slack 
on CI failure + if: always() + uses: ravsamhq/notify-slack-action@v2 + with: + status: ${{ job.status }} + notify_when: 'failure' + env: + SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }} + - name: Notify DeepStack Slack on CI failure + if: always() + uses: ravsamhq/notify-slack-action@v2 + with: + status: ${{ job.status }} + notify_when: 'failure' + env: + SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }} diff --git a/.gitea/workflows/test-database.yml b/.gitea/workflows/test-database.yml index b925271b..247ed933 100644 --- a/.gitea/workflows/test-database.yml +++ b/.gitea/workflows/test-database.yml @@ -49,4 +49,19 @@ jobs: source /opt/bash-utils/cgroup-helper.sh join_cgroup ./tests/database/run-test.sh - + - name: Notify Vulcanize Slack on CI failure + if: always() + uses: ravsamhq/notify-slack-action@v2 + with: + status: ${{ job.status }} + notify_when: 'failure' + env: + SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }} + - name: Notify DeepStack Slack on CI failure + if: always() + uses: ravsamhq/notify-slack-action@v2 + with: + status: ${{ job.status }} + notify_when: 'failure' + env: + SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }} diff --git a/.gitea/workflows/test-deploy.yml b/.gitea/workflows/test-deploy.yml index 6e47c82d..8d901daf 100644 --- a/.gitea/workflows/test-deploy.yml +++ b/.gitea/workflows/test-deploy.yml @@ -47,3 +47,19 @@ jobs: sleep 5 - name: "Run deploy tests" run: ./tests/deploy/run-deploy-test.sh + - name: Notify Vulcanize Slack on CI failure + if: always() + uses: ravsamhq/notify-slack-action@v2 + with: + status: ${{ job.status }} + notify_when: 'failure' + env: + SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }} + - name: Notify DeepStack Slack on CI failure + if: always() + uses: ravsamhq/notify-slack-action@v2 + with: + status: ${{ job.status }} + notify_when: 'failure' + env: + SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }} diff --git a/.gitea/workflows/test-k8s-deploy.yml b/.gitea/workflows/test-k8s-deploy.yml index 7df39132..fa95608c 100644 --- a/.gitea/workflows/test-k8s-deploy.yml +++ b/.gitea/workflows/test-k8s-deploy.yml @@ -51,4 +51,19 @@ jobs: source /opt/bash-utils/cgroup-helper.sh join_cgroup ./tests/k8s-deploy/run-deploy-test.sh - + - name: Notify Vulcanize Slack on CI failure + if: always() + uses: ravsamhq/notify-slack-action@v2 + with: + status: ${{ job.status }} + notify_when: 'failure' + env: + SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }} + - name: Notify DeepStack Slack on CI failure + if: always() + uses: ravsamhq/notify-slack-action@v2 + with: + status: ${{ job.status }} + notify_when: 'failure' + env: + SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }} diff --git a/.gitea/workflows/test-webapp.yml b/.gitea/workflows/test-webapp.yml index 8b83c606..6a3b1c94 100644 --- a/.gitea/workflows/test-webapp.yml +++ b/.gitea/workflows/test-webapp.yml @@ -49,3 +49,19 @@ jobs: sleep 5 - name: "Run webapp tests" run: ./tests/webapp-test/run-webapp-test.sh + - name: Notify Vulcanize Slack on CI failure + if: always() + uses: ravsamhq/notify-slack-action@v2 + with: + status: ${{ job.status }} + notify_when: 'failure' + env: + SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }} + - name: Notify DeepStack Slack on CI failure + if: always() + uses: ravsamhq/notify-slack-action@v2 + with: + status: ${{ job.status }} + notify_when: 'failure' + env: + SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }} diff --git a/.gitea/workflows/test.yml 
b/.gitea/workflows/test.yml index 193e27c9..9b6e843e 100644 --- a/.gitea/workflows/test.yml +++ b/.gitea/workflows/test.yml @@ -47,5 +47,19 @@ jobs: sleep 5 - name: "Run smoke tests" run: ./tests/smoke-test/run-smoke-test.sh - - + - name: Notify Vulcanize Slack on CI failure + if: always() + uses: ravsamhq/notify-slack-action@v2 + with: + status: ${{ job.status }} + notify_when: 'failure' + env: + SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }} + - name: Notify DeepStack Slack on CI failure + if: always() + uses: ravsamhq/notify-slack-action@v2 + with: + status: ${{ job.status }} + notify_when: 'failure' + env: + SLACK_WEBHOOK_URL: ${{ secrets.DEEPSTACK_SLACK_CI_ALERTS }} diff --git a/.gitea/workflows/triggers/fixturenet-laconicd-test b/.gitea/workflows/triggers/fixturenet-laconicd-test index f0cb9305..10db7fd7 100644 --- a/.gitea/workflows/triggers/fixturenet-laconicd-test +++ b/.gitea/workflows/triggers/fixturenet-laconicd-test @@ -3,3 +3,4 @@ Trigger Trigger Trigger Trigger +Trigger From 40f362511b627c23dc18c2bd5bd7e3585c40f823 Mon Sep 17 00:00:00 2001 From: Prathamesh Musale Date: Fri, 5 Apr 2024 09:26:08 +0000 Subject: [PATCH 13/13] Run CI alert steps only on main (#797) Part of https://www.notion.so/Alerting-for-failing-CI-jobs-d0183b65453947aeab11dbddf989d9c0 - Run CI alert steps only on main to avoid alerts for in-progress PRs - The Slack alerts will be sent on a CI job failure if - A commit is pushed directly to main - A PR gets merged into main - A scheduled job runs on main Reviewed-on: https://git.vdb.to/cerc-io/stack-orchestrator/pulls/797 Co-authored-by: Prathamesh Musale Co-committed-by: Prathamesh Musale --- .gitea/workflows/fixturenet-eth-plugeth-arm-test.yml | 4 ++-- .gitea/workflows/fixturenet-eth-plugeth-test.yml | 4 ++-- .gitea/workflows/fixturenet-eth-test.yml | 4 ++-- .gitea/workflows/fixturenet-laconicd-test.yml | 4 ++-- .gitea/workflows/lint.yml | 4 ++-- .gitea/workflows/publish.yml | 4 ++-- .gitea/workflows/test-container-registry.yml | 4 ++-- .gitea/workflows/test-database.yml | 4 ++-- .gitea/workflows/test-deploy.yml | 4 ++-- .gitea/workflows/test-k8s-deploy.yml | 4 ++-- .gitea/workflows/test-webapp.yml | 4 ++-- .gitea/workflows/test.yml | 4 ++-- 12 files changed, 24 insertions(+), 24 deletions(-) diff --git a/.gitea/workflows/fixturenet-eth-plugeth-arm-test.yml b/.gitea/workflows/fixturenet-eth-plugeth-arm-test.yml index 9f12854d..b5e8d22c 100644 --- a/.gitea/workflows/fixturenet-eth-plugeth-arm-test.yml +++ b/.gitea/workflows/fixturenet-eth-plugeth-arm-test.yml @@ -44,7 +44,7 @@ jobs: - name: "Run fixturenet-eth tests" run: ./tests/fixturenet-eth-plugeth/run-test.sh - name: Notify Vulcanize Slack on CI failure - if: always() + if: ${{ always() && github.ref_name == 'main' }} uses: ravsamhq/notify-slack-action@v2 with: status: ${{ job.status }} @@ -52,7 +52,7 @@ jobs: env: SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }} - name: Notify DeepStack Slack on CI failure - if: always() + if: ${{ always() && github.ref_name == 'main' }} uses: ravsamhq/notify-slack-action@v2 with: status: ${{ job.status }} diff --git a/.gitea/workflows/fixturenet-eth-plugeth-test.yml b/.gitea/workflows/fixturenet-eth-plugeth-test.yml index 8bb4ff8d..f9db5e86 100644 --- a/.gitea/workflows/fixturenet-eth-plugeth-test.yml +++ b/.gitea/workflows/fixturenet-eth-plugeth-test.yml @@ -48,7 +48,7 @@ jobs: - name: "Run fixturenet-eth tests" run: ./tests/fixturenet-eth-plugeth/run-test.sh - name: Notify Vulcanize Slack on CI failure - if: always() + if: ${{ always() && 
github.ref_name == 'main' }} uses: ravsamhq/notify-slack-action@v2 with: status: ${{ job.status }} @@ -56,7 +56,7 @@ jobs: env: SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }} - name: Notify DeepStack Slack on CI failure - if: always() + if: ${{ always() && github.ref_name == 'main' }} uses: ravsamhq/notify-slack-action@v2 with: status: ${{ job.status }} diff --git a/.gitea/workflows/fixturenet-eth-test.yml b/.gitea/workflows/fixturenet-eth-test.yml index e5f7a24f..671184a9 100644 --- a/.gitea/workflows/fixturenet-eth-test.yml +++ b/.gitea/workflows/fixturenet-eth-test.yml @@ -46,7 +46,7 @@ jobs: - name: "Run fixturenet-eth tests" run: ./tests/fixturenet-eth/run-test.sh - name: Notify Vulcanize Slack on CI failure - if: always() + if: ${{ always() && github.ref_name == 'main' }} uses: ravsamhq/notify-slack-action@v2 with: status: ${{ job.status }} @@ -54,7 +54,7 @@ jobs: env: SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }} - name: Notify DeepStack Slack on CI failure - if: always() + if: ${{ always() && github.ref_name == 'main' }} uses: ravsamhq/notify-slack-action@v2 with: status: ${{ job.status }} diff --git a/.gitea/workflows/fixturenet-laconicd-test.yml b/.gitea/workflows/fixturenet-laconicd-test.yml index 50012b1d..ae5bf2d5 100644 --- a/.gitea/workflows/fixturenet-laconicd-test.yml +++ b/.gitea/workflows/fixturenet-laconicd-test.yml @@ -49,7 +49,7 @@ jobs: - name: "Run laconic CLI tests" run: ./tests/fixturenet-laconicd/run-cli-test.sh - name: Notify Vulcanize Slack on CI failure - if: always() + if: ${{ always() && github.ref_name == 'main' }} uses: ravsamhq/notify-slack-action@v2 with: status: ${{ job.status }} @@ -57,7 +57,7 @@ jobs: env: SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }} - name: Notify DeepStack Slack on CI failure - if: always() + if: ${{ always() && github.ref_name == 'main' }} uses: ravsamhq/notify-slack-action@v2 with: status: ${{ job.status }} diff --git a/.gitea/workflows/lint.yml b/.gitea/workflows/lint.yml index d7011b46..44156eae 100644 --- a/.gitea/workflows/lint.yml +++ b/.gitea/workflows/lint.yml @@ -20,7 +20,7 @@ jobs: - name : "Run flake8" uses: py-actions/flake8@v2 - name: Notify Vulcanize Slack on CI failure - if: always() + if: ${{ always() && github.ref_name == 'main' }} uses: ravsamhq/notify-slack-action@v2 with: status: ${{ job.status }} @@ -28,7 +28,7 @@ jobs: env: SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }} - name: Notify DeepStack Slack on CI failure - if: always() + if: ${{ always() && github.ref_name == 'main' }} uses: ravsamhq/notify-slack-action@v2 with: status: ${{ job.status }} diff --git a/.gitea/workflows/publish.yml b/.gitea/workflows/publish.yml index 53bce1c2..13b94ba5 100644 --- a/.gitea/workflows/publish.yml +++ b/.gitea/workflows/publish.yml @@ -55,7 +55,7 @@ jobs: draft: ${{ endsWith('publish-test', github.ref ) }} files: ./laconic-so - name: Notify Vulcanize Slack on CI failure - if: always() + if: ${{ always() && github.ref_name == 'main' }} uses: ravsamhq/notify-slack-action@v2 with: status: ${{ job.status }} @@ -63,7 +63,7 @@ jobs: env: SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }} - name: Notify DeepStack Slack on CI failure - if: always() + if: ${{ always() && github.ref_name == 'main' }} uses: ravsamhq/notify-slack-action@v2 with: status: ${{ job.status }} diff --git a/.gitea/workflows/test-container-registry.yml b/.gitea/workflows/test-container-registry.yml index 5cc1a0cc..f0cfb74c 100644 --- a/.gitea/workflows/test-container-registry.yml +++ 
b/.gitea/workflows/test-container-registry.yml @@ -52,7 +52,7 @@ jobs: join_cgroup ./tests/container-registry/run-test.sh - name: Notify Vulcanize Slack on CI failure - if: always() + if: ${{ always() && github.ref_name == 'main' }} uses: ravsamhq/notify-slack-action@v2 with: status: ${{ job.status }} @@ -60,7 +60,7 @@ jobs: env: SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }} - name: Notify DeepStack Slack on CI failure - if: always() + if: ${{ always() && github.ref_name == 'main' }} uses: ravsamhq/notify-slack-action@v2 with: status: ${{ job.status }} diff --git a/.gitea/workflows/test-database.yml b/.gitea/workflows/test-database.yml index 247ed933..f66b1b77 100644 --- a/.gitea/workflows/test-database.yml +++ b/.gitea/workflows/test-database.yml @@ -50,7 +50,7 @@ jobs: join_cgroup ./tests/database/run-test.sh - name: Notify Vulcanize Slack on CI failure - if: always() + if: ${{ always() && github.ref_name == 'main' }} uses: ravsamhq/notify-slack-action@v2 with: status: ${{ job.status }} @@ -58,7 +58,7 @@ jobs: env: SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }} - name: Notify DeepStack Slack on CI failure - if: always() + if: ${{ always() && github.ref_name == 'main' }} uses: ravsamhq/notify-slack-action@v2 with: status: ${{ job.status }} diff --git a/.gitea/workflows/test-deploy.yml b/.gitea/workflows/test-deploy.yml index 8d901daf..426b629b 100644 --- a/.gitea/workflows/test-deploy.yml +++ b/.gitea/workflows/test-deploy.yml @@ -48,7 +48,7 @@ jobs: - name: "Run deploy tests" run: ./tests/deploy/run-deploy-test.sh - name: Notify Vulcanize Slack on CI failure - if: always() + if: ${{ always() && github.ref_name == 'main' }} uses: ravsamhq/notify-slack-action@v2 with: status: ${{ job.status }} @@ -56,7 +56,7 @@ jobs: env: SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }} - name: Notify DeepStack Slack on CI failure - if: always() + if: ${{ always() && github.ref_name == 'main' }} uses: ravsamhq/notify-slack-action@v2 with: status: ${{ job.status }} diff --git a/.gitea/workflows/test-k8s-deploy.yml b/.gitea/workflows/test-k8s-deploy.yml index fa95608c..de96b391 100644 --- a/.gitea/workflows/test-k8s-deploy.yml +++ b/.gitea/workflows/test-k8s-deploy.yml @@ -52,7 +52,7 @@ jobs: join_cgroup ./tests/k8s-deploy/run-deploy-test.sh - name: Notify Vulcanize Slack on CI failure - if: always() + if: ${{ always() && github.ref_name == 'main' }} uses: ravsamhq/notify-slack-action@v2 with: status: ${{ job.status }} @@ -60,7 +60,7 @@ jobs: env: SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }} - name: Notify DeepStack Slack on CI failure - if: always() + if: ${{ always() && github.ref_name == 'main' }} uses: ravsamhq/notify-slack-action@v2 with: status: ${{ job.status }} diff --git a/.gitea/workflows/test-webapp.yml b/.gitea/workflows/test-webapp.yml index 6a3b1c94..708c6b3d 100644 --- a/.gitea/workflows/test-webapp.yml +++ b/.gitea/workflows/test-webapp.yml @@ -50,7 +50,7 @@ jobs: - name: "Run webapp tests" run: ./tests/webapp-test/run-webapp-test.sh - name: Notify Vulcanize Slack on CI failure - if: always() + if: ${{ always() && github.ref_name == 'main' }} uses: ravsamhq/notify-slack-action@v2 with: status: ${{ job.status }} @@ -58,7 +58,7 @@ jobs: env: SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }} - name: Notify DeepStack Slack on CI failure - if: always() + if: ${{ always() && github.ref_name == 'main' }} uses: ravsamhq/notify-slack-action@v2 with: status: ${{ job.status }} diff --git a/.gitea/workflows/test.yml 
b/.gitea/workflows/test.yml index 9b6e843e..f017dc49 100644 --- a/.gitea/workflows/test.yml +++ b/.gitea/workflows/test.yml @@ -48,7 +48,7 @@ jobs: - name: "Run smoke tests" run: ./tests/smoke-test/run-smoke-test.sh - name: Notify Vulcanize Slack on CI failure - if: always() + if: ${{ always() && github.ref_name == 'main' }} uses: ravsamhq/notify-slack-action@v2 with: status: ${{ job.status }} @@ -56,7 +56,7 @@ jobs: env: SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }} - name: Notify DeepStack Slack on CI failure - if: always() + if: ${{ always() && github.ref_name == 'main' }} uses: ravsamhq/notify-slack-action@v2 with: status: ${{ job.status }}
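
The run-webapp-test.sh change earlier in this series swaps "wget -t 7" for "wget --tries 20 --retry-connrefused --waitretry=3". By default wget treats "connection refused" as a sign the server is not running at all and gives up rather than retrying, so a request made while the freshly started container is still binding its port fails immediately. --retry-connrefused makes that condition retryable, --tries 20 raises the attempt budget, and --waitretry=3 applies linear backoff (1s, then 2s, then capped at 3s) between retries. A minimal standalone sketch of the same idea; the URL and output filename are illustrative, not taken from the test:

#!/usr/bin/env bash
# Sketch: poll a just-started container until it serves HTTP, then mirror the page.
set -e
url="http://localhost:3000"   # illustrative endpoint
# --retry-connrefused  treat "connection refused" (server not yet listening) as retryable
# --tries 20           allow up to 20 attempts in total
# --waitretry=3        wait 1s, 2s, then 3s (capped) between successive retries
wget --tries 20 --retry-connrefused --waitretry=3 -O test.out -m "$url"
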
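The run-cli-test.sh changes in patch 11/13 stop reusing ~/cerc and give every run its own checkout directory: mktemp creates a unique directory next to the working directory, setup-repositories clones laconic-registry-cli into it, BASE_DIR points the stack at it, and the directory is removed after the stack is torn down. A sketch of that pattern, assuming the CLI is available on PATH as laconic-so (the real test invokes the freshly built package via TEST_TARGET_SO) and using a trap so cleanup also happens when a step fails (the real script removes the directory explicitly at the end instead):

#!/usr/bin/env bash
# Sketch: isolate a test run in a throwaway repository checkout directory.
set -euo pipefail

export CERC_REPO_BASE_DIR=$(mktemp -d "$(pwd)/stack-orchestrator-fixturenet-laconicd-test.XXXXXXXXXX")
trap 'rm -rf "$CERC_REPO_BASE_DIR"' EXIT   # cleanup even on failure (assumption; the script uses an explicit rm -rf)

echo "$(date +"%Y-%m-%d %T"): Cloning laconic-registry-cli repository into: $CERC_REPO_BASE_DIR"
laconic-so --stack fixturenet-laconicd setup-repositories --include git.vdb.to/cerc-io/laconic-registry-cli

TEST_AUCTION_ENABLED=true BASE_DIR=${CERC_REPO_BASE_DIR} laconic-so --stack fixturenet-laconicd deploy --cluster laconicd up
# ... registry CLI checks run here ...
laconic-so --stack fixturenet-laconicd deploy --cluster laconicd down --delete-volumes
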
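Both patch 11/13 and patch 12/13 also append another "Trigger" line to .gitea/workflows/triggers/fixturenet-laconicd-test. The file's content does not matter; committing any change to it is what forces the fixturenet-laconicd-test job to run again, which implies the workflow watches that path. The workflow's on: block is not part of these patches, so the filter below is an assumption about how such a trigger is typically wired up in Gitea Actions:

# Assumed trigger configuration (the actual fixturenet-laconicd-test.yml may differ):
on:
  push:
    paths:
      - '.gitea/workflows/triggers/fixturenet-laconicd-test'
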
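Patch 12/13 appends the same pair of notification steps to every workflow. The pattern has three parts: the step is guarded with if: always() so it still executes after an earlier step has failed, it passes the overall job status to ravsamhq/notify-slack-action, and notify_when: 'failure' makes the action post to Slack only when that status is failure; the webhook URL comes from a repository secret. A trimmed sketch showing the pattern in context (the workflow name, job, and test command are illustrative; the notification step itself mirrors the patch):

name: example-ci-job        # illustrative workflow, not from the repository
on: [push]

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - name: "Run tests"
        run: ./tests/example/run-test.sh   # placeholder test command
      # always() keeps this step running even when the step above failed;
      # notify_when limits the actual Slack post to failed jobs.
      - name: Notify Vulcanize Slack on CI failure
        if: always()
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}
      # A second, otherwise identical step posts to the DeepStack channel using
      # secrets.DEEPSTACK_SLACK_CI_ALERTS.
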
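Patch 13/13 then tightens that guard from if: always() to if: ${{ always() && github.ref_name == 'main' }}. always() is still needed so the step runs after a failing test step, while the ref check stops pull-request runs from alerting: github.ref_name only equals 'main' for runs executing against main itself, which covers exactly the cases listed in the commit message (a direct push to main, a PR merged into main, and a scheduled run on main). The resulting step shape, with the condition taken from the patch and the rest of the step mirroring patch 12/13:

      - name: Notify Vulcanize Slack on CI failure
        # Runs even after a failed step (always()), but only for runs on the main branch.
        if: ${{ always() && github.ref_name == 'main' }}
        uses: ravsamhq/notify-slack-action@v2
        with:
          status: ${{ job.status }}
          notify_when: 'failure'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.VULCANIZE_SLACK_CI_ALERTS }}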