Compare commits

..

2 Commits

Author SHA1 Message Date
zramsay
25dce3f051 records wants string
Some checks failed
Lint Checks / Run linter (pull_request) Failing after 16s
K8s Deployment Control Test / Run deployment control suite on kind/k8s (pull_request) Failing after 1m23s
Smoke Test / Run basic test suite (pull_request) Failing after 1m6s
K8s Deploy Test / Run deploy test suite on kind/k8s (pull_request) Failing after 1m24s
Webapp Test / Run webapp test suite (pull_request) Successful in 1m50s
Deploy Test / Run deploy test suite (pull_request) Successful in 2m36s
2025-05-02 14:12:31 -04:00
zramsay
87251ba65b modifications to accept atom payments
Some checks failed
Lint Checks / Run linter (pull_request) Failing after 27s
Webapp Test / Run webapp test suite (pull_request) Successful in 1m52s
Deploy Test / Run deploy test suite (pull_request) Successful in 2m38s
K8s Deploy Test / Run deploy test suite on kind/k8s (pull_request) Successful in 4m5s
Smoke Test / Run basic test suite (pull_request) Successful in 1m32s
K8s Deployment Control Test / Run deployment control suite on kind/k8s (pull_request) Successful in 2m33s
2025-04-30 11:06:01 -04:00
14 changed files with 197 additions and 141 deletions

View File

@@ -26,14 +26,8 @@ fi
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
WORK_DIR="${1:-/app}"
if [ -f "${WORK_DIR}/build-webapp.sh" ]; then
echo "Building webapp with ${WORK_DIR}/build-webapp.sh ..."
cd "${WORK_DIR}" || exit 1
./build-webapp.sh || exit 1
exit 0
fi
if [ -f "next.config.mjs" ]; then
NEXT_CONFIG_JS="next.config.mjs"
IMPORT_OR_REQUIRE="import"

View File

@@ -30,44 +30,36 @@ fi
CERC_WEBAPP_FILES_DIR="${CERC_WEBAPP_FILES_DIR:-/app}"
cd "$CERC_WEBAPP_FILES_DIR"
if [ -f "./run-webapp.sh" ]; then
echo "Running webapp with run-webapp.sh ..."
cd "${WORK_DIR}" || exit 1
./run-webapp.sh &
tpid=$!
wait $tpid
else
"$SCRIPT_DIR/apply-runtime-env.sh" "`pwd`" .next .next-r
mv .next .next.old
mv .next-r/.next .
"$SCRIPT_DIR/apply-runtime-env.sh" "`pwd`" .next .next-r
mv .next .next.old
mv .next-r/.next .
if [ "$CERC_NEXTJS_SKIP_GENERATE" != "true" ]; then
jq -e '.scripts.cerc_generate' package.json >/dev/null
if [ $? -eq 0 ]; then
npm run cerc_generate > gen.out 2>&1 &
tail -f gen.out &
tpid=$!
if [ "$CERC_NEXTJS_SKIP_GENERATE" != "true" ]; then
jq -e '.scripts.cerc_generate' package.json >/dev/null
if [ $? -eq 0 ]; then
npm run cerc_generate > gen.out 2>&1 &
tail -f gen.out &
tpid=$!
count=0
generate_done="false"
while [ $count -lt $CERC_MAX_GENERATE_TIME ] && [ "$generate_done" == "false" ]; do
sleep 1
count=$((count + 1))
grep 'rendered as static' gen.out > /dev/null
if [ $? -eq 0 ]; then
generate_done="true"
fi
done
if [ $generate_done != "true" ]; then
echo "ERROR: 'npm run cerc_generate' not successful within CERC_MAX_GENERATE_TIME" 1>&2
exit 1
count=0
generate_done="false"
while [ $count -lt $CERC_MAX_GENERATE_TIME ] && [ "$generate_done" == "false" ]; do
sleep 1
count=$((count + 1))
grep 'rendered as static' gen.out > /dev/null
if [ $? -eq 0 ]; then
generate_done="true"
fi
done
kill $tpid $(ps -ef | grep node | grep next | grep generate | awk '{print $2}') 2>/dev/null
tpid=""
if [ $generate_done != "true" ]; then
echo "ERROR: 'npm run cerc_generate' not successful within CERC_MAX_GENERATE_TIME" 1>&2
exit 1
fi
fi
$CERC_BUILD_TOOL start . -- -p ${CERC_LISTEN_PORT:-80}
kill $tpid $(ps -ef | grep node | grep next | grep generate | awk '{print $2}') 2>/dev/null
tpid=""
fi
fi
$CERC_BUILD_TOOL start . -- -p ${CERC_LISTEN_PORT:-80}

View File

@@ -14,6 +14,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from stack_orchestrator.deploy.deployment_context import DeploymentContext
from ruamel.yaml import YAML
def create(context: DeploymentContext, extra_args):
@@ -22,12 +23,17 @@ def create(context: DeploymentContext, extra_args):
# deterministic-deployment-proxy contract, which itself is a prereq for Optimism contract deployment
fixturenet_eth_compose_file = context.deployment_dir.joinpath('compose', 'docker-compose-fixturenet-eth.yml')
with open(fixturenet_eth_compose_file, 'r') as yaml_file:
yaml = YAML()
yaml_data = yaml.load(yaml_file)
new_script = '../config/fixturenet-optimism/run-geth.sh:/opt/testnet/run.sh'
def add_geth_volume(yaml_data):
if new_script not in yaml_data['services']['fixturenet-eth-geth-1']['volumes']:
yaml_data['services']['fixturenet-eth-geth-1']['volumes'].append(new_script)
if new_script not in yaml_data['services']['fixturenet-eth-geth-1']['volumes']:
yaml_data['services']['fixturenet-eth-geth-1']['volumes'].append(new_script)
context.modify_yaml(fixturenet_eth_compose_file, add_geth_volume)
with open(fixturenet_eth_compose_file, 'w') as yaml_file:
yaml = YAML()
yaml.dump(yaml_data, yaml_file)
return None

View File

@@ -2,6 +2,7 @@ version: "1.0"
name: test
description: "A test stack"
repos:
- git.vdb.to/cerc-io/laconicd
- git.vdb.to/cerc-io/test-project@test-branch
containers:
- cerc/test-container

View File

@@ -45,22 +45,20 @@ class DeploymentContext:
def get_compose_dir(self):
return self.deployment_dir.joinpath(constants.compose_dir_name)
def get_compose_file(self, name: str):
return self.get_compose_dir() / f"docker-compose-{name}.yml"
def get_cluster_id(self):
return self.id
def init(self, dir: Path):
self.deployment_dir = dir.absolute()
def init(self, dir):
self.deployment_dir = dir
self.spec = Spec()
self.spec.init_from_file(self.get_spec_file())
self.stack = Stack(self.spec.obj["stack"])
self.stack.init_from_file(self.get_stack_file())
deployment_file_path = self.get_deployment_file()
if deployment_file_path.exists():
obj = get_yaml().load(open(deployment_file_path, "r"))
self.id = obj[constants.cluster_id_key]
with deployment_file_path:
obj = get_yaml().load(open(deployment_file_path, "r"))
self.id = obj[constants.cluster_id_key]
# Handle the case of a legacy deployment with no file
# Code below is intended to match the output from _make_default_cluster_name()
# TODO: remove when we no longer need to support legacy deployments
@@ -69,19 +67,3 @@ class DeploymentContext:
unique_cluster_descriptor = f"{path},{self.get_stack_file()},None,None"
hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()[:16]
self.id = f"{constants.cluster_name_prefix}{hash}"
def modify_yaml(self, file_path: Path, modifier_func):
"""
Load a YAML from the deployment, apply a modification function, and write it back.
"""
if not file_path.absolute().is_relative_to(self.deployment_dir):
raise ValueError(f"File is not inside deployment directory: {file_path}")
yaml = get_yaml()
with open(file_path, 'r') as f:
yaml_data = yaml.load(f)
modifier_func(yaml_data)
with open(file_path, 'w') as f:
yaml.dump(yaml_data, f)

View File

@@ -443,16 +443,18 @@ def _check_volume_definitions(spec):
@click.command()
@click.option("--spec-file", required=True, help="Spec file to use to create this deployment")
@click.option("--deployment-dir", help="Create deployment files in this directory")
@click.argument('extra_args', nargs=-1, type=click.UNPROCESSED)
# TODO: Hack
@click.option("--network-dir", help="Network configuration supplied in this directory")
@click.option("--initial-peers", help="Initial set of persistent peers")
@click.pass_context
def create(ctx, spec_file, deployment_dir, extra_args):
def create(ctx, spec_file, deployment_dir, network_dir, initial_peers):
deployment_command_context = ctx.obj
return create_operation(deployment_command_context, spec_file, deployment_dir, extra_args)
return create_operation(deployment_command_context, spec_file, deployment_dir, network_dir, initial_peers)
# The init command's implementation is in a separate function so that we can
# call it from other commands, bypassing the click decoration stuff
def create_operation(deployment_command_context, spec_file, deployment_dir, extra_args):
def create_operation(deployment_command_context, spec_file, deployment_dir, network_dir, initial_peers):
parsed_spec = Spec(os.path.abspath(spec_file), get_parsed_deployment_spec(spec_file))
_check_volume_definitions(parsed_spec)
stack_name = parsed_spec["stack"]
@@ -539,7 +541,7 @@ def create_operation(deployment_command_context, spec_file, deployment_dir, extr
deployer_config_generator = getDeployerConfigGenerator(deployment_type, deployment_context)
# TODO: make deployment_dir_path a Path above
deployer_config_generator.generate(deployment_dir_path)
call_stack_deploy_create(deployment_context, extra_args)
call_stack_deploy_create(deployment_context, [network_dir, initial_peers, deployment_command_context])
# TODO: this code should be in the stack .py files but

View File

@@ -92,8 +92,9 @@ class Spec:
return self.obj.get(item, default)
def init_from_file(self, file_path: Path):
self.obj = get_yaml().load(open(file_path, "r"))
self.file_path = file_path
with file_path:
self.obj = get_yaml().load(open(file_path, "r"))
self.file_path = file_path
def get_image_registry(self):
return self.obj.get(constants.image_registry_key)

View File

@@ -27,4 +27,5 @@ class Stack:
self.name = name
def init_from_file(self, file_path: Path):
self.obj = get_yaml().load(open(file_path, "r"))
with file_path:
self.obj = get_yaml().load(open(file_path, "r"))

View File

@@ -92,6 +92,7 @@ def create_deployment(ctx, deployment_dir, image, url, kube_config, image_regist
spec_file_name,
deployment_dir,
None,
None
)
# Fix up the container tag inside the deployment compose file
_fixup_container_tag(deployment_dir, image)

View File

@@ -342,6 +342,17 @@ def dump_known_requests(filename, requests, status="SEEN"):
help="Requests must have a minimum payment to be processed (in alnt)",
default=0,
)
@click.option(
"--atom-payment-address",
help="Cosmos ATOM address to receive payments",
default=None,
)
@click.option(
"--min-atom-payment",
help="Minimum required ATOM payment amount",
default=1,
type=float,
)
@click.option("--lrn", help="The LRN of this deployer.", required=True)
@click.option(
"--all-requests",
@@ -394,6 +405,8 @@ def command( # noqa: C901
recreate_on_deploy,
log_dir,
min_required_payment,
atom_payment_address,
min_atom_payment,
lrn,
config_upload_dir,
private_key_file,
@@ -630,6 +643,8 @@ def command( # noqa: C901
payment_address,
min_required_payment,
main_logger,
atom_payment_address,
min_atom_payment,
):
main_logger.log(f"{r.id}: Payment confirmed.")
requests_to_execute.append(r)

View File

@@ -46,6 +46,17 @@ from stack_orchestrator.deploy.webapp.util import LaconicRegistryClient
help="List the minimum required payment (in alnt) to process a deployment request.",
default=0,
)
@click.option(
"--atom-payment-address",
help="The Cosmos ATOM address to which payments should be made.",
default=None,
)
@click.option(
"--min-atom-payment",
help="List the minimum required payment (in uatom) to process a deployment request.",
default="1000000uatom",
type=str,
)
@click.option(
"--dry-run",
help="Don't publish anything, just report what would be done.",
@@ -60,6 +71,8 @@ def command( # noqa: C901
lrn,
payment_address,
min_required_payment,
atom_payment_address,
min_atom_payment,
dry_run,
):
laconic = LaconicRegistryClient(laconic_config)
@@ -84,6 +97,10 @@ def command( # noqa: C901
"minimumPayment"
] = f"{min_required_payment}alnt"
if atom_payment_address:
webapp_deployer_record["record"]["atomPaymentAddress"] = atom_payment_address
webapp_deployer_record["record"]["minimumAtomPayment"] = min_atom_payment
if dry_run:
yaml.dump(webapp_deployer_record, sys.stdout)
return

View File

@@ -801,7 +801,7 @@ def skip_by_tag(r, include_tags, exclude_tags):
return False
def confirm_payment(laconic: LaconicRegistryClient, record, payment_address, min_amount, logger):
def confirm_payment(laconic: LaconicRegistryClient, record, payment_address, min_amount, logger, atom_payment_address=None, atom_min_amount=None):
req_owner = laconic.get_owner(record)
if req_owner == payment_address:
# No need to confirm payment if the sender and recipient are the same account.
@@ -811,67 +811,114 @@ def confirm_payment(laconic: LaconicRegistryClient, record, payment_address, min
logger.log(f"{record.id}: no payment tx info")
return False
# Try to verify as a laconic payment first
tx = laconic.get_tx(record.attributes.payment)
if not tx:
logger.log(f"{record.id}: cannot locate payment tx")
return False
if tx.code != 0:
logger.log(
f"{record.id}: payment tx {tx.hash} was not successful - code: {tx.code}, log: {tx.log}"
)
return False
if tx.sender != req_owner:
logger.log(
f"{record.id}: payment sender {tx.sender} in tx {tx.hash} does not match deployment "
f"request owner {req_owner}"
)
return False
if tx.recipient != payment_address:
logger.log(
f"{record.id}: payment recipient {tx.recipient} in tx {tx.hash} does not match {payment_address}"
)
return False
pay_denom = "".join([i for i in tx.amount if not i.isdigit()])
if pay_denom != "alnt":
logger.log(
f"{record.id}: {pay_denom} in tx {tx.hash} is not an expected payment denomination"
)
return False
pay_amount = int("".join([i for i in tx.amount if i.isdigit()]))
if pay_amount < min_amount:
logger.log(
f"{record.id}: payment amount {tx.amount} is less than minimum {min_amount}"
)
return False
# Check if the payment was already used on a deployment
used = laconic.app_deployments(
{"deployer": record.attributes.deployer, "payment": tx.hash}, all=True
)
if len(used):
# Fetch the app name from request record
used_request = laconic.get_record(used[0].attributes.request, require=True)
# Check that payment was used for deployment of same application
if record.attributes.application != used_request.attributes.application:
logger.log(f"{record.id}: payment {tx.hash} already used on a different application deployment {used}")
if tx:
if tx.code != 0:
logger.log(
f"{record.id}: payment tx {tx.hash} was not successful - code: {tx.code}, log: {tx.log}"
)
return False
used = laconic.app_deployment_removals(
{"deployer": record.attributes.deployer, "payment": tx.hash}, all=True
)
if len(used):
logger.log(
f"{record.id}: payment {tx.hash} already used on deployment removal {used}"
)
return False
if tx.sender != req_owner:
logger.log(
f"{record.id}: payment sender {tx.sender} in tx {tx.hash} does not match deployment "
f"request owner {req_owner}"
)
return False
return True
if tx.recipient != payment_address:
logger.log(
f"{record.id}: payment recipient {tx.recipient} in tx {tx.hash} does not match {payment_address}"
)
return False
pay_denom = "".join([i for i in tx.amount if not i.isdigit()])
if pay_denom != "alnt":
logger.log(
f"{record.id}: {pay_denom} in tx {tx.hash} is not an expected payment denomination"
)
return False
pay_amount = int("".join([i for i in tx.amount if i.isdigit()]))
if pay_amount < min_amount:
logger.log(
f"{record.id}: payment amount {tx.amount} is less than minimum {min_amount}"
)
return False
# Check if the payment was already used on a deployment
used = laconic.app_deployments(
{"deployer": record.attributes.deployer, "payment": tx.hash}, all=True
)
if len(used):
# Fetch the app name from request record
used_request = laconic.get_record(used[0].attributes.request, require=True)
# Check that payment was used for deployment of same application
if record.attributes.application != used_request.attributes.application:
logger.log(f"{record.id}: payment {tx.hash} already used on a different application deployment {used}")
return False
used = laconic.app_deployment_removals(
{"deployer": record.attributes.deployer, "payment": tx.hash}, all=True
)
if len(used):
logger.log(
f"{record.id}: payment {tx.hash} already used on deployment removal {used}"
)
return False
return True
# If we get here, the transaction hash wasn't found in the laconic testnet
# Let's check if it's a valid Cosmos ATOM payment if configuration is available
if atom_payment_address:
logger.log(f"{record.id}: checking if payment is a valid Cosmos ATOM transaction")
try:
import requests
# Use the webapp-deployment-status-api to verify the ATOM payment
deployer_record = laconic.get_record(record.attributes.deployer)
if not deployer_record or not deployer_record.attributes.apiUrl:
logger.log(f"{record.id}: cannot find deployer API URL to verify ATOM payment")
return False
api_url = deployer_record.attributes.apiUrl
verify_url = f"{api_url}/verify/atom-payment"
# Make a request to the API to verify the ATOM payment
# Pass markAsUsed=true to prevent this transaction from being used again
response = requests.post(
verify_url,
json={
"txHash": record.attributes.payment,
"minAmount": atom_min_amount,
"markAsUsed": True
},
timeout=10
)
if response.status_code != 200:
logger.log(f"{record.id}: ATOM payment verification API request failed with status {response.status_code}")
return False
result = response.json()
if not result.get("valid", False):
logger.log(f"{record.id}: ATOM payment verification failed: {result.get('reason', 'unknown reason')}")
return False
# Payment is valid
logger.log(f"{record.id}: ATOM payment verified successfully, amount: {result.get('amount')} ATOM")
return True
except Exception as e:
logger.log(f"{record.id}: error verifying ATOM payment: {str(e)}")
return False
logger.log(f"{record.id}: payment tx {record.attributes.payment} not found in laconic testnet and ATOM payment verification not configured")
return False
def confirm_auction(laconic: LaconicRegistryClient, record, deployer_lrn, payment_address, logger):

View File

@@ -180,7 +180,9 @@ def get_k8s_dir():
def get_parsed_deployment_spec(spec_file):
spec_file_path = Path(spec_file)
try:
return get_yaml().load(open(spec_file_path, "r"))
with spec_file_path:
deploy_spec = get_yaml().load(open(spec_file_path, "r"))
return deploy_spec
except FileNotFoundError as error:
# We try here to generate a useful diagnostic error
print(f"Error: spec file: {spec_file_path} does not exist")

View File

@@ -14,13 +14,8 @@ delete_cluster_exit () {
# Test basic stack-orchestrator deploy
echo "Running stack-orchestrator deploy test"
if [ "$1" == "from-path" ]; then
TEST_TARGET_SO="laconic-so"
else
TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
fi
# Bit of a hack, test the most recent package
TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
# Set a non-default repo dir
export CERC_REPO_BASE_DIR=~/stack-orchestrator-test/repo-base-dir
echo "Testing this package: $TEST_TARGET_SO"