Add port config to deployment spec. (#498)

* Add port config to deployment spec.
Thomas E Lackey 2023-08-11 15:25:54 -05:00 committed by GitHub
parent fd78935fe6
commit 18e45af463
2 changed files with 72 additions and 17 deletions


@@ -70,12 +70,41 @@ To permanently *delete* the stack's data volumes run:
$ laconic-so deployment --dir mainnet-eth-deployment stop --delete-data-volumes
```
After deleting the volumes, any subsequent re-start will begin chain sync from cold.
## Ports
It is usually necessary to expose certain container ports on one or more of the host's addresses to allow incoming connections.
Any ports defined in the Docker compose file are exposed by default with random port assignments, but the values can be
customized by editing the "spec" file generated by `laconic-so deploy init`.
In this example, ports `8545` and `5052` have been assigned to a specific address/port combination on the host, while
port `40000` has been left with a random assignment:
```
$ cat mainnet-eth-spec.yml
stack: mainnet-eth
ports:
  mainnet-eth-geth-1:
    - '10.10.10.10:8545:8545'
    - '40000'
  mainnet-eth-lighthouse-1:
    - '10.10.10.10:5052:5052'
volumes:
  mainnet_eth_config_data: ./data/mainnet_eth_config_data
  mainnet_eth_geth_1_data: ./data/mainnet_eth_geth_1_data
  mainnet_eth_lighthouse_1_data: ./data/mainnet_eth_lighthouse_1_data
```
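When the deployment is created, these values are applied to the generated compose file: each service's `ports` entry is replaced wholesale with the list given in the spec (see the `_fixup_pod_file` change further down in this commit).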
## Data volumes
Container data volumes are bind-mounted to specified paths in the host filesystem.
The default setup (generated by `laconic-so deploy init`) places the volumes in the `./data` subdirectory of the deployment directory:
```
$ cat mainnet-eth-spec.yml
stack: mainnet-eth
ports:
  mainnet-eth-geth-1:
    - '10.10.10.10:8545:8545'
    - '40000'
  mainnet-eth-lighthouse-1:
    - '10.10.10.10:5052:5052'
volumes:
  mainnet_eth_config_data: ./data/mainnet_eth_config_data
  mainnet_eth_geth_1_data: ./data/mainnet_eth_geth_1_data


@@ -27,6 +27,20 @@ from app.deploy_types import DeploymentContext, DeployCommandContext
def _make_default_deployment_dir():
    return "deployment-001"

def _get_ports(stack):
    ports = {}
    parsed_stack = get_parsed_stack_config(stack)
    pods = parsed_stack["pods"]
    yaml = get_yaml()
    for pod in pods:
        pod_file_path = os.path.join(get_compose_file_dir(), f"docker-compose-{pod}.yml")
        parsed_pod_file = yaml.load(open(pod_file_path, "r"))
        if "services" in parsed_pod_file:
            for svc_name, svc in parsed_pod_file["services"].items():
                if "ports" in svc:
                    # Ports can appear as strings or numbers. We normalize them as strings.
                    ports[svc_name] = [str(x) for x in svc["ports"]]
    return ports
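For illustration, a minimal sketch of what `_get_ports` returns for the mainnet-eth stack documented above, assuming compose files that declare the same ports shown in the README example (the exact values are assumptions):
```
ports = _get_ports("mainnet-eth")
# Illustrative shape of the result; keys are compose service names,
# values are the declared ports normalized to strings:
# {
#     "mainnet-eth-geth-1": ["8545", "40000"],
#     "mainnet-eth-lighthouse-1": ["5052"],
# }
```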
def _get_named_volumes(stack):
    # Parse the compose files looking for named volumes
@@ -62,24 +76,30 @@ def _create_bind_dir_if_relative(volume, path_string, compose_dir):
# See: https://stackoverflow.com/questions/45699189/editing-docker-compose-yml-with-pyyaml
def _fixup_pod_file(pod, spec, compose_dir):
    # Fix up volumes
    if "volumes" in spec:
        spec_volumes = spec["volumes"]
        if "volumes" in pod:
            pod_volumes = pod["volumes"]
            for volume in pod_volumes.keys():
                if volume in spec_volumes:
                    volume_spec = spec_volumes[volume]
                    volume_spec_fixedup = volume_spec if Path(volume_spec).is_absolute() else f".{volume_spec}"
                    _create_bind_dir_if_relative(volume, volume_spec, compose_dir)
                    new_volume_spec = {"driver": "local",
                                       "driver_opts": {
                                           "type": "none",
                                           "device": volume_spec_fixedup,
                                           "o": "bind"
                                       }
                                       }
                    pod["volumes"][volume] = new_volume_spec
    # Fix up ports
    if "ports" in spec:
        spec_ports = spec["ports"]
        for container_name, container_ports in spec_ports.items():
            if container_name in pod["services"]:
                pod["services"][container_name]["ports"] = container_ports
def call_stack_deploy_init(deploy_command_context):
@@ -148,12 +168,18 @@ def init(ctx, output):
    spec_file_content.update(default_spec_file_content)
    if verbose:
        print(f"Creating spec file for stack: {stack}")
    ports = _get_ports(stack)
    if ports:
        spec_file_content["ports"] = ports
    named_volumes = _get_named_volumes(stack)
    if named_volumes:
        volume_descriptors = {}
        for named_volume in named_volumes:
            volume_descriptors[named_volume] = f"./data/{named_volume}"
        spec_file_content["volumes"] = volume_descriptors
    with open(output, "w") as output_file:
        yaml.dump(spec_file_content, output_file)
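Putting it together, a sketch of the `spec_file_content` dict that `init` would now dump for the mainnet-eth stack; the values mirror the README example above, though a freshly generated spec carries the compose files' default port values rather than customized host addresses (those are added by hand afterwards):
```
spec_file_content = {
    "stack": "mainnet-eth",
    "ports": {
        "mainnet-eth-geth-1": ["8545", "40000"],
        "mainnet-eth-lighthouse-1": ["5052"],
    },
    "volumes": {
        "mainnet_eth_config_data": "./data/mainnet_eth_config_data",
        "mainnet_eth_geth_1_data": "./data/mainnet_eth_geth_1_data",
        "mainnet_eth_lighthouse_1_data": "./data/mainnet_eth_lighthouse_1_data",
    },
}
```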