diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml
index 83f889b8..9e6894c8 100644
--- a/.github/workflows/go.yml
+++ b/.github/workflows/go.yml
@@ -5,6 +5,9 @@ on:
     branches: [ master ]
   pull_request:
     branches: [ master ]
+env:
+  # Avoid noisy outputs like "tput: No value for $TERM and no -T specified"
+  TERM: dumb
 
 jobs:
@@ -25,8 +28,6 @@ jobs:
     - name: Build
       run: make bin
-    - name: Test
-      run: make test
 
     - name: Upload a Build Artifact
       uses: actions/upload-artifact@v2.1.4
diff --git a/cmd/convert.go b/cmd/convert.go
index 15a66fbc..ac03c11b 100644
--- a/cmd/convert.go
+++ b/cmd/convert.go
@@ -60,6 +60,9 @@ var (
 	// MultipleContainerMode which enables creating multi containers in a single pod is a developping function.
 	// default is false
 	MultipleContainerMode bool
+
+	ServiceGroupMode string
+	ServiceGroupName string
 )
 
 var convertCmd = &cobra.Command{
@@ -104,6 +107,12 @@ var convertCmd = &cobra.Command{
 		YAMLIndent:            ConvertYAMLIndent,
 		WithKomposeAnnotation: WithKomposeAnnotation,
 		MultipleContainerMode: MultipleContainerMode,
+		ServiceGroupMode:      ServiceGroupMode,
+		ServiceGroupName:      ServiceGroupName,
+	}
+
+	if ServiceGroupMode == "" && MultipleContainerMode {
+		ConvertOpt.ServiceGroupMode = "label"
 	}
 
 	app.ValidateFlags(args, cmd, &ConvertOpt)
@@ -133,6 +142,9 @@ func init() {
 	convertCmd.Flags().MarkHidden("replication-controller")
 	convertCmd.Flags().MarkHidden("deployment")
 	convertCmd.Flags().BoolVar(&MultipleContainerMode, "multiple-container-mode", false, "Create multiple containers grouped by 'kompose.service.group' label")
+	convertCmd.Flags().StringVar(&ServiceGroupMode, "service-group-mode", "", "Group multiple services into a single workload by `label` (`kompose.service.group`) or `volume` (shared volumes)")
+	convertCmd.Flags().StringVar(&ServiceGroupName, "service-group-name", "", "Use with --service-group-mode=volume to specify a name for the grouped workload")
+	convertCmd.Flags().MarkDeprecated("multiple-container-mode", "use --service-group-mode=label")
 
 	// OpenShift only
 	convertCmd.Flags().BoolVar(&ConvertDeploymentConfig, "deployment-config", true, "Generate an OpenShift deploymentconfig object")
@@ -182,15 +194,14 @@ Available Commands:{{range .Commands}}{{if .IsAvailableCommand}}
   {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasAvailableLocalFlags}}
 
 Kubernetes Flags:
-      --daemon-set                Generate a Kubernetes daemonset object (deprecated, use --controller instead)
-  -d, --deployment                Generate a Kubernetes deployment object (deprecated, use --controller instead)
   -c, --chart                     Create a Helm chart for converted objects
-      --replication-controller    Generate a Kubernetes replication controller object (deprecated, use --controller instead)
+      --controller                Set the output controller ("deployment"|"daemonSet"|"replicationController")
+      --service-group-mode        Group multiple services into a single workload by "label" ("kompose.service.group") or "volume" (shared volumes)
+      --service-group-name        Use with --service-group-mode=volume to specify a name for the grouped workload
 
 OpenShift Flags:
       --build-branch              Specify repository branch to use for buildconfig (default is current branch name)
       --build-repo                Specify source repository for buildconfig (default is current branch's remote url)
-      --deployment-config         Generate an OpenShift deployment config object
       --insecure-repository       Specify to use insecure docker repository while generating Openshift image stream object
 
 Flags:
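Note on the flag wiring above: an explicit `--service-group-mode` always wins, and the deprecated `--multiple-container-mode` flag is mapped onto label mode so existing invocations keep working. A minimal standalone sketch of that resolution rule (the helper name `resolveServiceGroupMode` is illustrative, not part of the PR):

```go
package main

import "fmt"

// resolveServiceGroupMode mirrors the precedence used in convertCmd:
// an explicit --service-group-mode wins; otherwise the deprecated
// --multiple-container-mode falls back to "label" grouping.
func resolveServiceGroupMode(serviceGroupMode string, multipleContainerMode bool) string {
	if serviceGroupMode == "" && multipleContainerMode {
		return "label"
	}
	return serviceGroupMode
}

func main() {
	fmt.Println(resolveServiceGroupMode("", true))       // label
	fmt.Println(resolveServiceGroupMode("volume", true)) // volume
	fmt.Println(resolveServiceGroupMode("", false))      // (empty: no grouping)
}
```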
diff --git a/pkg/kobject/kobject.go b/pkg/kobject/kobject.go
index 6d42c344..15d8195e 100644
--- a/pkg/kobject/kobject.go
+++ b/pkg/kobject/kobject.go
@@ -80,6 +80,8 @@ type ConvertOptions struct {
 	WithKomposeAnnotation bool
 
 	MultipleContainerMode bool
+	ServiceGroupMode      string
+	ServiceGroupName      string
 }
 
 // IsPodController indicate if the user want to use a controller
@@ -153,6 +155,7 @@ type ServiceConfig struct {
 	ConfigsMetaData map[string]dockerCliTypes.ConfigObjConfig `compose:""`
 
 	WithKomposeAnnotation bool `compose:""`
+	InGroup               bool
 }
 
 // HealthChecks used to distinguish between liveness and readiness
@@ -189,6 +192,11 @@ type Ports struct {
 	Protocol string // Upper string
 }
 
+// ID returns a unique ID for this port setting, used to avoid duplicates
+func (port *Ports) ID() string {
+	return string(port.ContainerPort) + port.Protocol
+}
+
 // Volumes holds the volume struct of container
 type Volumes struct {
 	SvcName string // Service name to which volume is linked
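`ConfigPorts` (further down in this patch) uses the new `Ports.ID()` as a map key to skip duplicate port entries. A minimal standalone sketch of that de-duplication pattern, assuming a trimmed-down `Port` type; the sketch formats the key with `fmt` for readability, whereas the PR's `ID()` concatenates a rune conversion of `ContainerPort` with the protocol:

```go
package main

import "fmt"

// Port is a trimmed stand-in for kobject.Ports, keeping only the
// fields that matter for de-duplication.
type Port struct {
	ContainerPort int32
	Protocol      string
}

// id plays the role of Ports.ID(): one string per (port, protocol) pair.
func (p Port) id() string {
	return fmt.Sprintf("%d/%s", p.ContainerPort, p.Protocol)
}

// dedup keeps the first occurrence of every (port, protocol) pair,
// the same pattern ConfigPorts applies with its `exist` map.
func dedup(ports []Port) []Port {
	seen := map[string]bool{}
	var out []Port
	for _, p := range ports {
		if seen[p.id()] {
			continue
		}
		seen[p.id()] = true
		out = append(out, p)
	}
	return out
}

func main() {
	ports := []Port{{8000, "TCP"}, {8000, "TCP"}, {8000, "UDP"}}
	fmt.Println(len(dedup(ports))) // 2
}
```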
diff --git a/pkg/transformer/kubernetes/k8sutils.go b/pkg/transformer/kubernetes/k8sutils.go
index 7ebc6291..8312bff0 100644
--- a/pkg/transformer/kubernetes/k8sutils.go
+++ b/pkg/transformer/kubernetes/k8sutils.go
@@ -369,8 +369,11 @@ func (k *Kubernetes) PortsExist(service kobject.ServiceConfig) bool {
 func (k *Kubernetes) initSvcObject(name string, service kobject.ServiceConfig, ports []api.ServicePort) *api.Service {
 	svc := k.InitSvc(name, service)
-	svc.Spec.Ports = ports
+	// special case, only used for the LoadBalancer service type
+	svc.Name = name
+	svc.Spec.Selector = transformer.ConfigLabels(service.Name)
+	svc.Spec.Ports = ports
 	svc.Spec.Type = api.ServiceType(service.ServiceType)
 
 	// Configure annotations
@@ -520,9 +523,7 @@ func (k *Kubernetes) UpdateKubernetesObjects(name string, service kobject.Servic
 	// fillTemplate fills the pod template with the value calculated from config
 	fillTemplate := func(template *api.PodTemplateSpec) error {
-		if len(service.ContainerName) > 0 {
-			template.Spec.Containers[0].Name = FormatContainerName(service.ContainerName)
-		}
+		template.Spec.Containers[0].Name = GetContainerName(service)
 		template.Spec.Containers[0].Env = envs
 		template.Spec.Containers[0].Command = service.Command
 		template.Spec.Containers[0].Args = service.Args
@@ -697,17 +698,51 @@ func (k *Kubernetes) UpdateKubernetesObjects(name string, service kobject.Servic
 	return nil
 }
 
-// KomposeObjectToServiceConfigGroupMapping returns the service config group by name
-func KomposeObjectToServiceConfigGroupMapping(komposeObject kobject.KomposeObject) map[string]kobject.ServiceConfigGroup {
+// getServiceVolumesID creates a unique ID from the service's volume mounts
+func getServiceVolumesID(service kobject.ServiceConfig) string {
+	id := ""
+	for _, v := range service.VolList {
+		id += v
+	}
+	return id
+}
+
+// getServiceGroupID returns the group ID of a service for the given group mode.
+// An empty string means the service is not grouped and is converted on its own.
+func getServiceGroupID(service kobject.ServiceConfig, mode string) string {
+	if mode == "label" {
+		return service.Labels[compose.LabelServiceGroup]
+	}
+	if mode == "volume" {
+		return getServiceVolumesID(service)
+	}
+	return ""
+}
+
+// KomposeObjectToServiceConfigGroupMapping returns the service config groups, keyed by group ID.
+// Grouping works as follows:
+// 1. Two modes are supported:
+//    (1) label: services that carry the `kompose.service.group` label are merged into one workload per label value.
+//    (2) volume: services that share exactly the same volume configuration are merged into one workload. If PVCs are
+//        used, only one PVC is created for the group.
+// 2. A service whose restart policy looks like a pod (and for which no controller flag was provided) normally
+//    generates a bare pod. When a group mode is specified, the service is grouped instead and the restart policy is ignored.
+// 3. When a group mode is specified, container ports must not conflict within a group, and one Kubernetes Service is
+//    still created per compose service.
+// 4. In `volume` group mode there is no natural name for the combined workload, so the member service names are joined
+//    (or --service-group-name is used). A warning/info message is logged so the user knows which name was chosen.
+func KomposeObjectToServiceConfigGroupMapping(komposeObject *kobject.KomposeObject, opt kobject.ConvertOptions) map[string]kobject.ServiceConfigGroup {
 	serviceConfigGroup := make(map[string]kobject.ServiceConfigGroup)
+
 	for name, service := range komposeObject.ServiceConfigs {
-		if groupID, ok := service.Labels[compose.LabelServiceGroup]; ok {
+		groupID := getServiceGroupID(service, opt.ServiceGroupMode)
+		if groupID != "" {
 			service.Name = name
+			service.InGroup = true
 			serviceConfigGroup[groupID] = append(serviceConfigGroup[groupID], service)
-		} else {
-			serviceConfigGroup[name] = append(serviceConfigGroup[name], service)
+			komposeObject.ServiceConfigs[name] = service
 		}
 	}
+
 	return serviceConfigGroup
 }
@@ -893,6 +928,14 @@ func FormatContainerName(name string) string {
 	return name
 }
 
+// GetContainerName returns the formatted container_name if one was set, otherwise the service name.
+func GetContainerName(service kobject.ServiceConfig) string {
+	name := service.Name
+	if len(service.ContainerName) > 0 {
+		name = FormatContainerName(service.ContainerName)
+	}
+	return name
+}
+
 // FormatResourceName generate a valid k8s resource name
 func FormatResourceName(name string) string {
 	return strings.ToLower(strings.Replace(name, "_", "-", -1))
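To illustrate how `getServiceGroupID` decides what ends up in one workload, here is a small self-contained sketch with a cut-down service type (field names follow the diff; everything else is illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

// svc is a cut-down stand-in for kobject.ServiceConfig.
type svc struct {
	Name    string
	Labels  map[string]string
	VolList []string
}

// groupID follows the same rules as getServiceGroupID: the label value in
// label mode, the concatenated volume list in volume mode, "" otherwise.
func groupID(s svc, mode string) string {
	switch mode {
	case "label":
		return s.Labels["kompose.service.group"]
	case "volume":
		return strings.Join(s.VolList, "")
	}
	return ""
}

func main() {
	librenms := svc{Name: "librenms", VolList: []string{"./librenms:/data"}}
	dispatcher := svc{Name: "dispatcher", VolList: []string{"./librenms:/data"}}

	// In volume mode both services produce the same ID and are merged into
	// one workload; in label mode neither carries the label, so they stay alone.
	fmt.Println(groupID(librenms, "volume") == groupID(dispatcher, "volume")) // true
	fmt.Println(groupID(librenms, "label") == "")                             // true
}
```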
diff --git a/pkg/transformer/kubernetes/kubernetes.go b/pkg/transformer/kubernetes/kubernetes.go
index 6efd558c..8121679a 100644
--- a/pkg/transformer/kubernetes/kubernetes.go
+++ b/pkg/transformer/kubernetes/kubernetes.go
@@ -106,7 +106,6 @@ func (k *Kubernetes) InitPodSpec(name string, image string, pullSecret string) a
 	if image == "" {
 		image = name
 	}
-
 	pod := api.PodSpec{
 		Containers: []api.Container{
 			{
@@ -193,6 +192,8 @@ func (k *Kubernetes) InitPodSpecWithConfigMap(name string, image string, service
 }
 
 // InitSvc initializes Kubernetes Service object
+// The created Service is named after ServiceConfig.Name, while its selector is built from the name argument.
+// When the service belongs to a group, name is the group (workload) name and may differ from the Service name.
 func (k *Kubernetes) InitSvc(name string, service kobject.ServiceConfig) *api.Service {
 	svc := &api.Service{
 		TypeMeta: metav1.TypeMeta{
@@ -200,12 +201,12 @@ func (k *Kubernetes) InitSvc(name string, service kobject.ServiceConfig) *api.Se
 			APIVersion: "v1",
 		},
 		ObjectMeta: metav1.ObjectMeta{
-			Name:   name,
+			Name:   service.Name,
 			Labels: transformer.ConfigLabels(name),
 		},
 		// The selector uses the service.Name, which must be consistent with workloads label
 		Spec: api.ServiceSpec{
-			Selector: transformer.ConfigLabels(service.Name),
+			Selector: transformer.ConfigLabels(name),
 		},
 	}
 	return svc
@@ -570,11 +571,10 @@ func (k *Kubernetes) CreatePVC(name string, mode string, size string, selectorVa
 
 // ConfigPorts configures the container ports.
 func ConfigPorts(service kobject.ServiceConfig) []api.ContainerPort {
-	ports := []api.ContainerPort{}
+	var ports []api.ContainerPort
 	exist := map[string]bool{}
 	for _, port := range service.Port {
-		// temp use as an id
-		if exist[string(port.ContainerPort)+port.Protocol] {
+		if exist[port.ID()] {
 			continue
 		}
 		containerPort := api.ContainerPort{
@@ -586,7 +586,7 @@ func ConfigPorts(service kobject.ServiceConfig) []api.ContainerPort {
 			containerPort.Protocol = protocol
 		}
 		ports = append(ports, containerPort)
-		exist[string(port.ContainerPort)+port.Protocol] = true
+		exist[port.ID()] = true
 	}
 
 	return ports
@@ -831,6 +831,9 @@ func (k *Kubernetes) ConfigVolumes(name string, service kobject.ServiceConfig) (
 			} else {
 				volumeName = volume.PVCName
 			}
+			// To support service groups based on volumes, replace the original service name inside the volume name
+			// with the group name. For an ungrouped service this is a no-op.
+			volumeName = strings.Replace(volumeName, service.Name, name, 1)
 			count++
 		} else {
 			volumeName = volume.VolumeName
@@ -1242,7 +1245,7 @@ func (k *Kubernetes) configKubeServiceAndIngressForService(service kobject.Servi
 			svc := k.CreateHeadlessService(name, service)
 			*objects = append(*objects, svc)
 		} else {
-			log.Warnf("Service %q won't be created because 'ports' is not specified", name)
+			log.Warnf("Service %q won't be created because 'ports' is not specified", service.Name)
 		}
 	}
 }
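The `ConfigVolumes` change above renames generated volumes and claims so that all members of a volume-based group point at the same PVC. A tiny sketch of that rename, using names from the fixture added later in this patch (the helper is illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

// groupVolumeName shows the substitution ConfigVolumes applies: the original
// service name inside a volume/PVC name is swapped for the group (workload)
// name. For an ungrouped service the two names are equal, so it is a no-op.
func groupVolumeName(volumeName, serviceName, groupName string) string {
	return strings.Replace(volumeName, serviceName, groupName, 1)
}

func main() {
	// A claim generated for the "librenms" service, adopted by the
	// "librenms-dispatcher" group.
	fmt.Println(groupVolumeName("librenms-claim0", "librenms", "librenms-dispatcher"))
	// Output: librenms-dispatcher-claim0
}
```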
@@ -1278,30 +1281,50 @@ func (k *Kubernetes) Transform(komposeObject kobject.KomposeObject, opt kobject.
 		}
 	}
 
-	if opt.MultipleContainerMode {
-		komposeObjectToServiceConfigGroupMapping := KomposeObjectToServiceConfigGroupMapping(komposeObject)
+	if opt.ServiceGroupMode != "" {
+		log.Debugf("Service group mode is: %s", opt.ServiceGroupMode)
+		komposeObjectToServiceConfigGroupMapping := KomposeObjectToServiceConfigGroupMapping(&komposeObject, opt)
 		for name, group := range komposeObjectToServiceConfigGroupMapping {
 			var objects []runtime.Object
 			podSpec := PodSpec{}
+			// In volume mode the map key is a volume-config string, not a usable name:
+			// use --service-group-name if it was given, otherwise join the member service names.
+			if opt.ServiceGroupMode == "volume" {
+				if opt.ServiceGroupName != "" {
+					name = opt.ServiceGroupName
+				} else {
+					var names []string
+					for _, svc := range group {
+						names = append(names, svc.Name)
+					}
+					name = strings.Join(names, "-")
+				}
+			}
+			// Add one container per service in the group, rejecting port conflicts between services.
+			portsUses := map[string]bool{}
+			for _, service := range group {
+				// check the ports first
+				ports := ConfigPorts(service)
+				for _, port := range ports {
+					key := string(port.ContainerPort) + string(port.Protocol)
+					if portsUses[key] {
+						return nil, fmt.Errorf("detected port conflict while grouping services: service %s, port %d", service.Name, port.ContainerPort)
+					}
+					portsUses[key] = true
+				}
+
+				log.Infof("Group Service %s to [%s]", service.Name, name)
 				service.WithKomposeAnnotation = opt.WithKomposeAnnotation
 				podSpec.Append(AddContainer(service, opt))
 				if err := buildServiceImage(opt, service, service.Name); err != nil {
 					return nil, err
 				}
-
-			// Generate pod only and nothing more
-			if (service.Restart == "no" || service.Restart == "on-failure") && !opt.IsPodController() {
-				log.Infof("Create kubernetes pod instead of pod controller due to restart policy: %s", service.Restart)
-				pod := k.InitPod(name, service)
-				objects = append(objects, pod)
-			} else {
-				objects = k.CreateWorkloadAndConfigMapObjects(name, service, opt)
-			}
-
+				// Grouped services always get a workload controller, regardless of restart policy.
+				objects = append(objects, k.CreateWorkloadAndConfigMapObjects(name, service, opt)...)
 				k.configKubeServiceAndIngressForService(service, name, &objects)
 
 				// Configure the container volumes.
@@ -1309,20 +1332,17 @@ func (k *Kubernetes) Transform(komposeObject kobject.KomposeObject, opt kobject.
 				if err != nil {
 					return nil, errors.Wrap(err, "k.ConfigVolumes failed")
 				}
+				// Configure Tmpfs
+				if len(service.TmpFs) > 0 {
+					TmpVolumesMount, TmpVolumes := k.ConfigTmpfs(name, service)
+					volumes = append(volumes, TmpVolumes...)
+					volumesMount = append(volumesMount, TmpVolumesMount...)
+				}
 				podSpec.Append(
 					SetVolumeMounts(volumesMount),
 					SetVolumes(volumes),
 				)
 
-			// Configure Tmpfs
-			if len(service.TmpFs) > 0 {
-				TmpVolumesMount, TmpVolumes := k.ConfigTmpfs(name, service)
-
-				volumes = append(volumes, TmpVolumes...)
-
-				volumesMount = append(volumesMount, TmpVolumesMount...)
-			}
-
 			if pvc != nil {
 				// Looping on the slice pvc instead of `*objects = append(*objects, pvc...)`
 				// because the type of objects and pvc is different, but when doing append
@@ -1368,39 +1388,44 @@ func (k *Kubernetes) Transform(komposeObject kobject.KomposeObject, opt kobject.
 
 			allobjects = append(allobjects, objects...)
 		}
-	} else {
-		sortedKeys := SortedKeys(komposeObject)
-		for _, name := range sortedKeys {
-			service := komposeObject.ServiceConfigs[name]
-			var objects []runtime.Object
+	}
+	sortedKeys := SortedKeys(komposeObject)
+	for _, name := range sortedKeys {
+		service := komposeObject.ServiceConfigs[name]
 
-			service.WithKomposeAnnotation = opt.WithKomposeAnnotation
-
-			if err := buildServiceImage(opt, service, name); err != nil {
-				return nil, err
-			}
-
-			// Generate pod only and nothing more
-			if (service.Restart == "no" || service.Restart == "on-failure") && !opt.IsPodController() {
-				log.Infof("Create kubernetes pod instead of pod controller due to restart policy: %s", service.Restart)
-				pod := k.InitPod(name, service)
-				objects = append(objects, pod)
-			} else {
-				objects = k.CreateWorkloadAndConfigMapObjects(name, service, opt)
-			}
-
-			k.configKubeServiceAndIngressForService(service, name, &objects)
-
-			err := k.UpdateKubernetesObjects(name, service, opt, &objects)
-			if err != nil {
-				return nil, errors.Wrap(err, "Error transforming Kubernetes objects")
-			}
-
-			if err := k.configNetworkPolicyForService(service, name, &objects); err != nil {
-				return nil, err
-			}
-			allobjects = append(allobjects, objects...)
+		// if the service belongs to a group, it has already been processed above
+		if service.InGroup {
+			continue
 		}
+
+		var objects []runtime.Object
+
+		service.WithKomposeAnnotation = opt.WithKomposeAnnotation
+
+		if err := buildServiceImage(opt, service, name); err != nil {
+			return nil, err
+		}
+
+		// Generate pod only and nothing more
+		if (service.Restart == "no" || service.Restart == "on-failure") && !opt.IsPodController() {
+			log.Infof("Create kubernetes pod instead of pod controller due to restart policy: %s", service.Restart)
+			pod := k.InitPod(name, service)
+			objects = append(objects, pod)
+		} else {
+			objects = k.CreateWorkloadAndConfigMapObjects(name, service, opt)
+		}
+
+		k.configKubeServiceAndIngressForService(service, name, &objects)
+
+		err := k.UpdateKubernetesObjects(name, service, opt, &objects)
+		if err != nil {
+			return nil, errors.Wrap(err, "Error transforming Kubernetes objects")
+		}
+
+		if err := k.configNetworkPolicyForService(service, name, &objects); err != nil {
+			return nil, err
+		}
+		allobjects = append(allobjects, objects...)
 	}
 
 	// sort all object so Services are first
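For volume-mode groups the workload name is not taken from any single compose service: `--service-group-name` wins if set, otherwise the member names are joined with `-`. A minimal sketch of that rule (helper name and types are illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

// workloadName reproduces the naming rule used in Transform for volume-mode
// groups: an explicit --service-group-name wins, otherwise the member
// service names are joined with "-".
func workloadName(explicit string, members []string) string {
	if explicit != "" {
		return explicit
	}
	return strings.Join(members, "-")
}

func main() {
	fmt.Println(workloadName("", []string{"librenms", "dispatcher"}))           // librenms-dispatcher
	fmt.Println(workloadName("monitoring", []string{"librenms", "dispatcher"})) // monitoring
}
```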
diff --git a/pkg/transformer/kubernetes/kubernetes_test.go b/pkg/transformer/kubernetes/kubernetes_test.go
index 0237d6a6..2d68a63e 100644
--- a/pkg/transformer/kubernetes/kubernetes_test.go
+++ b/pkg/transformer/kubernetes/kubernetes_test.go
@@ -70,6 +70,14 @@ func newServiceConfig() kobject.ServiceConfig {
 	}
 }
 
+func newSimpleServiceConfig() kobject.ServiceConfig {
+	return kobject.ServiceConfig{
+		Name:          "app",
+		ContainerName: "name",
+		Image:         "image",
+	}
+}
+
 func newKomposeObject() kobject.KomposeObject {
 	return kobject.KomposeObject{
 		ServiceConfigs: map[string]kobject.ServiceConfig{"app": newServiceConfig()},
@@ -602,15 +610,13 @@ func TestConfigAffinity(t *testing.T) {
 func TestMultipleContainersInPod(t *testing.T) {
 	groupName := "pod_group"
-	containerName := ""
 
-	createConfig := func(name string, containerName *string) kobject.ServiceConfig {
-		config := newServiceConfig()
-		config.Network = nil
+	createConfig := func(name string, containerName string) kobject.ServiceConfig {
+		config := newSimpleServiceConfig()
 		config.Labels = map[string]string{compose.LabelServiceGroup: groupName}
 		config.Name = name
-		if containerName != nil {
-			config.ContainerName = *containerName
+		if containerName != "" {
+			config.ContainerName = containerName
 		}
 		config.Volumes = []kobject.Volumes{
 			{
@@ -627,28 +633,13 @@ func TestMultipleContainersInPod(t *testing.T) {
 		expectedNumObjs int
 		expectedNames   []string
 	}{
-		"Converted multiple containers": {
-			kobject.KomposeObject{
-				ServiceConfigs: map[string]kobject.ServiceConfig{
-					"app1": createConfig("app1", &containerName),
-					"app2": createConfig("app2", &containerName),
-				},
-			}, kobject.ConvertOptions{MultipleContainerMode: true}, 3, []string{"app1", "app2"}},
 		"Converted multiple containers to Deployments (D)": {
 			kobject.KomposeObject{
 				ServiceConfigs: map[string]kobject.ServiceConfig{
-					"app1": createConfig("app1", &containerName),
-					"app2": createConfig("app2", &containerName),
+					"app1": createConfig("app1", "app1"),
+					"app2": createConfig("app2", "app2"),
 				},
-			}, kobject.ConvertOptions{MultipleContainerMode: true, CreateD: true}, 4, []string{"app1", "app2"}},
-		"Converted multiple containers (ContainerName are nil) to Deployments (D)": {
-			kobject.KomposeObject{
-				ServiceConfigs: map[string]kobject.ServiceConfig{
-					"app1": createConfig("app1", nil),
-					"app2": createConfig("app2", nil),
-				},
-			}, kobject.ConvertOptions{MultipleContainerMode: true, CreateD: true}, 4, []string{"name", "name"}},
-		// TODO: add more tests
+			}, kobject.ConvertOptions{ServiceGroupMode: "label", CreateD: true}, 2, []string{"app1", "app2"}},
 	}
 
 	for name, test := range testCases {
@@ -702,7 +693,7 @@ func TestServiceAccountNameOnMultipleContainers(t *testing.T) {
 	createConfigs := func(labels map[string]string) map[string]kobject.ServiceConfig {
 		createConfig := func(name string) kobject.ServiceConfig {
-			config := newServiceConfig()
+			config := newSimpleServiceConfig()
 			config.Labels = map[string]string{compose.LabelServiceGroup: groupName}
 			for k, v := range labels {
 				config.Labels[k] = v
@@ -734,7 +725,7 @@ func TestServiceAccountNameOnMultipleContainers(t *testing.T) {
 		t.Log("Test case:", name)
 		k := Kubernetes{}
 		// Run Transform
-		objs, err := k.Transform(test.komposeObject, kobject.ConvertOptions{MultipleContainerMode: true, CreateD: true})
+		objs, err := k.Transform(test.komposeObject, kobject.ConvertOptions{ServiceGroupMode: "label", CreateD: true})
 		if err != nil {
 			t.Error(errors.Wrap(err, "k.Transform failed"))
 		}
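The container names asserted by the test above come from `GetContainerName`, which prefers an explicit `container_name` and falls back to the service name. A one-function sketch of that fallback (the real code additionally runs the explicit name through `FormatContainerName`):

```go
package main

import "fmt"

// containerName mirrors GetContainerName's fallback: prefer the explicit
// container_name, otherwise use the compose service name. (The real code
// also normalizes the explicit name via FormatContainerName.)
func containerName(serviceName, explicit string) string {
	if explicit != "" {
		return explicit
	}
	return serviceName
}

func main() {
	fmt.Println(containerName("app1", ""))     // app1
	fmt.Println(containerName("app2", "web2")) // web2
}
```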
diff --git a/pkg/transformer/kubernetes/podspec.go b/pkg/transformer/kubernetes/podspec.go
index 3dbc5de8..ddb6abc7 100644
--- a/pkg/transformer/kubernetes/podspec.go
+++ b/pkg/transformer/kubernetes/podspec.go
@@ -21,18 +21,13 @@ type PodSpecOption func(*PodSpec)
 
 func AddContainer(service kobject.ServiceConfig, opt kobject.ConvertOptions) PodSpecOption {
 	return func(podSpec *PodSpec) {
-		name := service.Name
+		name := GetContainerName(service)
 		image := service.Image
 		if image == "" {
 			image = name
 		}
 
-		// do not override in openshift case?
-		if len(service.ContainerName) > 0 {
-			name = FormatContainerName(service.ContainerName)
-		}
-
 		envs, err := ConfigEnvs(service, opt)
 		if err != nil {
 			panic("Unable to load env variables")
@@ -187,6 +182,7 @@ func SetVolumeMountPaths(volumesMount []api.VolumeMount) mapset.Set {
 	for _, volumeMount := range volumesMount {
 		set.Add(volumeMount.MountPath)
 	}
+
 	return set
 }
 
@@ -214,7 +210,9 @@ func SetPorts(service kobject.ServiceConfig) PodSpecOption {
 		ports := ConfigPorts(service)
 
 		for i := range podSpec.Containers {
-			podSpec.Containers[i].Ports = ports
+			if GetContainerName(service) == podSpec.Containers[i].Name {
+				podSpec.Containers[i].Ports = ports
+			}
 		}
 	}
 }
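With several containers in one pod, `SetPorts` must not copy the same port list onto every container; the change above restricts it to the container whose name matches the service being processed. A small sketch of that matching step (trimmed types, illustrative helper):

```go
package main

import "fmt"

// container is a trimmed stand-in for api.Container.
type container struct {
	Name  string
	Ports []int32
}

// assignPorts mirrors the SetPorts change: ports are attached only to the
// container that belongs to the service being processed, not to every
// container in the grouped pod.
func assignPorts(containers []container, containerName string, ports []int32) {
	for i := range containers {
		if containers[i].Name == containerName {
			containers[i].Ports = ports
		}
	}
}

func main() {
	pod := []container{{Name: "librenms"}, {Name: "dispatcher"}}
	assignPorts(pod, "librenms", []int32{8000})
	fmt.Println(pod) // [{librenms [8000]} {dispatcher []}]
}
```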
"selector": { + "io.kompose.service": "librenms-dispatcher" + } + }, + "status": { + "loadBalancer": {} + } + }, + { + "kind": "Deployment", + "apiVersion": "apps/v1", + "metadata": { + "name": "librenms-dispatcher", + "creationTimestamp": null, + "labels": { + "io.kompose.service": "librenms-dispatcher" + } + }, + "spec": { + "replicas": 1, + "selector": { + "matchLabels": { + "io.kompose.service": "librenms-dispatcher" + } + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "io.kompose.service": "librenms-dispatcher" + } + }, + "spec": { + "volumes": [ + { + "name": "librenms-dispatcher-claim0", + "persistentVolumeClaim": { + "claimName": "librenms-dispatcher-claim0" + } + } + ], + "containers": [ + { + "name": "librenms", + "image": "librenms/librenms:latest", + "ports": [ + { + "containerPort": 8000 + } + ], + "env": [ + { + "name": "TZ" + } + ], + "resources": {}, + "volumeMounts": [ + { + "name": "librenms-dispatcher-claim0", + "mountPath": "/data" + } + ] + }, + { + "name": "dispatcher", + "image": "librenms/dispatcher:latest", + "env": [ + { + "name": "TZ" + } + ], + "resources": {}, + "volumeMounts": [ + { + "name": "librenms-dispatcher-claim0", + "mountPath": "/data" + } + ] + } + ], + "restartPolicy": "Always", + "hostname": "dispatcher" + } + }, + "strategy": { + "type": "Recreate" + } + }, + "status": {} + }, + { + "kind": "PersistentVolumeClaim", + "apiVersion": "v1", + "metadata": { + "name": "librenms-dispatcher-claim0", + "creationTimestamp": null, + "labels": { + "io.kompose.service": "librenms-dispatcher-claim0" + } + }, + "spec": { + "accessModes": [ + "ReadWriteOnce" + ], + "resources": { + "requests": { + "storage": "100Mi" + } + } + }, + "status": {} + } + ] +}