Merge pull request #1806 from realgam3/cronjob
Add Labels for CronJob https://github.com/kubernetes/kompose/issues/1795
Commit f11ce5491e
@@ -180,7 +180,7 @@ explicitly define the generated resources' behavior upon conversion, like Servic
The currently supported options are:

| Key | Value |
| --------------------------------------------------- | ------------------------------------------------------------------------------------ |
|-----------------------------------------------------|--------------------------------------------------------------------------------------|
| kompose.service.type | nodeport / clusterip / loadbalancer / headless |
| kompose.service.group | name to group the containers contained in a single pod |
| kompose.service.expose | true / hostnames (separated by comma) |
@@ -205,9 +205,12 @@ The currently supported options are:
| kompose.service.healthcheck.liveness.http_get_path | kubernetes liveness httpGet path |
| kompose.service.healthcheck.liveness.http_get_port | kubernetes liveness httpGet port |
| kompose.service.healthcheck.liveness.tcp_port | kubernetes liveness tcpSocket port |
| kompose.service.external-traffic-policy | 'cluster', 'local', '' | |
| kompose.security-context.fsgroup | kubernetes pod security group fsgroup | |
| kompose.volume.sub-path | kubernetes volume mount subpath | |
| kompose.service.external-traffic-policy | 'cluster', 'local', '' |
| kompose.security-context.fsgroup | kubernetes pod security group fsgroup |
| kompose.volume.sub-path | kubernetes volume mount subpath |
| kompose.cronjob.schedule | kubernetes cronjob schedule (for example: '1 * * * *') |
| kompose.cronjob.concurrency_policy | 'Forbid' / 'Allow' / 'Replace' / '' |
| kompose.cronjob.backoff_limit | kubernetes cronjob backoff limit (for example: '6') |

**Note**: the `kompose.service.type` label must be defined together with `ports` (except for a headless service), otherwise `kompose` will fail.
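For instance, a minimal Compose service satisfying that constraint might look like the following sketch (the service name, image, and port are hypothetical):

```yaml
services:
  web:
    image: nginx:alpine        # hypothetical image
    ports:
      - "8080:80"              # kompose.service.type requires ports to be declared
    labels:
      kompose.service.type: nodeport
```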
@@ -469,12 +472,12 @@ services:
If you want to create normal pods without a controller, you can use the `restart` construct of Compose to define that. The table below shows what happens for each `restart` value (a sketch of the resulting Pod spec follows the table).

| `compose` `restart` | object created | Pod `restartPolicy` |
| -------------------------- | ----------------- | ------------------- |
| `""` | controller object | `Always` |
| `always` | controller object | `Always` |
| `unless-stopped` | controller object | `Always` |
| `on-failure` | Pod | `OnFailure` |
| `no` | Pod | `Never` |
|---------------------|-------------------|---------------------|
| `""` | controller object | `Always` |
| `always` | controller object | `Always` |
| `unless-stopped` | controller object | `Always` |
| `on-failure` | Pod / CronJob | `OnFailure` |
| `no` | Pod / CronJob | `Never` |

**Note**: the controller object could be a `deployment`, `replicationcontroller`, etc.
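For `restart: "on-failure"` with no cronjob labels, the generated object is a bare Pod. A rough sketch of the output (service name and image are hypothetical, metadata abbreviated):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: app                  # hypothetical service name
spec:
  containers:
    - image: busybox         # hypothetical image
      name: app
  restartPolicy: OnFailure   # from restart: "on-failure"
```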
@@ -490,6 +493,22 @@ services:
    restart: "on-failure"
```

For example, the `pival` service below will become a CronJob. This container calculates the value of `pi` every minute.

```yaml
version: '2'

services:
  pival:
    image: perl
    command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
    restart: "no"
    labels:
      kompose.cronjob.schedule: "* * * * *"
      kompose.cronjob.concurrency_policy: "Forbid"
      kompose.cronjob.backoff_limit: "0"
```

#### Warning about Deployment Configs

If the Docker Compose file has a volume specified for a service, the Deployment (Kubernetes) or DeploymentConfig (OpenShift) strategy is changed to "Recreate" instead of "RollingUpdate" (the default). This is done to prevent multiple instances of a service from accessing a volume at the same time.
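In the generated Kubernetes Deployment this shows up roughly as the following stanza (a sketch; all other fields omitted):

```yaml
spec:
  strategy:
    type: Recreate   # used instead of the default RollingUpdate when a volume is mounted
```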
@@ -26,6 +26,7 @@ import (
	"github.com/pkg/errors"
	"github.com/spf13/cast"
	v1 "k8s.io/api/apps/v1"
	batchv1 "k8s.io/api/batch/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)
@@ -153,17 +154,20 @@ type ServiceConfig struct {
	DeployMode string `compose:""`
	VolumeMountSubPath string `compose:"kompose.volume.subpath"`
	// DeployLabels mapping to kubernetes labels
	DeployLabels map[string]string `compose:""`
	DeployUpdateConfig types.UpdateConfig `compose:""`
	TmpFs []string `compose:"tmpfs"`
	Dockerfile string `compose:"dockerfile"`
	Replicas int `compose:"replicas"`
	GroupAdd []int64 `compose:"group_add"`
	FsGroup int64 `compose:"kompose.security-context.fsgroup"`
	Volumes []Volumes `compose:""`
	Secrets []types.ServiceSecretConfig
	HealthChecks HealthChecks `compose:""`
	Placement Placement `compose:""`
	DeployLabels map[string]string `compose:""`
	DeployUpdateConfig types.UpdateConfig `compose:""`
	TmpFs []string `compose:"tmpfs"`
	Dockerfile string `compose:"dockerfile"`
	Replicas int `compose:"replicas"`
	GroupAdd []int64 `compose:"group_add"`
	FsGroup int64 `compose:"kompose.security-context.fsgroup"`
	CronJobSchedule string `compose:"kompose.cronjob.schedule"`
	CronJobConcurrencyPolicy batchv1.ConcurrencyPolicy `compose:"kompose.cronjob.concurrency_policy"`
	CronJobBackoffLimit *int32 `compose:"kompose.cronjob.backoff_limit"`
	Volumes []Volumes `compose:""`
	Secrets []types.ServiceSecretConfig
	HealthChecks HealthChecks `compose:""`
	Placement Placement `compose:""`
	//This is for long LONG SYNTAX link(https://docs.docker.com/compose/compose-file/#long-syntax)
	Configs []types.ServiceConfigObjConfig `compose:""`
	//This is for SHORT SYNTAX link(https://docs.docker.com/compose/compose-file/#configs)
@@ -33,6 +33,7 @@ import (
	"github.com/pkg/errors"
	log "github.com/sirupsen/logrus"
	"github.com/spf13/cast"
	batchv1 "k8s.io/api/batch/v1"
	api "k8s.io/api/core/v1"
)
@@ -693,6 +694,42 @@ func parseEnvironment(composeServiceConfig *types.ServiceConfig, serviceConfig *
	}
}

// handleCronJobConcurrencyPolicy maps the label value to a batchv1.ConcurrencyPolicy (Allow, Forbid, Replace or "").
func handleCronJobConcurrencyPolicy(policy string) (batchv1.ConcurrencyPolicy, error) {
	switch policy {
	case "Allow":
		return batchv1.AllowConcurrent, nil
	case "Forbid":
		return batchv1.ForbidConcurrent, nil
	case "Replace":
		return batchv1.ReplaceConcurrent, nil
	case "":
		return "", nil
	default:
		return "", fmt.Errorf("invalid cronjob concurrency policy: %s", policy)
	}
}

// handleCronJobBackoffLimit parses the label value into an *int32; an empty string means "not set".
func handleCronJobBackoffLimit(backoffLimit string) (*int32, error) {
	if backoffLimit == "" {
		return nil, nil
	}

	limit, err := cast.ToInt32E(backoffLimit)
	if err != nil {
		return nil, fmt.Errorf("invalid cronjob backoff limit: %s", backoffLimit)
	}
	return &limit, nil
}

// handleCronJobSchedule validates that the cron schedule label value is non-empty.
func handleCronJobSchedule(schedule string) (string, error) {
	if schedule == "" {
		return "", fmt.Errorf("cronjob schedule cannot be empty")
	}

	return schedule, nil
}

// parseKomposeLabels parses kompose labels and also does some validation
func parseKomposeLabels(labels map[string]string, serviceConfig *kobject.ServiceConfig) error {
	// Label handler
@@ -734,6 +771,27 @@ func parseKomposeLabels(labels map[string]string, serviceConfig *kobject.Service
			serviceConfig.ImagePullPolicy = value
		case LabelContainerVolumeSubpath:
			serviceConfig.VolumeMountSubPath = value
		case LabelCronJobSchedule:
			cronJobSchedule, err := handleCronJobSchedule(value)
			if err != nil {
				return errors.Wrap(err, "handleCronJobSchedule failed")
			}

			serviceConfig.CronJobSchedule = cronJobSchedule
		case LabelCronJobConcurrencyPolicy:
			cronJobConcurrencyPolicy, err := handleCronJobConcurrencyPolicy(value)
			if err != nil {
				return errors.Wrap(err, "handleCronJobConcurrencyPolicy failed")
			}

			serviceConfig.CronJobConcurrencyPolicy = cronJobConcurrencyPolicy
		case LabelCronJobBackoffLimit:
			cronJobBackoffLimit, err := handleCronJobBackoffLimit(value)
			if err != nil {
				return errors.Wrap(err, "handleCronJobBackoffLimit failed")
			}

			serviceConfig.CronJobBackoffLimit = cronJobBackoffLimit
		default:
			serviceConfig.Labels[key] = value
		}
@@ -755,6 +813,11 @@ func parseKomposeLabels(labels map[string]string, serviceConfig *kobject.Service
		return errors.New("cannot set kompose.service.nodeport.port when service has multiple ports")
	}

	if serviceConfig.Restart == "always" && serviceConfig.CronJobConcurrencyPolicy != "" {
		log.Infof("cronjob restart policy will be converted from '%s' to 'on-failure'", serviceConfig.Restart)
		serviceConfig.Restart = "on-failure"
	}

	return nil
}
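In Compose terms, the override above means that a service combining `restart: always` with the cronjob labels is converted as if it had `restart: "on-failure"`. A small illustration (service name and image are hypothetical):

```yaml
services:
  report:            # hypothetical service name
    image: busybox   # hypothetical image
    restart: always  # kompose logs a notice and treats this as "on-failure"
    labels:
      kompose.cronjob.schedule: "0 * * * *"
      kompose.cronjob.concurrency_policy: "Forbid"
```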
@@ -76,14 +76,18 @@ const (
	HealthCheckLivenessHTTPGetPort = "kompose.service.healthcheck.liveness.http_get_port"
	// HealthCheckLivenessTCPPort defines liveness health check tcp port
	HealthCheckLivenessTCPPort = "kompose.service.healthcheck.liveness.tcp_port"

	// ServiceTypeHeadless ...
	ServiceTypeHeadless = "Headless"
	// LabelSecurityContextFsGroup defines the pod FsGroup
	LabelSecurityContextFsGroup = "kompose.security-context.fsgroup"

	// LabelContainerVolumeSubpath defines the volume mount subpath inside container
	LabelContainerVolumeSubpath = "kompose.volume.subpath"
	// LabelCronJobSchedule defines the cron job schedule
	LabelCronJobSchedule = "kompose.cronjob.schedule"
	// LabelCronJobConcurrencyPolicy defines the cron job concurrency policy
	LabelCronJobConcurrencyPolicy = "kompose.cronjob.concurrency_policy"
	// LabelCronJobBackoffLimit defines the job backoff limit
	LabelCronJobBackoffLimit = "kompose.cronjob.backoff_limit"
)

// load environment variables from compose file
@@ -42,6 +42,7 @@ import (
	"github.com/spf13/cast"
	"golang.org/x/tools/godoc/util"
	appsv1 "k8s.io/api/apps/v1"
	batchv1 "k8s.io/api/batch/v1"
	api "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	"k8s.io/apimachinery/pkg/api/resource"
@@ -470,6 +471,33 @@ func (k *Kubernetes) InitSS(name string, service kobject.ServiceConfig, replicas
	return ds
}

// InitCJ initializes Kubernetes CronJob object
func (k *Kubernetes) InitCJ(name string, service kobject.ServiceConfig, schedule string, concurrencyPolicy batchv1.ConcurrencyPolicy, backoffLimit *int32) *batchv1.CronJob {
	cj := &batchv1.CronJob{
		TypeMeta: metav1.TypeMeta{
			Kind:       "CronJob",
			APIVersion: "batch/v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:   name,
			Labels: transformer.ConfigAllLabels(name, &service),
		},
		Spec: batchv1.CronJobSpec{
			Schedule:          schedule,
			ConcurrencyPolicy: concurrencyPolicy,
			JobTemplate: batchv1.JobTemplateSpec{
				Spec: batchv1.JobSpec{
					BackoffLimit: backoffLimit,
					Template: api.PodTemplateSpec{
						Spec: k.InitPodSpec(name, service.Image, service.ImagePullSecret),
					},
				},
			},
		},
	}
	return cj
}

func (k *Kubernetes) initIngress(name string, service kobject.ServiceConfig, port int32) *networkingv1.Ingress {
	hosts := regexp.MustCompile("[ ,]*,[ ,]*").Split(service.ExposeService, -1)
@@ -1587,11 +1615,16 @@ func (k *Kubernetes) Transform(komposeObject kobject.KomposeObject, opt kobject.
			return nil, err
		}

		// Generate pod and configmap objects
		// Generate pod or cronjob and configmap objects
		if (service.Restart == "no" || service.Restart == "on-failure") && !opt.IsPodController() {
			log.Infof("Create kubernetes pod instead of pod controller due to restart policy: %s", service.Restart)
			pod := k.InitPod(name, service)
			objects = append(objects, pod)
			if service.CronJobSchedule != "" {
				log.Infof("Create kubernetes cronjob instead of pod controller due to cronjob labels and restart policy: %s", service.Restart)
				cronJob := k.InitCJ(name, service, service.CronJobSchedule, service.CronJobConcurrencyPolicy, service.CronJobBackoffLimit)
				objects = append(objects, cronJob)
			} else {
				pod := k.InitPod(name, service)
				objects = append(objects, pod)
			}

			if len(service.EnvFile) > 0 {
				for _, envFile := range service.EnvFile {
@@ -1651,6 +1684,12 @@ func (k *Kubernetes) UpdateController(obj runtime.Object, updateTemplate func(*a
			return errors.Wrap(err, "updateTemplate failed")
		}
		updateMeta(&t.ObjectMeta)
	case *batchv1.CronJob:
		err = updateTemplate(&t.Spec.JobTemplate.Spec.Template)
		if err != nil {
			return errors.Wrap(err, "updateTemplate failed")
		}
		updateMeta(&t.ObjectMeta)
	case *deployapi.DeploymentConfig:
		err = updateTemplate(t.Spec.Template)
		if err != nil {
@@ -325,14 +325,20 @@ func (o *OpenShift) Transform(komposeObject kobject.KomposeObject, opt kobject.C
			}
		}

		// Generate pod and configmap objects
		// Generate pod or cronjob and configmap objects
		if service.Restart == "no" || service.Restart == "on-failure" {
			// Error out if Controller Object is specified with restart: 'on-failure'
			if opt.IsDeploymentConfigFlag {
				return nil, errors.New("Controller object cannot be specified with restart: 'on-failure'")
			}
			pod := o.InitPod(name, service)
			objects = append(objects, pod)

			if service.CronJobSchedule != "" {
				cronJob := o.InitCJ(name, service, service.CronJobSchedule, service.CronJobConcurrencyPolicy, service.CronJobBackoffLimit)
				objects = append(objects, cronJob)
			} else {
				pod := o.InitPod(name, service)
				objects = append(objects, pod)
			}

			if len(service.EnvFile) > 0 {
				for _, envFile := range service.EnvFile {
@@ -204,6 +204,14 @@ ocp_output="$KOMPOSE_ROOT/script/test/fixtures/statefulset/output-os.yaml"
convert::expect_success "$k8s_cmd" "$k8s_output" || exit 1
convert::expect_success "$ocp_cmd" "$ocp_output" || exit 1

# test cronjob
k8s_cmd="kompose -f $KOMPOSE_ROOT/script/test/fixtures/cronjob/docker-compose.yaml convert --stdout --with-kompose-annotation=false"
ocp_cmd="kompose --provider=openshift -f $KOMPOSE_ROOT/script/test/fixtures/cronjob/docker-compose.yaml convert --stdout --with-kompose-annotation=false"
k8s_output="$KOMPOSE_ROOT/script/test/fixtures/cronjob/output-k8s.yaml"
ocp_output="$KOMPOSE_ROOT/script/test/fixtures/cronjob/output-os.yaml"
convert::expect_success "$k8s_cmd" "$k8s_output" || exit 1
convert::expect_success "$ocp_cmd" "$ocp_output" || exit 1

# test specifying volume type using service label
k8s_cmd="kompose -f $KOMPOSE_ROOT/script/test/fixtures/multiple-type-volumes/docker-compose.yaml convert --stdout --with-kompose-annotation=false"
os_cmd="kompose --provider=openshift -f $KOMPOSE_ROOT/script/test/fixtures/multiple-type-volumes/docker-compose.yaml convert --stdout --with-kompose-annotation=false"
script/test/fixtures/cronjob/docker-compose.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
version: '3'

services:
  challenge:
    image: busybox:stable-glibc
    labels:
      kompose.cronjob.schedule: "* * * * *"
      kompose.cronjob.concurrency_policy: "Forbid"
      kompose.cronjob.backoff_limit: "0"
    command:
      - "bash"
      - "-c"
      - "echo hello from cron"
    restart: "no"
script/test/fixtures/cronjob/output-k8s.yaml (new file, 27 lines)
@@ -0,0 +1,27 @@
---
apiVersion: batch/v1
kind: CronJob
metadata:
  labels:
    io.kompose.service: challenge
  name: challenge
spec:
  concurrencyPolicy: Forbid
  jobTemplate:
    spec:
      backoffLimit: 0
      template:
        metadata:
          labels:
            io.kompose.network/cronjob-default: "true"
            io.kompose.service: challenge
        spec:
          containers:
            - args:
                - bash
                - -c
                - echo hello from cron
              image: busybox:stable-glibc
              name: challenge
          restartPolicy: Never
  schedule: '* * * * *'
script/test/fixtures/cronjob/output-os.yaml (new file, 27 lines)
@@ -0,0 +1,27 @@
---
apiVersion: batch/v1
kind: CronJob
metadata:
  labels:
    io.kompose.service: challenge
  name: challenge
spec:
  concurrencyPolicy: Forbid
  jobTemplate:
    spec:
      backoffLimit: 0
      template:
        metadata:
          labels:
            io.kompose.network/cronjob-default: "true"
            io.kompose.service: challenge
        spec:
          containers:
            - args:
                - bash
                - -c
                - echo hello from cron
              image: busybox:stable-glibc
              name: challenge
          restartPolicy: Never
  schedule: '* * * * *'