forked from LaconicNetwork/kompose
Add support for placement preferences docker-compose v3.3+ (#1425)
parent c921643705
commit 8cb1b0599e
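In short: kompose now reads the `preferences` list of a compose v3.3+ `placement` block and turns each `spread: <label>` entry into a Kubernetes topologySpreadConstraint on the generated pod spec. Along the way, constraint and preference label parsing is unified in a new `convertDockerLabel` helper, `engine.labels.operatingsystem` now maps to `kubernetes.io/os` instead of the deprecated `beta.kubernetes.io/os`, and preferences are skipped with a warning for services deployed in `global` mode.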
@@ -216,6 +216,7 @@ type Volumes struct {
 type Placement struct {
 	PositiveConstraints map[string]string
 	NegativeConstraints map[string]string
+	Preferences         []string
 }
 
 // GetConfigMapKeyFromMeta ...
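For orientation, a sketch of what ends up in this struct once `loadV3Placement` (further down in this diff) has parsed a compose placement block. The concrete label values here are illustrative, not taken from the test fixtures:

```go
package main

import "fmt"

// Placement mirrors kobject.Placement from the hunk above.
type Placement struct {
	PositiveConstraints map[string]string
	NegativeConstraints map[string]string
	Preferences         []string
}

func main() {
	// A compose placement such as:
	//   constraints: ["node.hostname == web", "node.labels.disk != hdd"]
	//   preferences: [{spread: node.labels.zone}]
	// parses into:
	p := Placement{
		PositiveConstraints: map[string]string{"kubernetes.io/hostname": "web"},
		NegativeConstraints: map[string]string{"disk": "hdd"},
		Preferences:         []string{"zone"},
	}
	fmt.Printf("%+v\n", p)
}
```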
@@ -595,6 +595,11 @@ func TestCheckPlacementCustomLabels(t *testing.T) {
 			"node.labels.something == anything",
 			"node.labels.monitor != xxx",
 		},
+		Preferences: []types.PlacementPreferences{
+			{Spread: "node.labels.zone"},
+			{Spread: "foo"},
+			{Spread: "node.labels.ssd"},
+		},
 	}
 	output := loadV3Placement(placement)
 
@@ -605,10 +610,22 @@ func TestCheckPlacementCustomLabels(t *testing.T) {
 		NegativeConstraints: map[string]string{
 			"monitor": "xxx",
 		},
+		Preferences: []string{
+			"zone", "ssd",
+		},
 	}
 
 	checkConstraints(t, "positive", output.PositiveConstraints, expected.PositiveConstraints)
 	checkConstraints(t, "negative", output.NegativeConstraints, expected.NegativeConstraints)
+
+	if len(output.Preferences) != len(expected.Preferences) {
+		t.Errorf("preferences len is not equal, expected %d, got %d", len(expected.Preferences), len(output.Preferences))
+	}
+	for i := range output.Preferences {
+		if output.Preferences[i] != expected.Preferences[i] {
+			t.Errorf("preference is not equal, expected %s, got %s", expected.Preferences[i], output.Preferences[i])
+		}
+	}
 }
 
 func checkConstraints(t *testing.T, caseName string, output, expected map[string]string) {
@@ -137,9 +137,11 @@ func loadV3Placement(placement types.Placement) kobject.Placement {
 	komposePlacement := kobject.Placement{
 		PositiveConstraints: make(map[string]string),
 		NegativeConstraints: make(map[string]string),
+		Preferences:         make([]string, 0, len(placement.Preferences)),
 	}
+
+	// Convert constraints
 	equal, notEqual := " == ", " != "
-	errMsg := " constraints in placement is not supported, only 'node.hostname', 'engine.labels.operatingsystem' and 'node.labels.xxx' (ex: node.labels.something == anything) is supported as a constraint "
 	for _, j := range placement.Constraints {
 		operator := equal
 		if strings.Contains(j, notEqual) {
@@ -147,19 +149,13 @@ func loadV3Placement(placement types.Placement) kobject.Placement {
 		}
 		p := strings.Split(j, operator)
 		if len(p) < 2 {
-			log.Warn(p[0], errMsg)
+			log.Warnf("Failed to parse placement constraints %s, the correct format is 'label == xxx'", j)
 			continue
 		}
 
-		var key string
-		if p[0] == "node.hostname" {
-			key = "kubernetes.io/hostname"
-		} else if p[0] == "engine.labels.operatingsystem" {
-			key = "beta.kubernetes.io/os"
-		} else if strings.HasPrefix(p[0], "node.labels.") {
-			key = strings.TrimPrefix(p[0], "node.labels.")
-		} else {
-			log.Warn(p[0], errMsg)
+		key, err := convertDockerLabel(p[0])
+		if err != nil {
+			log.Warn("Ignore placement constraints: ", err.Error())
 			continue
 		}
@@ -169,9 +165,36 @@ func loadV3Placement(placement types.Placement) kobject.Placement {
 			komposePlacement.NegativeConstraints[key] = p[1]
 		}
 	}
+
+	// Convert preferences
+	for _, p := range placement.Preferences {
+		// Spread is the only supported strategy currently
+		label, err := convertDockerLabel(p.Spread)
+		if err != nil {
+			log.Warn("Ignore placement preferences: ", err.Error())
+			continue
+		}
+		komposePlacement.Preferences = append(komposePlacement.Preferences, label)
+	}
 	return komposePlacement
 }
 
+// Convert docker label to k8s label
+func convertDockerLabel(dockerLabel string) (string, error) {
+	switch dockerLabel {
+	case "node.hostname":
+		return "kubernetes.io/hostname", nil
+	case "engine.labels.operatingsystem":
+		return "kubernetes.io/os", nil
+	default:
+		if strings.HasPrefix(dockerLabel, "node.labels.") {
+			return strings.TrimPrefix(dockerLabel, "node.labels."), nil
+		}
+	}
+	errMsg := fmt.Sprint(dockerLabel, " is not supported, only 'node.hostname', 'engine.labels.operatingsystem' and 'node.labels.xxx' (ex: node.labels.something == anything) is supported")
+	return "", errors.New(errMsg)
+}
+
 // Convert the Docker Compose v3 volumes to []string (the old way)
 // TODO: Check to see if it's a "bind" or "volume". Ignore for now.
 // TODO: Refactor it similar to loadV3Ports
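The new helper is now the single place where Swarm placement label names become Kubernetes node-label keys. A standalone copy for experimentation (only the error message is shortened relative to the hunk above):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// convertDockerLabel, lifted from the diff above: maps a Swarm placement
// label to its Kubernetes node-label equivalent.
func convertDockerLabel(dockerLabel string) (string, error) {
	switch dockerLabel {
	case "node.hostname":
		return "kubernetes.io/hostname", nil
	case "engine.labels.operatingsystem":
		return "kubernetes.io/os", nil
	default:
		if strings.HasPrefix(dockerLabel, "node.labels.") {
			return strings.TrimPrefix(dockerLabel, "node.labels."), nil
		}
	}
	return "", errors.New(dockerLabel + " is not supported")
}

func main() {
	for _, l := range []string{"node.hostname", "engine.labels.operatingsystem", "node.labels.zone", "foo"} {
		key, err := convertDockerLabel(l)
		fmt.Println(l, "->", key, err) // "foo" is rejected with an error
	}
}
```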
@@ -534,6 +534,7 @@ func (k *Kubernetes) UpdateKubernetesObjects(name string, service kobject.Servic
 		template.Spec.Volumes = append(template.Spec.Volumes, volumes...)
 	}
 	template.Spec.Affinity = ConfigAffinity(service)
+	template.Spec.TopologySpreadConstraints = ConfigTopologySpreadConstraints(service)
 	// Configure the HealthCheck
 	template.Spec.Containers[0].LivenessProbe = configProbe(service.HealthChecks.Liveness)
 	template.Spec.Containers[0].ReadinessProbe = configProbe(service.HealthChecks.Readiness)
@@ -29,8 +29,6 @@ import (
 	"strconv"
 	"strings"
 
-	"golang.org/x/tools/godoc/util"
-
 	"github.com/fatih/structs"
 	"github.com/kubernetes/kompose/pkg/kobject"
 	"github.com/kubernetes/kompose/pkg/loader/compose"
@@ -40,6 +38,7 @@ import (
 	"github.com/pkg/errors"
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cast"
+	"golang.org/x/tools/godoc/util"
 	appsv1 "k8s.io/api/apps/v1"
 	api "k8s.io/api/core/v1"
 	networkingv1 "k8s.io/api/networking/v1"
@@ -1073,12 +1072,13 @@ func ConfigEnvs(service kobject.ServiceConfig, opt kobject.ConvertOptions) ([]ap
 
 // ConfigAffinity configures the Affinity.
 func ConfigAffinity(service kobject.ServiceConfig) *api.Affinity {
-	// Config constraints
+	var affinity *api.Affinity
+	// Convert constraints to requiredDuringSchedulingIgnoredDuringExecution
 	positiveConstraints := configConstrains(service.Placement.PositiveConstraints, api.NodeSelectorOpIn)
 	negativeConstraints := configConstrains(service.Placement.NegativeConstraints, api.NodeSelectorOpNotIn)
-	if len(positiveConstraints) == 0 && len(negativeConstraints) == 0 {
-		return nil
-	}
-	return &api.Affinity{
+	if len(positiveConstraints) != 0 || len(negativeConstraints) != 0 {
+		affinity = &api.Affinity{
 			NodeAffinity: &api.NodeAffinity{
 				RequiredDuringSchedulingIgnoredDuringExecution: &api.NodeSelector{
 					NodeSelectorTerms: []api.NodeSelectorTerm{
@@ -1089,6 +1089,35 @@ func ConfigAffinity(service kobject.ServiceConfig) *api.Affinity {
 				},
 			},
 		}
+	}
+	return affinity
 }
+
+// ConfigTopologySpreadConstraints configures the TopologySpreadConstraints.
+func ConfigTopologySpreadConstraints(service kobject.ServiceConfig) []api.TopologySpreadConstraint {
+	preferencesLen := len(service.Placement.Preferences)
+	constraints := make([]api.TopologySpreadConstraint, 0, preferencesLen)
+
+	// Placement preferences are ignored for global services
+	if service.DeployMode == "global" {
+		log.Warnf("Ignore placement preferences for global service %s", service.Name)
+		return constraints
+	}
+
+	for i, p := range service.Placement.Preferences {
+		constraints = append(constraints, api.TopologySpreadConstraint{
+			// According to the order of preferences, the MaxSkew decreases in order
+			// The minimum value is 1
+			MaxSkew:           int32(preferencesLen - i),
+			TopologyKey:       p,
+			WhenUnsatisfiable: api.ScheduleAnyway,
+			LabelSelector: &metav1.LabelSelector{
+				MatchLabels: transformer.ConfigLabels(service.Name),
+			},
+		})
+	}
+
+	return constraints
+}
 
 func configConstrains(constrains map[string]string, operator api.NodeSelectorOperator) []api.NodeSelectorRequirement {
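A worked example of the MaxSkew ordering: with `Preferences: []string{"zone", "ssd"}`, `preferencesLen` is 2, so the loop emits `maxSkew: 2` for `zone` (i = 0) and `maxSkew: 1` for `ssd` (i = 1). Earlier preferences therefore tolerate more imbalance, and the last preference is always held to the minimum skew of 1; this is exactly what the unit test and the expected JSON fixtures later in this diff assert.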
@@ -1404,6 +1433,7 @@ func (k *Kubernetes) Transform(komposeObject kobject.KomposeObject, opt kobject.
 		ResourcesLimits(service),
 		ResourcesRequests(service),
 		TerminationGracePeriodSeconds(name, service),
+		TopologySpreadConstraints(service),
 	)
 
 	if serviceAccountName, ok := service.Labels[compose.LabelServiceAccountName]; ok {
@@ -668,6 +668,51 @@ func TestConfigAffinity(t *testing.T) {
 	}
 }
 
+func TestConfigTopologySpreadConstraints(t *testing.T) {
+	serviceName := "app"
+	testCases := map[string]struct {
+		service kobject.ServiceConfig
+		result  []api.TopologySpreadConstraint
+	}{
+		"ConfigTopologySpreadConstraint": {
+			service: kobject.ServiceConfig{
+				Name: serviceName,
+				Placement: kobject.Placement{
+					Preferences: []string{
+						"zone", "ssd",
+					},
+				},
+			},
+			result: []api.TopologySpreadConstraint{
+				{
+					MaxSkew:           2,
+					TopologyKey:       "zone",
+					WhenUnsatisfiable: api.ScheduleAnyway,
+					LabelSelector: &metav1.LabelSelector{
+						MatchLabels: transformer.ConfigLabels(serviceName),
+					},
+				},
+				{
+					MaxSkew:           1,
+					TopologyKey:       "ssd",
+					WhenUnsatisfiable: api.ScheduleAnyway,
+					LabelSelector: &metav1.LabelSelector{
+						MatchLabels: transformer.ConfigLabels(serviceName),
+					},
+				},
+			},
+		},
+	}
+
+	for name, test := range testCases {
+		t.Log("Test case:", name)
+		result := ConfigTopologySpreadConstraints(test.service)
+		if !reflect.DeepEqual(result, test.result) {
+			t.Errorf("Not expected result for ConfigTopologySpreadConstraints")
+		}
+	}
+}
+
 func TestMultipleContainersInPod(t *testing.T) {
 	groupName := "pod_group"
 
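Assuming these tests live in the kubernetes transformer package, something like `go test -run TestConfigTopologySpreadConstraints ./pkg/transformer/...` from the repository root should run just this case.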
@@ -306,6 +306,12 @@ func ServiceAccountName(serviceAccountName string) PodSpecOption {
 	}
 }
 
+func TopologySpreadConstraints(service kobject.ServiceConfig) PodSpecOption {
+	return func(podSpec *PodSpec) {
+		podSpec.TopologySpreadConstraints = ConfigTopologySpreadConstraints(service)
+	}
+}
+
 func (podSpec *PodSpec) Append(ops ...PodSpecOption) *PodSpec {
 	for _, option := range ops {
 		option(podSpec)
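This follows the existing functional-options pattern on PodSpec: each option is a closure that mutates the spec, and `Append` applies them in order. A minimal sketch of the pattern with simplified stand-in types (not the real kompose structs):

```go
package main

import "fmt"

// PodSpec stands in for the real builder struct.
type PodSpec struct{ TopologyKeys []string }

// PodSpecOption mutates a PodSpec, mirroring the hunk above.
type PodSpecOption func(*PodSpec)

// TopologyKey returns an option that records one topology key.
func TopologyKey(key string) PodSpecOption {
	return func(p *PodSpec) { p.TopologyKeys = append(p.TopologyKeys, key) }
}

// Append applies each option in order, as in the diff above.
func (p *PodSpec) Append(ops ...PodSpecOption) *PodSpec {
	for _, op := range ops {
		op(p)
	}
	return p
}

func main() {
	p := (&PodSpec{}).Append(TopologyKey("zone"), TopologyKey("ssd"))
	fmt.Println(p.TopologyKeys) // [zone ssd]
}
```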
@@ -1,4 +1,4 @@
-version: "3"
+version: "3.3"
 services:
   redis:
     image: redis
@@ -11,3 +11,7 @@ services:
           - engine.labels.operatingsystem == ubuntu 14.04
           - node.labels.foo != bar
           - baz != qux
+        preferences:
+          - spread: node.labels.zone
+          - spread: foo
+          - spread: node.labels.ssd
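This fixture deliberately exercises the warning paths too: the `baz != qux` constraint and the `spread: foo` preference both fail `convertDockerLabel` and are dropped, so only `zone` and `ssd` survive into the expected output below, with `maxSkew` 2 and 1 respectively.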
@@ -76,26 +76,54 @@
                     {
                       "key": "kubernetes.io/hostname",
                       "operator": "In",
-                      "values": ["machine"]
+                      "values": [
+                        "machine"
+                      ]
                     },
                     {
-                      "key": "beta.kubernetes.io/os",
+                      "key": "kubernetes.io/os",
                       "operator": "In",
-                      "values": ["ubuntu 14.04"]
+                      "values": [
+                        "ubuntu 14.04"
+                      ]
                     },
                     {
                       "key": "foo",
                       "operator": "NotIn",
-                      "values": ["bar"]
+                      "values": [
+                        "bar"
+                      ]
                     }
                   ]
                 }
               ]
             }
           }
-        }
+        },
+        "topologySpreadConstraints": [
+          {
+            "maxSkew": 2,
+            "topologyKey": "zone",
+            "whenUnsatisfiable": "ScheduleAnyway",
+            "labelSelector": {
+              "matchLabels": {
+                "io.kompose.service": "redis"
+              }
+            }
+          },
+          {
+            "maxSkew": 1,
+            "topologyKey": "ssd",
+            "whenUnsatisfiable": "ScheduleAnyway",
+            "labelSelector": {
+              "matchLabels": {
+                "io.kompose.service": "redis"
+              }
+            }
+          }
+        ]
       }
     },
     "strategy": {}
   },
   "status": {}
@@ -96,25 +96,53 @@
                     {
                       "key": "kubernetes.io/hostname",
                       "operator": "In",
-                      "values": ["machine"]
+                      "values": [
+                        "machine"
+                      ]
                     },
                     {
-                      "key": "beta.kubernetes.io/os",
+                      "key": "kubernetes.io/os",
                       "operator": "In",
-                      "values": ["ubuntu 14.04"]
+                      "values": [
+                        "ubuntu 14.04"
+                      ]
                     },
                     {
                       "key": "foo",
                       "operator": "NotIn",
-                      "values": ["bar"]
+                      "values": [
+                        "bar"
+                      ]
                     }
                   ]
                 }
               ]
             }
           }
-        }
+        },
+        "topologySpreadConstraints": [
+          {
+            "maxSkew": 2,
+            "topologyKey": "zone",
+            "whenUnsatisfiable": "ScheduleAnyway",
+            "labelSelector": {
+              "matchLabels": {
+                "io.kompose.service": "redis"
+              }
+            }
+          },
+          {
+            "maxSkew": 1,
+            "topologyKey": "ssd",
+            "whenUnsatisfiable": "ScheduleAnyway",
+            "labelSelector": {
+              "matchLabels": {
+                "io.kompose.service": "redis"
+              }
+            }
+          }
+        ]
       }
     },
   },
   "status": {
@@ -89,7 +89,7 @@
               "values": ["machine"]
             },
             {
-              "key": "beta.kubernetes.io/os",
+              "key": "kubernetes.io/os",
               "operator": "In",
               "values": ["ubuntu 14.04"]
             },
@@ -104,7 +104,7 @@
               "values": ["machine"]
             },
             {
-              "key": "beta.kubernetes.io/os",
+              "key": "kubernetes.io/os",
               "operator": "In",
               "values": ["ubuntu 14.04"]
             },