Merge pull request #600 from cdrage/add-v3-support

Add v3 support of Docker Compose
Suraj Deshmukh 2017-06-14 10:59:05 +05:30 committed by GitHub
commit 3be76ff32b
183 changed files with 31044 additions and 22473 deletions

glide.lock generated

@@ -1,5 +1,5 @@
hash: 3036ae90e1ce5e101dedcf0270bae7f148557cbb7f94e537784bb28650d088bb
updated: 2017-05-15T18:20:57.195406298+05:30
hash: 41c9e4c113b17cb5362f516e8d14147e7b312099985b55df8c71c3b0b4e13fb4
updated: 2017-05-24T11:02:01.662886713-04:00
imports:
- name: cloud.google.com/go
version: 3b1ae45394a234c385be014e9a488f2bb6eef821
@@ -103,6 +103,15 @@ imports:
- spew
- name: github.com/dgrijalva/jwt-go
version: 01aeca54ebda6e0fbfafd0a524d234159c05ec20
- name: github.com/docker/cli
version: 1fc7eb5d644599f30d0c6cc350a4d84ff528c864
subpackages:
- cli/compose/interpolation
- cli/compose/loader
- cli/compose/schema
- cli/compose/template
- cli/compose/types
- opts
- name: github.com/docker/distribution
version: 12acdf0a6c1e56d965ac6eb395d2bce687bf22fc
subpackages:
@@ -148,7 +157,7 @@ imports:
- uuid
- version
- name: github.com/docker/docker
version: 601004e1a714d77d3a43e957b8ae8adbc867b280
version: 58b1788c81f937bb2aaf1b0077c6b3b23a8397ff
subpackages:
- api/types
- api/types/blkiodev
@@ -160,9 +169,6 @@ imports:
- api/types/strslice
- api/types/swarm
- api/types/versions
- opts
- pkg/mount
- pkg/signal
- pkg/urlutil
- runconfig/opts
- name: github.com/docker/engine-api
@@ -210,7 +216,7 @@ imports:
- name: github.com/evanphx/json-patch
version: 465937c80b3c07a7c7ad20cc934898646a91c1de
- name: github.com/fatih/structs
version: a720dfa8df582c51dee1b36feabb906bde1588bd
version: 74a29b9fac7397d933f47e33eba94d2d83a464a2
- name: github.com/flynn/go-shlex
version: 3f9db97f856818214da2e1057f8ad84803971cff
- name: github.com/fsnotify/fsnotify
@@ -352,12 +358,21 @@ imports:
version: 77ed1c8a01217656d2080ad51981f6e99adaa177
- name: github.com/magiconair/properties
version: 51463bfca2576e06c62a8504b5c0f06d61312647
- name: github.com/mattn/go-shellwords
version: 02e3cf038dcea8290e44424da473dd12be796a8a
- name: github.com/matttproud/golang_protobuf_extensions
version: fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a
subpackages:
- pbutil
- name: github.com/mitchellh/mapstructure
version: cc8532a8e9a55ea36402aa21efdf403a60d34096
version: d0303fe809921458f417bcf828397a65db30a7e4
- name: github.com/opencontainers/go-digest
version: eaa60544f31ccf3b0653b1a118b76d33418ff41b
- name: github.com/opencontainers/image-spec
version: 1a6593ab6c3ab5902072b4694a22ff19425396ae
subpackages:
- specs-go
- specs-go/v1
- name: github.com/openshift/origin
version: b4e0954faa4a0d11d9c1a536b76ad4a8c0206b7c
subpackages:
@@ -372,6 +387,7 @@ imports:
- pkg/build/api/install
- pkg/build/api/v1
- pkg/build/client
- pkg/build/cmd
- pkg/build/util
- pkg/client
- pkg/cmd/cli/config
@@ -398,6 +414,7 @@ imports:
- pkg/security/api
- pkg/template/api
- pkg/user/api
- pkg/util
- pkg/util/namer
- pkg/version
- name: github.com/pborman/uuid
@@ -405,7 +422,7 @@ imports:
- name: github.com/pelletier/go-buffruneio
version: c37440a7cf42ac63b919c752ca73a85067e05992
- name: github.com/pelletier/go-toml
version: 685a1f1cb7a66b9cadbe8f1ac49d9f8f567d6a9d
version: 5c26a6ff6fd178719e15decac1c8196da0d7d6d1
- name: github.com/pkg/errors
version: c605e284fe17294bda444b34710735b29d1a9d90
- name: github.com/prometheus/client_golang
@@ -433,9 +450,9 @@ imports:
- name: github.com/spf13/cast
version: acbeb36b902d72a7a4c18e8f3241075e7ab763e4
- name: github.com/spf13/cobra
version: 4cdb38c072b86bf795d2c81de50784d9fdd6eb77
version: ca57f0f5dba473a8a58765d16d7e811fb8027add
- name: github.com/spf13/jwalterweatherman
version: 8f07c835e5cc1450c082fe3a439cf87b0cbb2d99
version: 0efa5202c04663c757d84f90f5219c1250baf94f
- name: github.com/spf13/pflag
version: e57e3eeb33f795204c1ca35f56c44f83227c6e66
- name: github.com/spf13/viper
@@ -473,7 +490,7 @@ imports:
- jws
- jwt
- name: golang.org/x/sys
version: 1e99a4f9d247b28c670884b9a8d6801f39a47b77
version: dbc2be9168a660ef302e04b6ff6406de6f967473
subpackages:
- unix
- name: golang.org/x/text


@@ -13,6 +13,7 @@ import:
- package: github.com/spf13/viper
- package: github.com/pkg/errors
# We use libcompose to parse v1 and v2 of Docker Compose
- package: github.com/docker/libcompose
version: 01ff8920a57835fda41607e0f27252408e99cf08
subpackages:
@@ -20,6 +21,10 @@ import:
- lookup
- project
# We use docker/cli to parse v3 of Docker Compose
- package: github.com/docker/cli
version: 1fc7eb5d644599f30d0c6cc350a4d84ff528c864
- package: github.com/openshift/origin
version: v1.4.0-rc1
subpackages:
@@ -63,5 +68,4 @@ import:
# this makes sure that we are using version that is compatible with both.
# Glide will show WARN about conflicting rev (it this case it is OK)
- package: github.com/docker/docker
version: 601004e1a714d77d3a43e957b8ae8adbc867b280
version: 58b1788c81f937bb2aaf1b0077c6b3b23a8397ff
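
The glide.yaml comments above pin github.com/docker/libcompose for v1/v2 parsing and github.com/docker/cli (the cli/compose/* subpackages) for v3 parsing. As a rough standalone sketch of what those docker/cli subpackages provide, here is the minimal ParseYAML-then-Load flow that the parseV3 function added later in this PR follows; the file name here is only a placeholder:

package main

import (
    "fmt"
    "io/ioutil"

    "github.com/docker/cli/cli/compose/loader"
    "github.com/docker/cli/cli/compose/types"
)

func main() {
    // Placeholder path; any Docker Compose v3 file works here.
    raw, err := ioutil.ReadFile("docker-compose.yaml")
    if err != nil {
        panic(err)
    }
    // Step 1: parse the raw YAML into a generic map.
    parsed, err := loader.ParseYAML(raw)
    if err != nil {
        panic(err)
    }
    // Step 2: validate against the v3 schema and build the typed config.
    config, err := loader.Load(types.ConfigDetails{
        WorkingDir:  ".",
        ConfigFiles: []types.ConfigFile{{Filename: "docker-compose.yaml", Config: parsed}},
    })
    if err != nil {
        panic(err)
    }
    for _, service := range config.Services {
        fmt.Println(service.Name, service.Image)
    }
}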


@@ -18,18 +18,13 @@ package compose
import (
"fmt"
"net"
"os"
"path/filepath"
"io/ioutil"
"reflect"
"strconv"
"strings"
"k8s.io/kubernetes/pkg/api"
yaml "gopkg.in/yaml.v2"
log "github.com/Sirupsen/logrus"
"github.com/docker/libcompose/config"
"github.com/docker/libcompose/lookup"
"github.com/docker/libcompose/project"
"github.com/fatih/structs"
"github.com/kubernetes-incubator/kompose/pkg/kobject"
@@ -134,277 +129,65 @@ func checkUnsupportedKey(composeProject *project.Project) []string {
return keysFound
}
// load environment variables from compose file
func loadEnvVars(envars []string) []kobject.EnvVar {
envs := []kobject.EnvVar{}
for _, e := range envars {
character := ""
equalPos := strings.Index(e, "=")
colonPos := strings.Index(e, ":")
switch {
case equalPos == -1 && colonPos == -1:
character = ""
case equalPos == -1 && colonPos != -1:
character = ":"
case equalPos != -1 && colonPos == -1:
character = "="
case equalPos != -1 && colonPos != -1:
if equalPos > colonPos {
character = ":"
} else {
character = "="
}
}
if character == "" {
envs = append(envs, kobject.EnvVar{
Name: e,
Value: os.Getenv(e),
})
} else {
values := strings.SplitN(e, character, 2)
// try to get value from os env
if values[1] == "" {
values[1] = os.Getenv(values[0])
}
envs = append(envs, kobject.EnvVar{
Name: values[0],
Value: values[1],
})
}
}
return envs
}
// Load ports from compose file
func loadPorts(composePorts []string) ([]kobject.Ports, error) {
ports := []kobject.Ports{}
character := ":"
// For each port listed
for _, port := range composePorts {
// Get the TCP / UDP protocol. Checks to see if it splits in 2 with '/' character.
// ex. 15000:15000/tcp
// else, set a default protocol of using TCP
proto := api.ProtocolTCP
protocolCheck := strings.Split(port, "/")
if len(protocolCheck) == 2 {
if strings.EqualFold("tcp", protocolCheck[1]) {
proto = api.ProtocolTCP
} else if strings.EqualFold("udp", protocolCheck[1]) {
proto = api.ProtocolUDP
} else {
return nil, fmt.Errorf("invalid protocol %q", protocolCheck[1])
}
}
// Split up the ports / IP without the "/tcp" or "/udp" appended to it
justPorts := strings.Split(protocolCheck[0], character)
if len(justPorts) == 3 {
// ex. 127.0.0.1:80:80
// Get the IP address
hostIP := justPorts[0]
ip := net.ParseIP(hostIP)
if ip.To4() == nil && ip.To16() == nil {
return nil, fmt.Errorf("%q contains an invalid IPv4 or IPv6 IP address", port)
}
// Get the host port
hostPortInt, err := strconv.Atoi(justPorts[1])
if err != nil {
return nil, fmt.Errorf("invalid host port %q valid example: 127.0.0.1:80:80", port)
}
// Get the container port
containerPortInt, err := strconv.Atoi(justPorts[2])
if err != nil {
return nil, fmt.Errorf("invalid container port %q valid example: 127.0.0.1:80:80", port)
}
// Convert to a kobject struct with ports as well as IP
ports = append(ports, kobject.Ports{
HostPort: int32(hostPortInt),
ContainerPort: int32(containerPortInt),
HostIP: hostIP,
Protocol: proto,
})
} else if len(justPorts) == 2 {
// ex. 80:80
// Get the host port
hostPortInt, err := strconv.Atoi(justPorts[0])
if err != nil {
return nil, fmt.Errorf("invalid host port %q valid example: 80:80", port)
}
// Get the container port
containerPortInt, err := strconv.Atoi(justPorts[1])
if err != nil {
return nil, fmt.Errorf("invalid container port %q valid example: 80:80", port)
}
// Convert to a kobject struct and add to the list of ports
ports = append(ports, kobject.Ports{
HostPort: int32(hostPortInt),
ContainerPort: int32(containerPortInt),
Protocol: proto,
})
} else {
// ex. 80
containerPortInt, err := strconv.Atoi(justPorts[0])
if err != nil {
return nil, fmt.Errorf("invalid container port %q valid example: 80", port)
}
ports = append(ports, kobject.Ports{
ContainerPort: int32(containerPortInt),
Protocol: proto,
})
}
}
return ports, nil
}
// LoadFile loads compose file into KomposeObject
// LoadFile loads a compose file into KomposeObject
func (c *Compose) LoadFile(files []string) (kobject.KomposeObject, error) {
komposeObject := kobject.KomposeObject{
ServiceConfigs: make(map[string]kobject.ServiceConfig),
LoadedFrom: "compose",
}
context := &project.Context{}
context.ComposeFiles = files
if context.ResourceLookup == nil {
context.ResourceLookup = &lookup.FileResourceLookup{}
}
// Load the json / yaml file in order to get the version value
var version string
if context.EnvironmentLookup == nil {
cwd, err := os.Getwd()
for _, file := range files {
composeVersion, err := getVersionFromFile(file)
if err != nil {
return kobject.KomposeObject{}, nil
}
context.EnvironmentLookup = &lookup.ComposableEnvLookup{
Lookups: []config.EnvironmentLookup{
&lookup.EnvfileLookup{
Path: filepath.Join(cwd, ".env"),
},
&lookup.OsEnvLookup{},
},
}
return kobject.KomposeObject{}, errors.Wrap(err, "Unable to load yaml/json file for version parsing")
}
// load compose file into composeObject
composeObject := project.NewProject(context, nil, nil)
err := composeObject.Parse()
// Check that the previous file loaded matches.
if len(files) > 0 && version != "" && version != composeVersion {
return kobject.KomposeObject{}, errors.New("All Docker Compose files must be of the same version")
}
version = composeVersion
}
log.Debugf("Docker Compose version: %s", version)
// Convert based on version
switch version {
// Use libcompose for 1 or 2
// If blank, it's assumed it's 1 or 2
case "", "1", "1.0", "2", "2.0":
komposeObject, err := parseV1V2(files)
if err != nil {
return kobject.KomposeObject{}, errors.Wrap(err, "composeObject.Parse() failed, Failed to load compose file")
return kobject.KomposeObject{}, err
}
noSupKeys := checkUnsupportedKey(composeObject)
for _, keyName := range noSupKeys {
log.Warningf("Unsupported %s key - ignoring", keyName)
}
for name, composeServiceConfig := range composeObject.ServiceConfigs.All() {
serviceConfig := kobject.ServiceConfig{}
serviceConfig.Image = composeServiceConfig.Image
serviceConfig.Build = composeServiceConfig.Build.Context
newName := normalizeServiceNames(composeServiceConfig.ContainerName)
serviceConfig.ContainerName = newName
if newName != composeServiceConfig.ContainerName {
log.Infof("Container name in service %q has been changed from %q to %q", name, composeServiceConfig.ContainerName, newName)
}
serviceConfig.Command = composeServiceConfig.Entrypoint
serviceConfig.Args = composeServiceConfig.Command
serviceConfig.Dockerfile = composeServiceConfig.Build.Dockerfile
serviceConfig.BuildArgs = composeServiceConfig.Build.Args
envs := loadEnvVars(composeServiceConfig.Environment)
serviceConfig.Environment = envs
//Validate dockerfile path
if filepath.IsAbs(serviceConfig.Dockerfile) {
log.Fatalf("%q defined in service %q is an absolute path, it must be a relative path.", serviceConfig.Dockerfile, name)
}
// load ports
ports, err := loadPorts(composeServiceConfig.Ports)
if err != nil {
return kobject.KomposeObject{}, errors.Wrap(err, "loadPorts failed. "+name+" failed to load ports from compose file")
}
serviceConfig.Port = ports
serviceConfig.WorkingDir = composeServiceConfig.WorkingDir
if composeServiceConfig.Volumes != nil {
for _, volume := range composeServiceConfig.Volumes.Volumes {
v := normalizeServiceNames(volume.String())
serviceConfig.Volumes = append(serviceConfig.Volumes, v)
}
}
// canonical "Custom Labels" handler
// Labels used to influence conversion of kompose will be handled
// from here for docker-compose. Each loader will have such handler.
for key, value := range composeServiceConfig.Labels {
switch key {
case "kompose.service.type":
serviceType, err := handleServiceType(value)
if err != nil {
return kobject.KomposeObject{}, errors.Wrap(err, "handleServiceType failed")
}
serviceConfig.ServiceType = serviceType
case "kompose.service.expose":
serviceConfig.ExposeService = strings.ToLower(value)
}
}
// convert compose labels to annotations
serviceConfig.Annotations = map[string]string(composeServiceConfig.Labels)
serviceConfig.CPUQuota = int64(composeServiceConfig.CPUQuota)
serviceConfig.CapAdd = composeServiceConfig.CapAdd
serviceConfig.CapDrop = composeServiceConfig.CapDrop
serviceConfig.Pid = composeServiceConfig.Pid
serviceConfig.Expose = composeServiceConfig.Expose
serviceConfig.Privileged = composeServiceConfig.Privileged
serviceConfig.Restart = composeServiceConfig.Restart
serviceConfig.User = composeServiceConfig.User
serviceConfig.VolumesFrom = composeServiceConfig.VolumesFrom
serviceConfig.Stdin = composeServiceConfig.StdinOpen
serviceConfig.Tty = composeServiceConfig.Tty
serviceConfig.MemLimit = composeServiceConfig.MemLimit
serviceConfig.TmpFs = composeServiceConfig.Tmpfs
serviceConfig.StopGracePeriod = composeServiceConfig.StopGracePeriod
komposeObject.ServiceConfigs[normalizeServiceNames(name)] = serviceConfig
if normalizeServiceNames(name) != name {
log.Infof("Service name in docker-compose has been changed from %q to %q", name, normalizeServiceNames(name))
}
}
return komposeObject, nil
// Use docker/cli for 3
case "3", "3.0":
komposeObject, err := parseV3(files)
if err != nil {
return kobject.KomposeObject{}, err
}
func handleServiceType(ServiceType string) (string, error) {
switch strings.ToLower(ServiceType) {
case "", "clusterip":
return string(api.ServiceTypeClusterIP), nil
case "nodeport":
return string(api.ServiceTypeNodePort), nil
case "loadbalancer":
return string(api.ServiceTypeLoadBalancer), nil
return komposeObject, nil
default:
return "", errors.New("Unknown value " + ServiceType + " , supported values are 'NodePort, ClusterIP or LoadBalancer'")
}
return kobject.KomposeObject{}, fmt.Errorf("Version %s of Docker Compose is not supported. Please use version 1, 2 or 3", version)
}
func normalizeServiceNames(svcName string) string {
return strings.Replace(svcName, "_", "-", -1)
}
func getVersionFromFile(file string) (string, error) {
type ComposeVersion struct {
Version string `json:"version"` // This affects YAML as well
}
var version ComposeVersion
loadedFile, err := ioutil.ReadFile(file)
if err != nil {
return "", err
}
err = yaml.Unmarshal(loadedFile, &version)
if err != nil {
return "", err
}
return version.Version, nil
}


@@ -25,12 +25,49 @@ import (
"github.com/kubernetes-incubator/kompose/pkg/kobject"
"k8s.io/kubernetes/pkg/api"
"github.com/docker/cli/cli/compose/types"
"github.com/docker/libcompose/config"
"github.com/docker/libcompose/project"
"github.com/docker/libcompose/yaml"
"github.com/pkg/errors"
)
func TestLoadV3Volumes(t *testing.T) {
vol := types.ServiceVolumeConfig{
Type: "volume",
Source: "/tmp/foobar",
Target: "/tmp/foobar",
ReadOnly: true,
}
volumes := []types.ServiceVolumeConfig{vol}
output := loadV3Volumes(volumes)
expected := "/tmp/foobar:/tmp/foobar:ro"
if output[0] != expected {
t.Errorf("Expected %s, got %s", expected, output[0])
}
}
func TestLoadV3Ports(t *testing.T) {
port := types.ServicePortConfig{
Target: 80,
Published: 80,
Protocol: "TCP",
}
ports := []types.ServicePortConfig{port}
output := loadV3Ports(ports)
expected := kobject.Ports{
HostPort: 80,
ContainerPort: 80,
Protocol: api.Protocol("TCP"),
}
if output[0] != expected {
t.Errorf("Expected %s, got %s", expected, output[0])
}
}
// Test if service types are parsed properly on user input
// give a service type and expect correct input
func TestHandleServiceType(t *testing.T) {

pkg/loader/compose/utils.go Normal file

@@ -0,0 +1,102 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package compose
import (
"os"
"path/filepath"
"strings"
"github.com/kubernetes-incubator/kompose/pkg/kobject"
"github.com/pkg/errors"
"k8s.io/kubernetes/pkg/api"
)
// load environment variables from compose file
func loadEnvVars(envars []string) []kobject.EnvVar {
envs := []kobject.EnvVar{}
for _, e := range envars {
character := ""
equalPos := strings.Index(e, "=")
colonPos := strings.Index(e, ":")
switch {
case equalPos == -1 && colonPos == -1:
character = ""
case equalPos == -1 && colonPos != -1:
character = ":"
case equalPos != -1 && colonPos == -1:
character = "="
case equalPos != -1 && colonPos != -1:
if equalPos > colonPos {
character = ":"
} else {
character = "="
}
}
if character == "" {
envs = append(envs, kobject.EnvVar{
Name: e,
Value: os.Getenv(e),
})
} else {
values := strings.SplitN(e, character, 2)
// try to get value from os env
if values[1] == "" {
values[1] = os.Getenv(values[0])
}
envs = append(envs, kobject.EnvVar{
Name: values[0],
Value: values[1],
})
}
}
return envs
}
// getComposeFileDir returns the directory of the compose file.
// It assumes all the docker-compose files are in the same directory.
// TODO: fix (check if the file exists)
func getComposeFileDir(inputFiles []string) (string, error) {
inputFile := inputFiles[0]
if strings.Index(inputFile, "/") != 0 {
workDir, err := os.Getwd()
if err != nil {
return "", errors.Wrap(err, "Unable to retrieve compose file directory")
}
inputFile = filepath.Join(workDir, inputFile)
}
return filepath.Dir(inputFile), nil
}
func handleServiceType(ServiceType string) (string, error) {
switch strings.ToLower(ServiceType) {
case "", "clusterip":
return string(api.ServiceTypeClusterIP), nil
case "nodeport":
return string(api.ServiceTypeNodePort), nil
case "loadbalancer":
return string(api.ServiceTypeLoadBalancer), nil
default:
return "", errors.New("Unknown value " + ServiceType + " , supported values are 'NodePort, ClusterIP or LoadBalancer'")
}
}
func normalizeServiceNames(svcName string) string {
return strings.Replace(svcName, "_", "-", -1)
}
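
loadEnvVars above accepts three spellings: NAME=value, NAME:value, and a bare NAME that is resolved from the process environment; when both '=' and ':' appear, whichever comes first acts as the separator. The table-driven test below is only an illustrative sketch (it is not part of this PR) of how that precedence could be exercised next to the package's existing tests:

package compose

import (
    "os"
    "testing"
)

// TestLoadEnvVarsPrecedence is an illustrative sketch only; it is not part of this PR.
func TestLoadEnvVarsPrecedence(t *testing.T) {
    os.Setenv("ONLY_NAME", "from-os")
    defer os.Unsetenv("ONLY_NAME")

    cases := map[string][2]string{
        "FOO=bar":   {"FOO", "bar"},           // '=' separator
        "FOO:bar":   {"FOO", "bar"},           // ':' separator
        "A=b:c":     {"A", "b:c"},             // '=' comes first, ':' stays in the value
        "A:b=c":     {"A", "b=c"},             // ':' comes first, '=' stays in the value
        "ONLY_NAME": {"ONLY_NAME", "from-os"}, // bare name falls back to os.Getenv
    }
    for in, want := range cases {
        got := loadEnvVars([]string{in})
        if got[0].Name != want[0] || got[0].Value != want[1] {
            t.Errorf("loadEnvVars(%q) = %v, want %v", in, got[0], want)
        }
    }
}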

pkg/loader/compose/v1v2.go Normal file

@@ -0,0 +1,268 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package compose
import (
"fmt"
"net"
"os"
"path/filepath"
"strconv"
"strings"
"k8s.io/kubernetes/pkg/api"
log "github.com/Sirupsen/logrus"
"github.com/docker/libcompose/config"
"github.com/docker/libcompose/lookup"
"github.com/docker/libcompose/project"
"github.com/kubernetes-incubator/kompose/pkg/kobject"
"github.com/pkg/errors"
)
// Parse Docker Compose with libcompose (which only supports v1 and v2). Eventually we will
// switch to using only libcompose once it supports v3.
func parseV1V2(files []string) (kobject.KomposeObject, error) {
// Gather the appropriate context for parsing
context := &project.Context{}
context.ComposeFiles = files
if context.ResourceLookup == nil {
context.ResourceLookup = &lookup.FileResourceLookup{}
}
if context.EnvironmentLookup == nil {
cwd, err := os.Getwd()
if err != nil {
return kobject.KomposeObject{}, errors.Wrap(err, "Unable to retrieve the current working directory")
}
context.EnvironmentLookup = &lookup.ComposableEnvLookup{
Lookups: []config.EnvironmentLookup{
&lookup.EnvfileLookup{
Path: filepath.Join(cwd, ".env"),
},
&lookup.OsEnvLookup{},
},
}
}
// Load the context and let's start parsing
composeObject := project.NewProject(context, nil, nil)
err := composeObject.Parse()
if err != nil {
return kobject.KomposeObject{}, errors.Wrap(err, "composeObject.Parse() failed, Failed to load compose file")
}
noSupKeys := checkUnsupportedKey(composeObject)
for _, keyName := range noSupKeys {
log.Warningf("Unsupported %s key - ignoring", keyName)
}
// Map the parsed struct to a struct we understand (kobject)
komposeObject, err := libComposeToKomposeMapping(composeObject)
if err != nil {
return kobject.KomposeObject{}, err
}
return komposeObject, nil
}
// Load ports from compose file
func loadPorts(composePorts []string) ([]kobject.Ports, error) {
ports := []kobject.Ports{}
character := ":"
// For each port listed
for _, port := range composePorts {
// Get the TCP / UDP protocol. Checks to see if it splits in 2 with '/' character.
// ex. 15000:15000/tcp
// else, set a default protocol of using TCP
proto := api.ProtocolTCP
protocolCheck := strings.Split(port, "/")
if len(protocolCheck) == 2 {
if strings.EqualFold("tcp", protocolCheck[1]) {
proto = api.ProtocolTCP
} else if strings.EqualFold("udp", protocolCheck[1]) {
proto = api.ProtocolUDP
} else {
return nil, fmt.Errorf("invalid protocol %q", protocolCheck[1])
}
}
// Split up the ports / IP without the "/tcp" or "/udp" appended to it
justPorts := strings.Split(protocolCheck[0], character)
if len(justPorts) == 3 {
// ex. 127.0.0.1:80:80
// Get the IP address
hostIP := justPorts[0]
ip := net.ParseIP(hostIP)
if ip.To4() == nil && ip.To16() == nil {
return nil, fmt.Errorf("%q contains an invalid IPv4 or IPv6 IP address", port)
}
// Get the host port
hostPortInt, err := strconv.Atoi(justPorts[1])
if err != nil {
return nil, fmt.Errorf("invalid host port %q valid example: 127.0.0.1:80:80", port)
}
// Get the container port
containerPortInt, err := strconv.Atoi(justPorts[2])
if err != nil {
return nil, fmt.Errorf("invalid container port %q valid example: 127.0.0.1:80:80", port)
}
// Convert to a kobject struct with ports as well as IP
ports = append(ports, kobject.Ports{
HostPort: int32(hostPortInt),
ContainerPort: int32(containerPortInt),
HostIP: hostIP,
Protocol: proto,
})
} else if len(justPorts) == 2 {
// ex. 80:80
// Get the host port
hostPortInt, err := strconv.Atoi(justPorts[0])
if err != nil {
return nil, fmt.Errorf("invalid host port %q valid example: 80:80", port)
}
// Get the container port
containerPortInt, err := strconv.Atoi(justPorts[1])
if err != nil {
return nil, fmt.Errorf("invalid container port %q valid example: 80:80", port)
}
// Convert to a kobject struct and add to the list of ports
ports = append(ports, kobject.Ports{
HostPort: int32(hostPortInt),
ContainerPort: int32(containerPortInt),
Protocol: proto,
})
} else {
// ex. 80
containerPortInt, err := strconv.Atoi(justPorts[0])
if err != nil {
return nil, fmt.Errorf("invalid container port %q valid example: 80", port)
}
ports = append(ports, kobject.Ports{
ContainerPort: int32(containerPortInt),
Protocol: proto,
})
}
}
return ports, nil
}
// Uses libcompose's APIProject type and converts it to a Kompose object for us to understand
func libComposeToKomposeMapping(composeObject *project.Project) (kobject.KomposeObject, error) {
// Initialize what's going to be returned
komposeObject := kobject.KomposeObject{
ServiceConfigs: make(map[string]kobject.ServiceConfig),
LoadedFrom: "compose",
}
// Here we "clean up" the service configuration so we return something that includes
// all relevant information as well as avoid the unsupported keys as well.
for name, composeServiceConfig := range composeObject.ServiceConfigs.All() {
serviceConfig := kobject.ServiceConfig{}
serviceConfig.Image = composeServiceConfig.Image
serviceConfig.Build = composeServiceConfig.Build.Context
newName := normalizeServiceNames(composeServiceConfig.ContainerName)
serviceConfig.ContainerName = newName
if newName != composeServiceConfig.ContainerName {
log.Infof("Container name in service %q has been changed from %q to %q", name, composeServiceConfig.ContainerName, newName)
}
serviceConfig.Command = composeServiceConfig.Entrypoint
serviceConfig.Args = composeServiceConfig.Command
serviceConfig.Dockerfile = composeServiceConfig.Build.Dockerfile
serviceConfig.BuildArgs = composeServiceConfig.Build.Args
envs := loadEnvVars(composeServiceConfig.Environment)
serviceConfig.Environment = envs
//Validate dockerfile path
if filepath.IsAbs(serviceConfig.Dockerfile) {
log.Fatalf("%q defined in service %q is an absolute path, it must be a relative path.", serviceConfig.Dockerfile, name)
}
// load ports
ports, err := loadPorts(composeServiceConfig.Ports)
if err != nil {
return kobject.KomposeObject{}, errors.Wrap(err, "loadPorts failed. "+name+" failed to load ports from compose file")
}
serviceConfig.Port = ports
serviceConfig.WorkingDir = composeServiceConfig.WorkingDir
if composeServiceConfig.Volumes != nil {
for _, volume := range composeServiceConfig.Volumes.Volumes {
v := normalizeServiceNames(volume.String())
serviceConfig.Volumes = append(serviceConfig.Volumes, v)
}
}
// canonical "Custom Labels" handler
// Labels used to influence conversion of kompose will be handled
// from here for docker-compose. Each loader will have such handler.
for key, value := range composeServiceConfig.Labels {
switch key {
case "kompose.service.type":
serviceType, err := handleServiceType(value)
if err != nil {
return kobject.KomposeObject{}, errors.Wrap(err, "handleServiceType failed")
}
serviceConfig.ServiceType = serviceType
case "kompose.service.expose":
serviceConfig.ExposeService = strings.ToLower(value)
}
}
// convert compose labels to annotations
serviceConfig.Annotations = map[string]string(composeServiceConfig.Labels)
serviceConfig.CPUQuota = int64(composeServiceConfig.CPUQuota)
serviceConfig.CapAdd = composeServiceConfig.CapAdd
serviceConfig.CapDrop = composeServiceConfig.CapDrop
serviceConfig.Pid = composeServiceConfig.Pid
serviceConfig.Expose = composeServiceConfig.Expose
serviceConfig.Privileged = composeServiceConfig.Privileged
serviceConfig.Restart = composeServiceConfig.Restart
serviceConfig.User = composeServiceConfig.User
serviceConfig.VolumesFrom = composeServiceConfig.VolumesFrom
serviceConfig.Stdin = composeServiceConfig.StdinOpen
serviceConfig.Tty = composeServiceConfig.Tty
serviceConfig.MemLimit = composeServiceConfig.MemLimit
serviceConfig.TmpFs = composeServiceConfig.Tmpfs
serviceConfig.StopGracePeriod = composeServiceConfig.StopGracePeriod
komposeObject.ServiceConfigs[normalizeServiceNames(name)] = serviceConfig
if normalizeServiceNames(name) != name {
log.Infof("Service name in docker-compose has been changed from %q to %q", name, normalizeServiceNames(name))
}
}
return komposeObject, nil
}
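
loadPorts above handles the three legacy string forms (CONTAINER, HOST:CONTAINER, and IP:HOST:CONTAINER) with an optional /tcp or /udp suffix, defaulting to TCP. An illustrative sketch of a test (not part of this PR) showing the kobject.Ports each form produces:

package compose

import (
    "testing"

    "github.com/kubernetes-incubator/kompose/pkg/kobject"
    "k8s.io/kubernetes/pkg/api"
)

// TestLoadPortsForms is an illustrative sketch only; it is not part of this PR.
func TestLoadPortsForms(t *testing.T) {
    got, err := loadPorts([]string{"8080/udp", "80:80", "127.0.0.1:8001:8001"})
    if err != nil {
        t.Fatal(err)
    }
    want := []kobject.Ports{
        // Bare container port with an explicit protocol.
        {ContainerPort: 8080, Protocol: api.ProtocolUDP},
        // HOST:CONTAINER, protocol defaults to TCP.
        {HostPort: 80, ContainerPort: 80, Protocol: api.ProtocolTCP},
        // IP:HOST:CONTAINER keeps the host IP.
        {HostPort: 8001, ContainerPort: 8001, HostIP: "127.0.0.1", Protocol: api.ProtocolTCP},
    }
    for i := range want {
        if got[i] != want[i] {
            t.Errorf("port %d: got %v, want %v", i, got[i], want[i])
        }
    }
}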

pkg/loader/compose/v3.go Normal file

@@ -0,0 +1,234 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package compose
import (
libcomposeyaml "github.com/docker/libcompose/yaml"
"io/ioutil"
"strings"
"k8s.io/kubernetes/pkg/api"
"github.com/docker/cli/cli/compose/loader"
"github.com/docker/cli/cli/compose/types"
log "github.com/Sirupsen/logrus"
"github.com/kubernetes-incubator/kompose/pkg/kobject"
"github.com/pkg/errors"
)
// The purpose of this is not to deploy, but to be able to parse
// v3 of Docker Compose into a suitable format: the types.Config returned
// by docker/cli's compose loader.
func parseV3(files []string) (kobject.KomposeObject, error) {
// In order to get v3 parsing to work, we go through a few preliminary steps with
// github.com/docker/cli so that its output can be correctly converted to a kobject.KomposeObject
// Gather the working directory
workingDir, err := getComposeFileDir(files)
if err != nil {
return kobject.KomposeObject{}, err
}
// Load and then parse the YAML first!
loadedFile, err := ioutil.ReadFile(files[0])
if err != nil {
return kobject.KomposeObject{}, err
}
// Parse the Compose File
parsedComposeFile, err := loader.ParseYAML(loadedFile)
if err != nil {
return kobject.KomposeObject{}, err
}
// Config file
configFile := types.ConfigFile{
Filename: files[0],
Config: parsedComposeFile,
}
// Config details
// Environment is nil as docker/cli loads the appropriate environmental values itself
configDetails := types.ConfigDetails{
WorkingDir: workingDir,
ConfigFiles: []types.ConfigFile{configFile},
Environment: nil,
}
// Actual config
// We load it in order to retrieve the parsed output configuration!
// This will output a github.com/docker/cli ServiceConfig
// Which is similar to our version of ServiceConfig
config, err := loader.Load(configDetails)
if err != nil {
return kobject.KomposeObject{}, err
}
// TODO: Check all "unsupported" keys and output details
// Specifically, keys such as "volumes_from" are not supported in V3.
// Finally, we convert the object from docker/cli's ServiceConfig to our appropriate one
komposeObject, err := dockerComposeToKomposeMapping(config)
if err != nil {
return kobject.KomposeObject{}, err
}
return komposeObject, nil
}
// Convert the Docker Compose v3 volumes to []string (the old way)
// TODO: Check to see if it's a "bind" or "volume". Ignore for now.
// TODO: Refactor it similar to loadV3Ports
// See: https://docs.docker.com/compose/compose-file/#long-syntax-2
func loadV3Volumes(volumes []types.ServiceVolumeConfig) []string {
var volArray []string
for _, vol := range volumes {
// There will *always* be Source when parsing
v := normalizeServiceNames(vol.Source)
if vol.Target != "" {
v = v + ":" + vol.Target
}
if vol.ReadOnly {
v = v + ":ro"
}
volArray = append(volArray, v)
}
return volArray
}
// Convert Docker Compose v3 ports to kobject.Ports
func loadV3Ports(ports []types.ServicePortConfig) []kobject.Ports {
komposePorts := []kobject.Ports{}
for _, port := range ports {
// Convert to a kobject struct with ports
// NOTE: V3 doesn't use IP (they utilize Swarm instead for host-networking).
// Thus, IP is blank.
komposePorts = append(komposePorts, kobject.Ports{
HostPort: int32(port.Published),
ContainerPort: int32(port.Target),
HostIP: "",
Protocol: api.Protocol(strings.ToUpper(string(port.Protocol))),
})
}
return komposePorts
}
func dockerComposeToKomposeMapping(composeObject *types.Config) (kobject.KomposeObject, error) {
// Step 1. Initialize what's going to be returned
komposeObject := kobject.KomposeObject{
ServiceConfigs: make(map[string]kobject.ServiceConfig),
LoadedFrom: "compose",
}
// Step 2. Parse through the object and convert it to kobject.KomposeObject!
// Here we "clean up" the service configuration so we return something that includes
// all relevant information as well as avoid the unsupported keys as well.
for _, composeServiceConfig := range composeObject.Services {
// Standard import
// No need to modify before importation
name := composeServiceConfig.Name
serviceConfig := kobject.ServiceConfig{}
serviceConfig.Image = composeServiceConfig.Image
serviceConfig.WorkingDir = composeServiceConfig.WorkingDir
serviceConfig.Annotations = map[string]string(composeServiceConfig.Labels)
serviceConfig.CapAdd = composeServiceConfig.CapAdd
serviceConfig.CapDrop = composeServiceConfig.CapDrop
serviceConfig.Expose = composeServiceConfig.Expose
serviceConfig.Privileged = composeServiceConfig.Privileged
serviceConfig.Restart = composeServiceConfig.Restart
serviceConfig.User = composeServiceConfig.User
serviceConfig.Stdin = composeServiceConfig.StdinOpen
serviceConfig.Tty = composeServiceConfig.Tty
serviceConfig.TmpFs = composeServiceConfig.Tmpfs
serviceConfig.ContainerName = composeServiceConfig.ContainerName
serviceConfig.Command = composeServiceConfig.Entrypoint
serviceConfig.Args = composeServiceConfig.Command
// This is a bit messy since we use yaml.MemStringorInt
// TODO: Refactor yaml.MemStringorInt in kobject.go to int64
// Since Deploy.Resources.Limits is not always initialized, we must check against an empty types.Resources before continuing
if (composeServiceConfig.Deploy.Resources != types.Resources{}) {
serviceConfig.MemLimit = libcomposeyaml.MemStringorInt(composeServiceConfig.Deploy.Resources.Limits.MemoryBytes)
}
// POOF. volumes_from is gone in v3. docker/cli will error out if volumes_from is added in v3
// serviceConfig.VolumesFrom = composeServiceConfig.VolumesFrom
// TODO: Build is not yet supported, see:
// https://github.com/docker/cli/blob/master/cli/compose/types/types.go#L9
// We will have to *manually* add this / parse.
// serviceConfig.Build = composeServiceConfig.Build.Context
// serviceConfig.Dockerfile = composeServiceConfig.Build.Dockerfile
// Gather the environment values
// Docker Compose v3 uses map[string]*string while we use []kobject.EnvVar,
// so convert between the two here
for name, value := range composeServiceConfig.Environment {
env := kobject.EnvVar{Name: name, Value: *value}
serviceConfig.Environment = append(serviceConfig.Environment, env)
}
// Parse the ports
// v3 uses a new format called "long syntax" starting in 3.2
// https://docs.docker.com/compose/compose-file/#ports
serviceConfig.Port = loadV3Ports(composeServiceConfig.Ports)
// Parse the volumes
// Again, in v3, we use the "long syntax" for volumes in terms of parsing
// https://docs.docker.com/compose/compose-file/#long-syntax-2
serviceConfig.Volumes = loadV3Volumes(composeServiceConfig.Volumes)
// Label handler
// Labels used to influence conversion of kompose will be handled
// from here for docker-compose. Each loader will have such handler.
for key, value := range composeServiceConfig.Labels {
switch key {
case "kompose.service.type":
serviceType, err := handleServiceType(value)
if err != nil {
return kobject.KomposeObject{}, errors.Wrap(err, "handleServiceType failed")
}
serviceConfig.ServiceType = serviceType
case "kompose.service.expose":
serviceConfig.ExposeService = strings.ToLower(value)
}
}
// Log if the name has been changed
if normalizeServiceNames(name) != name {
log.Infof("Service name in docker-compose has been changed from %q to %q", name, normalizeServiceNames(name))
}
// Final step, add to the array!
komposeObject.ServiceConfigs[normalizeServiceNames(name)] = serviceConfig
}
return komposeObject, nil
}
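
Because dockerComposeToKomposeMapping above is driven entirely by docker/cli's types.Config, it can be exercised without touching the filesystem. The sketch below is not part of this PR and uses invented service values; it also shows the underscore-to-dash service-name normalization:

package compose

import (
    "testing"

    "github.com/docker/cli/cli/compose/types"
)

// TestDockerComposeToKomposeMapping is an illustrative sketch only; it is not
// part of this PR, and the service values are invented for the example.
func TestDockerComposeToKomposeMapping(t *testing.T) {
    hosts := "dns"
    config := &types.Config{
        Services: []types.ServiceConfig{
            {
                Name:        "redis_master", // the underscore is normalized to a dash
                Image:       "gcr.io/google_containers/redis:e2e",
                Environment: map[string]*string{"GET_HOSTS_FROM": &hosts},
                Ports:       []types.ServicePortConfig{{Target: 6379, Published: 6379, Protocol: "tcp"}},
            },
        },
    }

    obj, err := dockerComposeToKomposeMapping(config)
    if err != nil {
        t.Fatal(err)
    }
    svc, ok := obj.ServiceConfigs["redis-master"] // note the normalized key
    if !ok {
        t.Fatalf("expected service %q in %v", "redis-master", obj.ServiceConfigs)
    }
    if svc.Image != "gcr.io/google_containers/redis:e2e" || svc.Port[0].ContainerPort != 6379 {
        t.Errorf("unexpected mapping: %+v", svc)
    }
}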


@@ -249,4 +249,29 @@ convert::expect_success "kompose --provider=openshift convert --stdout -j" "$KOM
# Return back to the original path
cd $CURRENT_DIR
# Test V3 Support of Docker Compose
# Test volumes are passed correctly
convert::expect_success "kompose convert --stdout -j -f $KOMPOSE_ROOT/script/test/fixtures/v3/docker-compose-volumes.yaml" "$KOMPOSE_ROOT/script/test/fixtures/v3/output-volumes-k8s.json"
# Test environment variables are passed correctly
convert::expect_success "kompose convert --stdout -j -f $KOMPOSE_ROOT/script/test/fixtures/v3/docker-compose-env.yaml" "$KOMPOSE_ROOT/script/test/fixtures/v3/output-env-k8s.json"
# Test that two files with different versions fail
convert::expect_failure "kompose convert --stdout -j -f $KOMPOSE_ROOT/script/test/fixtures/v3/docker-compose.yaml -f $KOMPOSE_ROOT/script/test/fixtures/etherpad/docker-compose.yml"
# Kubernetes
convert::expect_success "kompose convert --stdout -j -f $KOMPOSE_ROOT/script/test/fixtures/v3/docker-compose.yaml" "$KOMPOSE_ROOT/script/test/fixtures/v3/output-k8s.json"
# OpenShift
convert::expect_success "kompose convert --provider=openshift --stdout -j -f $KOMPOSE_ROOT/script/test/fixtures/v3/docker-compose.yaml" "$KOMPOSE_ROOT/script/test/fixtures/v3/output-os.json"
# Test the "full example" from https://raw.githubusercontent.com/aanand/compose-file/master/loader/example1.env
# Kubernetes
convert::expect_success_and_warning "kompose convert --stdout -j -f $KOMPOSE_ROOT/script/test/fixtures/v3/docker-compose-full-example.yaml" "$KOMPOSE_ROOT/script/test/fixtures/v3/output-k8s-full-example.json"
# OpenShift
convert::expect_success_and_warning "kompose convert --provider=openshift --stdout -j -f $KOMPOSE_ROOT/script/test/fixtures/v3/docker-compose-full-example.yaml" "$KOMPOSE_ROOT/script/test/fixtures/v3/output-os-full-example.json"
exit $EXIT_STATUS


@@ -0,0 +1,6 @@
version: '3'
services:
foo:
image: foo/bar:latest
environment:
FOO: foo


@@ -0,0 +1,268 @@
version: "3"
services:
foo:
cap_add:
- ALL
cap_drop:
- NET_ADMIN
- SYS_ADMIN
cgroup_parent: m-executor-abcd
# String or list
command: bundle exec thin -p 3000
# command: ["bundle", "exec", "thin", "-p", "3000"]
container_name: my-web-container
depends_on:
- db
- redis
deploy:
mode: replicated
replicas: 6
labels: [FOO=BAR]
update_config:
parallelism: 3
delay: 10s
failure_action: continue
monitor: 60s
max_failure_ratio: 0.3
resources:
limits:
cpus: '0.001'
memory: 50M
reservations:
cpus: '0.0001'
memory: 20M
restart_policy:
condition: on_failure
delay: 5s
max_attempts: 3
window: 120s
placement:
constraints: [node=foo]
devices:
- "/dev/ttyUSB0:/dev/ttyUSB0"
# String or list
# dns: 8.8.8.8
dns:
- 8.8.8.8
- 9.9.9.9
# String or list
# dns_search: example.com
dns_search:
- dc1.example.com
- dc2.example.com
domainname: foo.com
# String or list
# entrypoint: /code/entrypoint.sh -p 3000
entrypoint: ["/code/entrypoint.sh", "-p", "3000"]
# Items can be strings or numbers
expose:
- "3000"
- 8000
external_links:
- redis_1
- project_db_1:mysql
- project_db_1:postgresql
# Mapping or list
# Mapping values must be strings
# extra_hosts:
# somehost: "162.242.195.82"
# otherhost: "50.31.209.229"
extra_hosts:
- "somehost:162.242.195.82"
- "otherhost:50.31.209.229"
hostname: foo
healthcheck:
test: echo "hello world"
interval: 10s
timeout: 1s
retries: 5
# Any valid image reference - repo, tag, id, sha
image: redis
# image: ubuntu:14.04
# image: tutum/influxdb
# image: example-registry.com:4000/postgresql
# image: a4bc65fd
# image: busybox@sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d
ipc: host
# Mapping or list
# Mapping values can be strings, numbers or null
labels:
com.example.description: "Accounting webapp"
com.example.number: 42
com.example.empty-label:
# labels:
# - "com.example.description=Accounting webapp"
# - "com.example.number=42"
# - "com.example.empty-label"
links:
- db
- db:database
- redis
logging:
driver: syslog
options:
syslog-address: "tcp://192.168.0.42:123"
mac_address: 02:42:ac:11:65:43
# network_mode: "bridge"
# network_mode: "host"
# network_mode: "none"
# Use the network mode of an arbitrary container from another service
# network_mode: "service:db"
# Use the network mode of another container, specified by name or id
# network_mode: "container:some-container"
network_mode: "container:0cfeab0f748b9a743dc3da582046357c6ef497631c1a016d28d2bf9b4f899f7b"
networks:
some-network:
aliases:
- alias1
- alias3
other-network:
ipv4_address: 172.16.238.10
ipv6_address: 2001:3984:3989::10
other-other-network:
pid: "host"
ports:
- 3000
- "3000-3005"
- "8000:8000"
- "9090-9091:8080-8081"
- "49100:22"
- "127.0.0.1:8001:8001"
- "127.0.0.1:5000-5010:5000-5010"
privileged: true
read_only: true
restart: always
security_opt:
- label=level:s0:c100,c200
- label=type:svirt_apache_t
stdin_open: true
stop_grace_period: 20s
stop_signal: SIGUSR1
# String or list
# tmpfs: /run
tmpfs:
- /run
- /tmp
tty: true
ulimits:
# Single number or mapping with soft + hard limits
nproc: 65535
nofile:
soft: 20000
hard: 40000
user: someone
volumes:
# Just specify a path and let the Engine create a volume
- /var/lib/mysql
# Specify an absolute path mapping
- /opt/data:/var/lib/mysql
# Path on the host, relative to the Compose file
- .:/code
- ./static:/var/www/html
# User-relative path
- ~/configs:/etc/configs/:ro
# Named volume
- datavolume:/var/lib/mysql
working_dir: /code
networks:
# Entries can be null, which specifies simply that a network
# called "{project name}_some-network" should be created and
# use the default driver
some-network:
other-network:
driver: overlay
driver_opts:
# Values can be strings or numbers
foo: "bar"
baz: 1
ipam:
driver: overlay
# driver_opts:
# # Values can be strings or numbers
# com.docker.network.enable_ipv6: "true"
# com.docker.network.numeric_value: 1
config:
- subnet: 172.16.238.0/24
# gateway: 172.16.238.1
- subnet: 2001:3984:3989::/64
# gateway: 2001:3984:3989::1
external-network:
# Specifies that a pre-existing network called "external-network"
# can be referred to within this file as "external-network"
external: true
other-external-network:
# Specifies that a pre-existing network called "my-cool-network"
# can be referred to within this file as "other-external-network"
external:
name: my-cool-network
volumes:
# Entries can be null, which specifies simply that a volume
# called "{project name}_some-volume" should be created and
# use the default driver
some-volume:
other-volume:
driver: flocker
driver_opts:
# Values can be strings or numbers
foo: "bar"
baz: 1
external-volume:
# Specifies that a pre-existing volume called "external-volume"
# can be referred to within this file as "external-volume"
external: true
other-external-volume:
# Specifies that a pre-existing volume called "my-cool-volume"
# can be referred to within this file as "other-external-volume"
external:
name: my-cool-volume


@@ -0,0 +1,8 @@
version: "3"
services:
foobar:
image: foo/bar:latest
volumes:
- /tmp/foo/bar


@@ -0,0 +1,24 @@
version: "3"
services:
redis-master:
image: gcr.io/google_containers/redis:e2e
ports:
- "6379"
redis-slave:
image: gcr.io/google_samples/gb-redisslave:v1
ports:
- "6379"
environment:
- GET_HOSTS_FROM=dns
frontend:
image: gcr.io/google-samples/gb-frontend:v4
ports:
- "80:80"
environment:
- GET_HOSTS_FROM=dns
labels:
kompose.service.type: LoadBalancer

script/test/fixtures/v3/example1.env vendored Normal file

@@ -0,0 +1,8 @@
# passed through
FOO=1
# overridden in example2.env
BAR=1
# overridden in full-example.yml
BAZ=1

script/test/fixtures/v3/example2.env vendored Normal file

@@ -0,0 +1 @@
BAR=2


@@ -0,0 +1,74 @@
{
"kind": "List",
"apiVersion": "v1",
"metadata": {},
"items": [
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "foo",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foo"
}
},
"spec": {
"ports": [
{
"name": "headless",
"port": 55555,
"targetPort": 0
}
],
"selector": {
"io.kompose.service": "foo"
},
"clusterIP": "None"
},
"status": {
"loadBalancer": {}
}
},
{
"kind": "Deployment",
"apiVersion": "extensions/v1beta1",
"metadata": {
"name": "foo",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foo"
}
},
"spec": {
"replicas": 1,
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foo"
}
},
"spec": {
"containers": [
{
"name": "foo",
"image": "foo/bar:latest",
"env": [
{
"name": "FOO",
"value": "foo"
}
],
"resources": {}
}
],
"restartPolicy": "Always"
}
},
"strategy": {}
},
"status": {}
}
]
}


@@ -0,0 +1,509 @@
{
"kind": "List",
"apiVersion": "v1",
"metadata": {},
"items": [
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "foo",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foo"
},
"annotations": {
"com.example.description": "Accounting webapp",
"com.example.empty-label": "",
"com.example.number": "42"
}
},
"spec": {
"ports": [
{
"name": "3000",
"port": 3000,
"targetPort": 3000
},
{
"name": "3000",
"port": 3000,
"targetPort": 3000
},
{
"name": "3001",
"port": 3001,
"targetPort": 3001
},
{
"name": "3002",
"port": 3002,
"targetPort": 3002
},
{
"name": "3003",
"port": 3003,
"targetPort": 3003
},
{
"name": "3004",
"port": 3004,
"targetPort": 3004
},
{
"name": "3005",
"port": 3005,
"targetPort": 3005
},
{
"name": "8000",
"port": 8000,
"targetPort": 8000
},
{
"name": "9090",
"port": 9090,
"targetPort": 8080
},
{
"name": "9091",
"port": 9091,
"targetPort": 8081
},
{
"name": "49100",
"port": 49100,
"targetPort": 22
},
{
"name": "8001",
"port": 8001,
"targetPort": 8001
},
{
"name": "5000",
"port": 5000,
"targetPort": 5000
},
{
"name": "5001",
"port": 5001,
"targetPort": 5001
},
{
"name": "5002",
"port": 5002,
"targetPort": 5002
},
{
"name": "5003",
"port": 5003,
"targetPort": 5003
},
{
"name": "5004",
"port": 5004,
"targetPort": 5004
},
{
"name": "5005",
"port": 5005,
"targetPort": 5005
},
{
"name": "5006",
"port": 5006,
"targetPort": 5006
},
{
"name": "5007",
"port": 5007,
"targetPort": 5007
},
{
"name": "5008",
"port": 5008,
"targetPort": 5008
},
{
"name": "5009",
"port": 5009,
"targetPort": 5009
},
{
"name": "5010",
"port": 5010,
"targetPort": 5010
}
],
"selector": {
"io.kompose.service": "foo"
}
},
"status": {
"loadBalancer": {}
}
},
{
"kind": "Deployment",
"apiVersion": "extensions/v1beta1",
"metadata": {
"name": "foo",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foo"
},
"annotations": {
"com.example.description": "Accounting webapp",
"com.example.empty-label": "",
"com.example.number": "42"
}
},
"spec": {
"replicas": 1,
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foo"
}
},
"spec": {
"volumes": [
{
"name": "foo-claim0",
"persistentVolumeClaim": {
"claimName": "foo-claim0"
}
},
{
"name": "foo-claim1",
"persistentVolumeClaim": {
"claimName": "foo-claim1"
}
},
{
"name": "foo-claim2",
"persistentVolumeClaim": {
"claimName": "foo-claim2"
}
},
{
"name": "foo-claim3",
"persistentVolumeClaim": {
"claimName": "foo-claim3"
}
},
{
"name": "foo-claim4",
"persistentVolumeClaim": {
"claimName": "foo-claim4",
"readOnly": true
}
},
{
"name": "datavolume",
"persistentVolumeClaim": {
"claimName": "datavolume"
}
},
{
"name": "foo-tmpfs0",
"emptyDir": {
"medium": "Memory"
}
},
{
"name": "foo-tmpfs1",
"emptyDir": {
"medium": "Memory"
}
}
],
"containers": [
{
"name": "my-web-container",
"image": "redis",
"command": [
"/code/entrypoint.sh",
"-p",
"3000"
],
"args": [
"bundle",
"exec",
"thin",
"-p",
"3000"
],
"workingDir": "/code",
"ports": [
{
"containerPort": 3000
},
{
"containerPort": 3000
},
{
"containerPort": 3001
},
{
"containerPort": 3002
},
{
"containerPort": 3003
},
{
"containerPort": 3004
},
{
"containerPort": 3005
},
{
"containerPort": 8000
},
{
"containerPort": 8080
},
{
"containerPort": 8081
},
{
"containerPort": 22
},
{
"containerPort": 8001
},
{
"containerPort": 5000
},
{
"containerPort": 5001
},
{
"containerPort": 5002
},
{
"containerPort": 5003
},
{
"containerPort": 5004
},
{
"containerPort": 5005
},
{
"containerPort": 5006
},
{
"containerPort": 5007
},
{
"containerPort": 5008
},
{
"containerPort": 5009
},
{
"containerPort": 5010
}
],
"resources": {
"limits": {
"memory": "52428800"
}
},
"volumeMounts": [
{
"name": "foo-claim0",
"mountPath": "/var/lib/mysql"
},
{
"name": "foo-claim1",
"mountPath": "/var/lib/mysql"
},
{
"name": "foo-claim2",
"mountPath": "/code"
},
{
"name": "foo-claim3",
"mountPath": "/var/www/html"
},
{
"name": "foo-claim4",
"readOnly": true,
"mountPath": "/etc/configs/"
},
{
"name": "datavolume",
"mountPath": "/var/lib/mysql"
},
{
"name": "foo-tmpfs0",
"mountPath": "/run"
},
{
"name": "foo-tmpfs1",
"mountPath": "/tmp"
}
],
"securityContext": {
"capabilities": {
"add": [
"ALL"
],
"drop": [
"NET_ADMIN",
"SYS_ADMIN"
]
},
"privileged": true
},
"stdin": true,
"tty": true
}
],
"restartPolicy": "Always"
}
},
"strategy": {
"type": "Recreate"
}
},
"status": {}
},
{
"kind": "PersistentVolumeClaim",
"apiVersion": "v1",
"metadata": {
"name": "foo-claim0",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foo-claim0"
}
},
"spec": {
"accessModes": [
"ReadWriteOnce"
],
"resources": {
"requests": {
"storage": "100Mi"
}
}
},
"status": {}
},
{
"kind": "PersistentVolumeClaim",
"apiVersion": "v1",
"metadata": {
"name": "foo-claim1",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foo-claim1"
}
},
"spec": {
"accessModes": [
"ReadWriteOnce"
],
"resources": {
"requests": {
"storage": "100Mi"
}
}
},
"status": {}
},
{
"kind": "PersistentVolumeClaim",
"apiVersion": "v1",
"metadata": {
"name": "foo-claim2",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foo-claim2"
}
},
"spec": {
"accessModes": [
"ReadWriteOnce"
],
"resources": {
"requests": {
"storage": "100Mi"
}
}
},
"status": {}
},
{
"kind": "PersistentVolumeClaim",
"apiVersion": "v1",
"metadata": {
"name": "foo-claim3",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foo-claim3"
}
},
"spec": {
"accessModes": [
"ReadWriteOnce"
],
"resources": {
"requests": {
"storage": "100Mi"
}
}
},
"status": {}
},
{
"kind": "PersistentVolumeClaim",
"apiVersion": "v1",
"metadata": {
"name": "foo-claim4",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foo-claim4"
}
},
"spec": {
"accessModes": [
"ReadOnlyMany"
],
"resources": {
"requests": {
"storage": "100Mi"
}
}
},
"status": {}
},
{
"kind": "PersistentVolumeClaim",
"apiVersion": "v1",
"metadata": {
"name": "datavolume",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "datavolume"
}
},
"spec": {
"accessModes": [
"ReadWriteOnce"
],
"resources": {
"requests": {
"storage": "100Mi"
}
}
},
"status": {}
}
]
}

script/test/fixtures/v3/output-k8s.json vendored Normal file

@@ -0,0 +1,221 @@
{
"kind": "List",
"apiVersion": "v1",
"metadata": {},
"items": [
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "frontend",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "frontend"
},
"annotations": {
"kompose.service.type": "LoadBalancer"
}
},
"spec": {
"ports": [
{
"name": "80",
"port": 80,
"targetPort": 80
}
],
"selector": {
"io.kompose.service": "frontend"
},
"type": "LoadBalancer"
},
"status": {
"loadBalancer": {}
}
},
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "redis-master",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "redis-master"
}
},
"spec": {
"ports": [
{
"name": "6379",
"port": 6379,
"targetPort": 6379
}
],
"selector": {
"io.kompose.service": "redis-master"
}
},
"status": {
"loadBalancer": {}
}
},
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "redis-slave",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "redis-slave"
}
},
"spec": {
"ports": [
{
"name": "6379",
"port": 6379,
"targetPort": 6379
}
],
"selector": {
"io.kompose.service": "redis-slave"
}
},
"status": {
"loadBalancer": {}
}
},
{
"kind": "Deployment",
"apiVersion": "extensions/v1beta1",
"metadata": {
"name": "frontend",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "frontend"
},
"annotations": {
"kompose.service.type": "LoadBalancer"
}
},
"spec": {
"replicas": 1,
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"io.kompose.service": "frontend"
}
},
"spec": {
"containers": [
{
"name": "frontend",
"image": "gcr.io/google-samples/gb-frontend:v4",
"ports": [
{
"containerPort": 80
}
],
"env": [
{
"name": "GET_HOSTS_FROM",
"value": "dns"
}
],
"resources": {}
}
],
"restartPolicy": "Always"
}
},
"strategy": {}
},
"status": {}
},
{
"kind": "Deployment",
"apiVersion": "extensions/v1beta1",
"metadata": {
"name": "redis-master",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "redis-master"
}
},
"spec": {
"replicas": 1,
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"io.kompose.service": "redis-master"
}
},
"spec": {
"containers": [
{
"name": "redis-master",
"image": "gcr.io/google_containers/redis:e2e",
"ports": [
{
"containerPort": 6379
}
],
"resources": {}
}
],
"restartPolicy": "Always"
}
},
"strategy": {}
},
"status": {}
},
{
"kind": "Deployment",
"apiVersion": "extensions/v1beta1",
"metadata": {
"name": "redis-slave",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "redis-slave"
}
},
"spec": {
"replicas": 1,
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"io.kompose.service": "redis-slave"
}
},
"spec": {
"containers": [
{
"name": "redis-slave",
"image": "gcr.io/google_samples/gb-redisslave:v1",
"ports": [
{
"containerPort": 6379
}
],
"env": [
{
"name": "GET_HOSTS_FROM",
"value": "dns"
}
],
"resources": {}
}
],
"restartPolicy": "Always"
}
},
"strategy": {}
},
"status": {}
}
]
}


@@ -0,0 +1,560 @@
{
"kind": "List",
"apiVersion": "v1",
"metadata": {},
"items": [
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "foo",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foo"
},
"annotations": {
"com.example.description": "Accounting webapp",
"com.example.empty-label": "",
"com.example.number": "42"
}
},
"spec": {
"ports": [
{
"name": "3000",
"port": 3000,
"targetPort": 3000
},
{
"name": "3000",
"port": 3000,
"targetPort": 3000
},
{
"name": "3001",
"port": 3001,
"targetPort": 3001
},
{
"name": "3002",
"port": 3002,
"targetPort": 3002
},
{
"name": "3003",
"port": 3003,
"targetPort": 3003
},
{
"name": "3004",
"port": 3004,
"targetPort": 3004
},
{
"name": "3005",
"port": 3005,
"targetPort": 3005
},
{
"name": "8000",
"port": 8000,
"targetPort": 8000
},
{
"name": "9090",
"port": 9090,
"targetPort": 8080
},
{
"name": "9091",
"port": 9091,
"targetPort": 8081
},
{
"name": "49100",
"port": 49100,
"targetPort": 22
},
{
"name": "8001",
"port": 8001,
"targetPort": 8001
},
{
"name": "5000",
"port": 5000,
"targetPort": 5000
},
{
"name": "5001",
"port": 5001,
"targetPort": 5001
},
{
"name": "5002",
"port": 5002,
"targetPort": 5002
},
{
"name": "5003",
"port": 5003,
"targetPort": 5003
},
{
"name": "5004",
"port": 5004,
"targetPort": 5004
},
{
"name": "5005",
"port": 5005,
"targetPort": 5005
},
{
"name": "5006",
"port": 5006,
"targetPort": 5006
},
{
"name": "5007",
"port": 5007,
"targetPort": 5007
},
{
"name": "5008",
"port": 5008,
"targetPort": 5008
},
{
"name": "5009",
"port": 5009,
"targetPort": 5009
},
{
"name": "5010",
"port": 5010,
"targetPort": 5010
}
],
"selector": {
"io.kompose.service": "foo"
}
},
"status": {
"loadBalancer": {}
}
},
{
"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
"name": "foo",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foo"
},
"annotations": {
"com.example.description": "Accounting webapp",
"com.example.empty-label": "",
"com.example.number": "42"
}
},
"spec": {
"strategy": {
"type": "Recreate",
"resources": {}
},
"triggers": [
{
"type": "ConfigChange"
},
{
"type": "ImageChange",
"imageChangeParams": {
"automatic": true,
"containerNames": [
"my-web-container"
],
"from": {
"kind": "ImageStreamTag",
"name": "foo:latest"
}
}
}
],
"replicas": 1,
"test": false,
"selector": {
"io.kompose.service": "foo"
},
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foo"
}
},
"spec": {
"volumes": [
{
"name": "foo-claim0",
"persistentVolumeClaim": {
"claimName": "foo-claim0"
}
},
{
"name": "foo-claim1",
"persistentVolumeClaim": {
"claimName": "foo-claim1"
}
},
{
"name": "foo-claim2",
"persistentVolumeClaim": {
"claimName": "foo-claim2"
}
},
{
"name": "foo-claim3",
"persistentVolumeClaim": {
"claimName": "foo-claim3"
}
},
{
"name": "foo-claim4",
"persistentVolumeClaim": {
"claimName": "foo-claim4",
"readOnly": true
}
},
{
"name": "datavolume",
"persistentVolumeClaim": {
"claimName": "datavolume"
}
},
{
"name": "foo-tmpfs0",
"emptyDir": {
"medium": "Memory"
}
},
{
"name": "foo-tmpfs1",
"emptyDir": {
"medium": "Memory"
}
}
],
"containers": [
{
"name": "my-web-container",
"image": " ",
"command": [
"/code/entrypoint.sh",
"-p",
"3000"
],
"args": [
"bundle",
"exec",
"thin",
"-p",
"3000"
],
"workingDir": "/code",
"ports": [
{
"containerPort": 3000
},
{
"containerPort": 3000
},
{
"containerPort": 3001
},
{
"containerPort": 3002
},
{
"containerPort": 3003
},
{
"containerPort": 3004
},
{
"containerPort": 3005
},
{
"containerPort": 8000
},
{
"containerPort": 8080
},
{
"containerPort": 8081
},
{
"containerPort": 22
},
{
"containerPort": 8001
},
{
"containerPort": 5000
},
{
"containerPort": 5001
},
{
"containerPort": 5002
},
{
"containerPort": 5003
},
{
"containerPort": 5004
},
{
"containerPort": 5005
},
{
"containerPort": 5006
},
{
"containerPort": 5007
},
{
"containerPort": 5008
},
{
"containerPort": 5009
},
{
"containerPort": 5010
}
],
"resources": {
"limits": {
"memory": "52428800"
}
},
"volumeMounts": [
{
"name": "foo-claim0",
"mountPath": "/var/lib/mysql"
},
{
"name": "foo-claim1",
"mountPath": "/var/lib/mysql"
},
{
"name": "foo-claim2",
"mountPath": "/code"
},
{
"name": "foo-claim3",
"mountPath": "/var/www/html"
},
{
"name": "foo-claim4",
"readOnly": true,
"mountPath": "/etc/configs/"
},
{
"name": "datavolume",
"mountPath": "/var/lib/mysql"
},
{
"name": "foo-tmpfs0",
"mountPath": "/run"
},
{
"name": "foo-tmpfs1",
"mountPath": "/tmp"
}
],
"securityContext": {
"capabilities": {
"add": [
"ALL"
],
"drop": [
"NET_ADMIN",
"SYS_ADMIN"
]
},
"privileged": true
},
"stdin": true,
"tty": true
}
],
"restartPolicy": "Always"
}
}
},
"status": {}
},
{
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
"name": "foo",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foo"
}
},
"spec": {
"tags": [
{
"name": "latest",
"annotations": null,
"from": {
"kind": "DockerImage",
"name": "redis"
},
"generation": null,
"importPolicy": {}
}
]
},
"status": {
"dockerImageRepository": ""
}
},
{
"kind": "PersistentVolumeClaim",
"apiVersion": "v1",
"metadata": {
"name": "foo-claim0",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foo-claim0"
}
},
"spec": {
"accessModes": [
"ReadWriteOnce"
],
"resources": {
"requests": {
"storage": "100Mi"
}
}
},
"status": {}
},
{
"kind": "PersistentVolumeClaim",
"apiVersion": "v1",
"metadata": {
"name": "foo-claim1",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foo-claim1"
}
},
"spec": {
"accessModes": [
"ReadWriteOnce"
],
"resources": {
"requests": {
"storage": "100Mi"
}
}
},
"status": {}
},
{
"kind": "PersistentVolumeClaim",
"apiVersion": "v1",
"metadata": {
"name": "foo-claim2",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foo-claim2"
}
},
"spec": {
"accessModes": [
"ReadWriteOnce"
],
"resources": {
"requests": {
"storage": "100Mi"
}
}
},
"status": {}
},
{
"kind": "PersistentVolumeClaim",
"apiVersion": "v1",
"metadata": {
"name": "foo-claim3",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foo-claim3"
}
},
"spec": {
"accessModes": [
"ReadWriteOnce"
],
"resources": {
"requests": {
"storage": "100Mi"
}
}
},
"status": {}
},
{
"kind": "PersistentVolumeClaim",
"apiVersion": "v1",
"metadata": {
"name": "foo-claim4",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foo-claim4"
}
},
"spec": {
"accessModes": [
"ReadOnlyMany"
],
"resources": {
"requests": {
"storage": "100Mi"
}
}
},
"status": {}
},
{
"kind": "PersistentVolumeClaim",
"apiVersion": "v1",
"metadata": {
"name": "datavolume",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "datavolume"
}
},
"spec": {
"accessModes": [
"ReadWriteOnce"
],
"resources": {
"requests": {
"storage": "100Mi"
}
}
},
"status": {}
}
]
}

377
script/test/fixtures/v3/output-os.json vendored Normal file
View File

@ -0,0 +1,377 @@
{
"kind": "List",
"apiVersion": "v1",
"metadata": {},
"items": [
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "frontend",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "frontend"
},
"annotations": {
"kompose.service.type": "LoadBalancer"
}
},
"spec": {
"ports": [
{
"name": "80",
"port": 80,
"targetPort": 80
}
],
"selector": {
"io.kompose.service": "frontend"
},
"type": "LoadBalancer"
},
"status": {
"loadBalancer": {}
}
},
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "redis-master",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "redis-master"
}
},
"spec": {
"ports": [
{
"name": "6379",
"port": 6379,
"targetPort": 6379
}
],
"selector": {
"io.kompose.service": "redis-master"
}
},
"status": {
"loadBalancer": {}
}
},
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "redis-slave",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "redis-slave"
}
},
"spec": {
"ports": [
{
"name": "6379",
"port": 6379,
"targetPort": 6379
}
],
"selector": {
"io.kompose.service": "redis-slave"
}
},
"status": {
"loadBalancer": {}
}
},
{
"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
"name": "frontend",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "frontend"
},
"annotations": {
"kompose.service.type": "LoadBalancer"
}
},
"spec": {
"strategy": {
"resources": {}
},
"triggers": [
{
"type": "ConfigChange"
},
{
"type": "ImageChange",
"imageChangeParams": {
"automatic": true,
"containerNames": [
"frontend"
],
"from": {
"kind": "ImageStreamTag",
"name": "frontend:v4"
}
}
}
],
"replicas": 1,
"test": false,
"selector": {
"io.kompose.service": "frontend"
},
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"io.kompose.service": "frontend"
}
},
"spec": {
"containers": [
{
"name": "frontend",
"image": " ",
"ports": [
{
"containerPort": 80
}
],
"env": [
{
"name": "GET_HOSTS_FROM",
"value": "dns"
}
],
"resources": {}
}
],
"restartPolicy": "Always"
}
}
},
"status": {}
},
{
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
"name": "frontend",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "frontend"
}
},
"spec": {
"tags": [
{
"name": "v4",
"annotations": null,
"from": {
"kind": "DockerImage",
"name": "gcr.io/google-samples/gb-frontend:v4"
},
"generation": null,
"importPolicy": {}
}
]
},
"status": {
"dockerImageRepository": ""
}
},
{
"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
"name": "redis-master",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "redis-master"
}
},
"spec": {
"strategy": {
"resources": {}
},
"triggers": [
{
"type": "ConfigChange"
},
{
"type": "ImageChange",
"imageChangeParams": {
"automatic": true,
"containerNames": [
"redis-master"
],
"from": {
"kind": "ImageStreamTag",
"name": "redis-master:e2e"
}
}
}
],
"replicas": 1,
"test": false,
"selector": {
"io.kompose.service": "redis-master"
},
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"io.kompose.service": "redis-master"
}
},
"spec": {
"containers": [
{
"name": "redis-master",
"image": " ",
"ports": [
{
"containerPort": 6379
}
],
"resources": {}
}
],
"restartPolicy": "Always"
}
}
},
"status": {}
},
{
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
"name": "redis-master",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "redis-master"
}
},
"spec": {
"tags": [
{
"name": "e2e",
"annotations": null,
"from": {
"kind": "DockerImage",
"name": "gcr.io/google_containers/redis:e2e"
},
"generation": null,
"importPolicy": {}
}
]
},
"status": {
"dockerImageRepository": ""
}
},
{
"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
"name": "redis-slave",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "redis-slave"
}
},
"spec": {
"strategy": {
"resources": {}
},
"triggers": [
{
"type": "ConfigChange"
},
{
"type": "ImageChange",
"imageChangeParams": {
"automatic": true,
"containerNames": [
"redis-slave"
],
"from": {
"kind": "ImageStreamTag",
"name": "redis-slave:v1"
}
}
}
],
"replicas": 1,
"test": false,
"selector": {
"io.kompose.service": "redis-slave"
},
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"io.kompose.service": "redis-slave"
}
},
"spec": {
"containers": [
{
"name": "redis-slave",
"image": " ",
"ports": [
{
"containerPort": 6379
}
],
"env": [
{
"name": "GET_HOSTS_FROM",
"value": "dns"
}
],
"resources": {}
}
],
"restartPolicy": "Always"
}
}
},
"status": {}
},
{
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
"name": "redis-slave",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "redis-slave"
}
},
"spec": {
"tags": [
{
"name": "v1",
"annotations": null,
"from": {
"kind": "DockerImage",
"name": "gcr.io/google_samples/gb-redisslave:v1"
},
"generation": null,
"importPolicy": {}
}
]
},
"status": {
"dockerImageRepository": ""
}
}
]
}

View File

@ -0,0 +1,106 @@
{
"kind": "List",
"apiVersion": "v1",
"metadata": {},
"items": [
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "foobar",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foobar"
}
},
"spec": {
"ports": [
{
"name": "headless",
"port": 55555,
"targetPort": 0
}
],
"selector": {
"io.kompose.service": "foobar"
},
"clusterIP": "None"
},
"status": {
"loadBalancer": {}
}
},
{
"kind": "Deployment",
"apiVersion": "extensions/v1beta1",
"metadata": {
"name": "foobar",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foobar"
}
},
"spec": {
"replicas": 1,
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foobar"
}
},
"spec": {
"volumes": [
{
"name": "foobar-claim0",
"persistentVolumeClaim": {
"claimName": "foobar-claim0"
}
}
],
"containers": [
{
"name": "foobar",
"image": "foo/bar:latest",
"resources": {},
"volumeMounts": [
{
"name": "foobar-claim0",
"mountPath": "/tmp/foo/bar"
}
]
}
],
"restartPolicy": "Always"
}
},
"strategy": {
"type": "Recreate"
}
},
"status": {}
},
{
"kind": "PersistentVolumeClaim",
"apiVersion": "v1",
"metadata": {
"name": "foobar-claim0",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foobar-claim0"
}
},
"spec": {
"accessModes": [
"ReadWriteOnce"
],
"resources": {
"requests": {
"storage": "100Mi"
}
}
},
"status": {}
}
]
}

191
vendor/github.com/docker/cli/LICENSE generated vendored Normal file
View File

@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2013-2017 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

19
vendor/github.com/docker/cli/NOTICE generated vendored Normal file
View File

@ -0,0 +1,19 @@
Docker
Copyright 2012-2017 Docker, Inc.
This product includes software developed at Docker, Inc. (https://www.docker.com).
This product contains software (https://github.com/kr/pty) developed
by Keith Rarick, licensed under the MIT License.
The following is courtesy of our legal counsel:
Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.
For more information, please see https://www.bis.doc.gov
See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.

View File

@ -0,0 +1,96 @@
package interpolation
import (
"github.com/docker/cli/cli/compose/template"
"github.com/pkg/errors"
)
// Interpolate replaces variables in a string with the values from a mapping
func Interpolate(config map[string]interface{}, section string, mapping template.Mapping) (map[string]interface{}, error) {
out := map[string]interface{}{}
for name, item := range config {
if item == nil {
out[name] = nil
continue
}
mapItem, ok := item.(map[string]interface{})
if !ok {
return nil, errors.Errorf("Invalid type for %s : %T instead of %T", name, item, out)
}
interpolatedItem, err := interpolateSectionItem(name, mapItem, section, mapping)
if err != nil {
return nil, err
}
out[name] = interpolatedItem
}
return out, nil
}
func interpolateSectionItem(
name string,
item map[string]interface{},
section string,
mapping template.Mapping,
) (map[string]interface{}, error) {
out := map[string]interface{}{}
for key, value := range item {
interpolatedValue, err := recursiveInterpolate(value, mapping)
switch err := err.(type) {
case nil:
case *template.InvalidTemplateError:
return nil, errors.Errorf(
"Invalid interpolation format for %#v option in %s %#v: %#v. You may need to escape any $ with another $.",
key, section, name, err.Template,
)
default:
return nil, errors.Wrapf(err, "error while interpolating %s in %s %s", key, section, name)
}
out[key] = interpolatedValue
}
return out, nil
}
func recursiveInterpolate(
value interface{},
mapping template.Mapping,
) (interface{}, error) {
switch value := value.(type) {
case string:
return template.Substitute(value, mapping)
case map[string]interface{}:
out := map[string]interface{}{}
for key, elem := range value {
interpolatedElem, err := recursiveInterpolate(elem, mapping)
if err != nil {
return nil, err
}
out[key] = interpolatedElem
}
return out, nil
case []interface{}:
out := make([]interface{}, len(value))
for i, elem := range value {
interpolatedElem, err := recursiveInterpolate(elem, mapping)
if err != nil {
return nil, err
}
out[i] = interpolatedElem
}
return out, nil
default:
return value, nil
}
}
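
For orientation, Interpolate walks one section of the parsed Compose map (for example "services") and substitutes ${VAR} references in every string value through the supplied template.Mapping, recursing into nested maps and lists. The following is a minimal usage sketch, assuming an illustrative env map and lookup function as the Mapping:

package main

import (
	"fmt"

	"github.com/docker/cli/cli/compose/interpolation"
)

func main() {
	// Illustrative variable source backing the template.Mapping.
	env := map[string]string{"TAG": "3.2"}
	lookup := func(key string) (string, bool) {
		v, ok := env[key]
		return v, ok
	}

	// A tiny "services" section, shaped as ParseYAML would produce it.
	services := map[string]interface{}{
		"redis": map[string]interface{}{
			"image": "redis:${TAG}",
		},
	}

	out, err := interpolation.Interpolate(services, "services", lookup)
	if err != nil {
		panic(err)
	}
	// Prints "redis:3.2".
	fmt.Println(out["redis"].(map[string]interface{})["image"])
}

Template errors are rewrapped with the section and item name, which is why the loader passes section names such as "services" and "volumes" here.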

View File

@ -0,0 +1,712 @@
package loader
import (
"fmt"
"path"
"reflect"
"sort"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/cli/cli/compose/interpolation"
"github.com/docker/cli/cli/compose/schema"
"github.com/docker/cli/cli/compose/template"
"github.com/docker/cli/cli/compose/types"
"github.com/docker/cli/opts"
runconfigopts "github.com/docker/docker/runconfig/opts"
"github.com/docker/go-connections/nat"
units "github.com/docker/go-units"
shellwords "github.com/mattn/go-shellwords"
"github.com/mitchellh/mapstructure"
"github.com/pkg/errors"
yaml "gopkg.in/yaml.v2"
)
// ParseYAML reads the bytes from a file, parses the bytes into a mapping
// structure, and returns it.
func ParseYAML(source []byte) (map[string]interface{}, error) {
var cfg interface{}
if err := yaml.Unmarshal(source, &cfg); err != nil {
return nil, err
}
cfgMap, ok := cfg.(map[interface{}]interface{})
if !ok {
return nil, errors.Errorf("Top-level object must be a mapping")
}
converted, err := convertToStringKeysRecursive(cfgMap, "")
if err != nil {
return nil, err
}
return converted.(map[string]interface{}), nil
}
// Load reads a ConfigDetails and returns a fully loaded configuration
func Load(configDetails types.ConfigDetails) (*types.Config, error) {
if len(configDetails.ConfigFiles) < 1 {
return nil, errors.Errorf("No files specified")
}
if len(configDetails.ConfigFiles) > 1 {
return nil, errors.Errorf("Multiple files are not yet supported")
}
configDict := getConfigDict(configDetails)
if services, ok := configDict["services"]; ok {
if servicesDict, ok := services.(map[string]interface{}); ok {
forbidden := getProperties(servicesDict, types.ForbiddenProperties)
if len(forbidden) > 0 {
return nil, &ForbiddenPropertiesError{Properties: forbidden}
}
}
}
if err := schema.Validate(configDict, schema.Version(configDict)); err != nil {
return nil, err
}
cfg := types.Config{}
config, err := interpolateConfig(configDict, configDetails.LookupEnv)
if err != nil {
return nil, err
}
cfg.Services, err = LoadServices(config["services"], configDetails.WorkingDir, configDetails.LookupEnv)
if err != nil {
return nil, err
}
cfg.Networks, err = LoadNetworks(config["networks"])
if err != nil {
return nil, err
}
cfg.Volumes, err = LoadVolumes(config["volumes"])
if err != nil {
return nil, err
}
cfg.Secrets, err = LoadSecrets(config["secrets"], configDetails.WorkingDir)
if err != nil {
return nil, err
}
cfg.Configs, err = LoadConfigObjs(config["configs"], configDetails.WorkingDir)
if err != nil {
return nil, err
}
return &cfg, nil
}
func interpolateConfig(configDict map[string]interface{}, lookupEnv template.Mapping) (map[string]map[string]interface{}, error) {
config := make(map[string]map[string]interface{})
for _, key := range []string{"services", "networks", "volumes", "secrets", "configs"} {
section, ok := configDict[key]
if !ok {
config[key] = make(map[string]interface{})
continue
}
var err error
config[key], err = interpolation.Interpolate(section.(map[string]interface{}), key, lookupEnv)
if err != nil {
return nil, err
}
}
return config, nil
}
// GetUnsupportedProperties returns the list of any unsupported properties that are
// used in the Compose files.
func GetUnsupportedProperties(configDetails types.ConfigDetails) []string {
unsupported := map[string]bool{}
for _, service := range getServices(getConfigDict(configDetails)) {
serviceDict := service.(map[string]interface{})
for _, property := range types.UnsupportedProperties {
if _, isSet := serviceDict[property]; isSet {
unsupported[property] = true
}
}
}
return sortedKeys(unsupported)
}
func sortedKeys(set map[string]bool) []string {
var keys []string
for key := range set {
keys = append(keys, key)
}
sort.Strings(keys)
return keys
}
// GetDeprecatedProperties returns the list of any deprecated properties that
// are used in the compose files.
func GetDeprecatedProperties(configDetails types.ConfigDetails) map[string]string {
return getProperties(getServices(getConfigDict(configDetails)), types.DeprecatedProperties)
}
func getProperties(services map[string]interface{}, propertyMap map[string]string) map[string]string {
output := map[string]string{}
for _, service := range services {
if serviceDict, ok := service.(map[string]interface{}); ok {
for property, description := range propertyMap {
if _, isSet := serviceDict[property]; isSet {
output[property] = description
}
}
}
}
return output
}
// ForbiddenPropertiesError is returned when there are properties in the Compose
// file that are forbidden.
type ForbiddenPropertiesError struct {
Properties map[string]string
}
func (e *ForbiddenPropertiesError) Error() string {
return "Configuration contains forbidden properties"
}
// TODO: resolve multiple files into a single config
func getConfigDict(configDetails types.ConfigDetails) map[string]interface{} {
return configDetails.ConfigFiles[0].Config
}
func getServices(configDict map[string]interface{}) map[string]interface{} {
if services, ok := configDict["services"]; ok {
if servicesDict, ok := services.(map[string]interface{}); ok {
return servicesDict
}
}
return map[string]interface{}{}
}
func transform(source map[string]interface{}, target interface{}) error {
data := mapstructure.Metadata{}
config := &mapstructure.DecoderConfig{
DecodeHook: mapstructure.ComposeDecodeHookFunc(
transformHook,
mapstructure.StringToTimeDurationHookFunc()),
Result: target,
Metadata: &data,
}
decoder, err := mapstructure.NewDecoder(config)
if err != nil {
return err
}
return decoder.Decode(source)
}
func transformHook(
source reflect.Type,
target reflect.Type,
data interface{},
) (interface{}, error) {
switch target {
case reflect.TypeOf(types.External{}):
return transformExternal(data)
case reflect.TypeOf(types.HealthCheckTest{}):
return transformHealthCheckTest(data)
case reflect.TypeOf(types.ShellCommand{}):
return transformShellCommand(data)
case reflect.TypeOf(types.StringList{}):
return transformStringList(data)
case reflect.TypeOf(map[string]string{}):
return transformMapStringString(data)
case reflect.TypeOf(types.UlimitsConfig{}):
return transformUlimits(data)
case reflect.TypeOf(types.UnitBytes(0)):
return transformSize(data)
case reflect.TypeOf([]types.ServicePortConfig{}):
return transformServicePort(data)
case reflect.TypeOf(types.ServiceSecretConfig{}):
return transformStringSourceMap(data)
case reflect.TypeOf(types.ServiceConfigObjConfig{}):
return transformStringSourceMap(data)
case reflect.TypeOf(types.StringOrNumberList{}):
return transformStringOrNumberList(data)
case reflect.TypeOf(map[string]*types.ServiceNetworkConfig{}):
return transformServiceNetworkMap(data)
case reflect.TypeOf(types.MappingWithEquals{}):
return transformMappingOrList(data, "=", true), nil
case reflect.TypeOf(types.Labels{}):
return transformMappingOrList(data, "=", false), nil
case reflect.TypeOf(types.MappingWithColon{}):
return transformMappingOrList(data, ":", false), nil
case reflect.TypeOf(types.ServiceVolumeConfig{}):
return transformServiceVolumeConfig(data)
}
return data, nil
}
// keys need to be converted to strings for jsonschema
func convertToStringKeysRecursive(value interface{}, keyPrefix string) (interface{}, error) {
if mapping, ok := value.(map[interface{}]interface{}); ok {
dict := make(map[string]interface{})
for key, entry := range mapping {
str, ok := key.(string)
if !ok {
return nil, formatInvalidKeyError(keyPrefix, key)
}
var newKeyPrefix string
if keyPrefix == "" {
newKeyPrefix = str
} else {
newKeyPrefix = fmt.Sprintf("%s.%s", keyPrefix, str)
}
convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix)
if err != nil {
return nil, err
}
dict[str] = convertedEntry
}
return dict, nil
}
if list, ok := value.([]interface{}); ok {
var convertedList []interface{}
for index, entry := range list {
newKeyPrefix := fmt.Sprintf("%s[%d]", keyPrefix, index)
convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix)
if err != nil {
return nil, err
}
convertedList = append(convertedList, convertedEntry)
}
return convertedList, nil
}
return value, nil
}
func formatInvalidKeyError(keyPrefix string, key interface{}) error {
var location string
if keyPrefix == "" {
location = "at top level"
} else {
location = fmt.Sprintf("in %s", keyPrefix)
}
return errors.Errorf("Non-string key %s: %#v", location, key)
}
// LoadServices produces a ServiceConfig map from a compose file Dict
// the servicesDict is not validated if directly used. Use Load() to enable validation
func LoadServices(servicesDict map[string]interface{}, workingDir string, lookupEnv template.Mapping) ([]types.ServiceConfig, error) {
var services []types.ServiceConfig
for name, serviceDef := range servicesDict {
serviceConfig, err := LoadService(name, serviceDef.(map[string]interface{}), workingDir, lookupEnv)
if err != nil {
return nil, err
}
services = append(services, *serviceConfig)
}
return services, nil
}
// LoadService produces a single ServiceConfig from a compose file Dict
// the serviceDict is not validated if directly used. Use Load() to enable validation
func LoadService(name string, serviceDict map[string]interface{}, workingDir string, lookupEnv template.Mapping) (*types.ServiceConfig, error) {
serviceConfig := &types.ServiceConfig{}
if err := transform(serviceDict, serviceConfig); err != nil {
return nil, err
}
serviceConfig.Name = name
if err := resolveEnvironment(serviceConfig, workingDir, lookupEnv); err != nil {
return nil, err
}
resolveVolumePaths(serviceConfig.Volumes, workingDir, lookupEnv)
return serviceConfig, nil
}
func updateEnvironment(environment map[string]*string, vars map[string]*string, lookupEnv template.Mapping) {
for k, v := range vars {
interpolatedV, ok := lookupEnv(k)
if (v == nil || *v == "") && ok {
// lookupEnv is prioritized over vars
environment[k] = &interpolatedV
} else {
environment[k] = v
}
}
}
func resolveEnvironment(serviceConfig *types.ServiceConfig, workingDir string, lookupEnv template.Mapping) error {
environment := make(map[string]*string)
if len(serviceConfig.EnvFile) > 0 {
var envVars []string
for _, file := range serviceConfig.EnvFile {
filePath := absPath(workingDir, file)
fileVars, err := runconfigopts.ParseEnvFile(filePath)
if err != nil {
return err
}
envVars = append(envVars, fileVars...)
}
updateEnvironment(environment,
runconfigopts.ConvertKVStringsToMapWithNil(envVars), lookupEnv)
}
updateEnvironment(environment, serviceConfig.Environment, lookupEnv)
serviceConfig.Environment = environment
return nil
}
func resolveVolumePaths(volumes []types.ServiceVolumeConfig, workingDir string, lookupEnv template.Mapping) {
for i, volume := range volumes {
if volume.Type != "bind" {
continue
}
volume.Source = absPath(workingDir, expandUser(volume.Source, lookupEnv))
volumes[i] = volume
}
}
// TODO: make this more robust
func expandUser(path string, lookupEnv template.Mapping) string {
if strings.HasPrefix(path, "~") {
home, ok := lookupEnv("HOME")
if !ok {
logrus.Warn("cannot expand '~', because the environment lacks HOME")
return path
}
return strings.Replace(path, "~", home, 1)
}
return path
}
func transformUlimits(data interface{}) (interface{}, error) {
switch value := data.(type) {
case int:
return types.UlimitsConfig{Single: value}, nil
case map[string]interface{}:
ulimit := types.UlimitsConfig{}
ulimit.Soft = value["soft"].(int)
ulimit.Hard = value["hard"].(int)
return ulimit, nil
default:
return data, errors.Errorf("invalid type %T for ulimits", value)
}
}
// LoadNetworks produces a NetworkConfig map from a compose file Dict
// the source Dict is not validated if directly used. Use Load() to enable validation
func LoadNetworks(source map[string]interface{}) (map[string]types.NetworkConfig, error) {
networks := make(map[string]types.NetworkConfig)
err := transform(source, &networks)
if err != nil {
return networks, err
}
for name, network := range networks {
if network.External.External && network.External.Name == "" {
network.External.Name = name
networks[name] = network
}
}
return networks, nil
}
func externalVolumeError(volume, key string) error {
return errors.Errorf(
"conflicting parameters \"external\" and %q specified for volume %q",
key, volume)
}
// LoadVolumes produces a VolumeConfig map from a compose file Dict
// the source Dict is not validated if directly used. Use Load() to enable validation
func LoadVolumes(source map[string]interface{}) (map[string]types.VolumeConfig, error) {
volumes := make(map[string]types.VolumeConfig)
err := transform(source, &volumes)
if err != nil {
return volumes, err
}
for name, volume := range volumes {
if volume.External.External {
if volume.Driver != "" {
return nil, externalVolumeError(name, "driver")
}
if len(volume.DriverOpts) > 0 {
return nil, externalVolumeError(name, "driver_opts")
}
if len(volume.Labels) > 0 {
return nil, externalVolumeError(name, "labels")
}
if volume.External.Name == "" {
volume.External.Name = name
volumes[name] = volume
}
}
}
return volumes, nil
}
// LoadSecrets produces a SecretConfig map from a compose file Dict
// the source Dict is not validated if directly used. Use Load() to enable validation
func LoadSecrets(source map[string]interface{}, workingDir string) (map[string]types.SecretConfig, error) {
secrets := make(map[string]types.SecretConfig)
if err := transform(source, &secrets); err != nil {
return secrets, err
}
for name, secret := range secrets {
if secret.External.External && secret.External.Name == "" {
secret.External.Name = name
secrets[name] = secret
}
if secret.File != "" {
secret.File = absPath(workingDir, secret.File)
}
}
return secrets, nil
}
// LoadConfigObjs produces a ConfigObjConfig map from a compose file Dict
// the source Dict is not validated if directly used. Use Load() to enable validation
func LoadConfigObjs(source map[string]interface{}, workingDir string) (map[string]types.ConfigObjConfig, error) {
configs := make(map[string]types.ConfigObjConfig)
if err := transform(source, &configs); err != nil {
return configs, err
}
for name, config := range configs {
if config.External.External && config.External.Name == "" {
config.External.Name = name
configs[name] = config
}
if config.File != "" {
config.File = absPath(workingDir, config.File)
}
}
return configs, nil
}
func absPath(workingDir string, filepath string) string {
if path.IsAbs(filepath) {
return filepath
}
return path.Join(workingDir, filepath)
}
func transformMapStringString(data interface{}) (interface{}, error) {
switch value := data.(type) {
case map[string]interface{}:
return toMapStringString(value, false), nil
case map[string]string:
return value, nil
default:
return data, errors.Errorf("invalid type %T for map[string]string", value)
}
}
func transformExternal(data interface{}) (interface{}, error) {
switch value := data.(type) {
case bool:
return map[string]interface{}{"external": value}, nil
case map[string]interface{}:
return map[string]interface{}{"external": true, "name": value["name"]}, nil
default:
return data, errors.Errorf("invalid type %T for external", value)
}
}
func transformServicePort(data interface{}) (interface{}, error) {
switch entries := data.(type) {
case []interface{}:
// We process the list instead of individual items here.
// The reason is that one entry might be mapped to multiple ServicePortConfig.
// Therefore we take an input of a list and return an output of a list.
ports := []interface{}{}
for _, entry := range entries {
switch value := entry.(type) {
case int:
v, err := toServicePortConfigs(fmt.Sprint(value))
if err != nil {
return data, err
}
ports = append(ports, v...)
case string:
v, err := toServicePortConfigs(value)
if err != nil {
return data, err
}
ports = append(ports, v...)
case map[string]interface{}:
ports = append(ports, value)
default:
return data, errors.Errorf("invalid type %T for port", value)
}
}
return ports, nil
default:
return data, errors.Errorf("invalid type %T for port", entries)
}
}
func transformStringSourceMap(data interface{}) (interface{}, error) {
switch value := data.(type) {
case string:
return map[string]interface{}{"source": value}, nil
case map[string]interface{}:
return data, nil
default:
return data, errors.Errorf("invalid type %T for secret", value)
}
}
func transformServiceVolumeConfig(data interface{}) (interface{}, error) {
switch value := data.(type) {
case string:
return parseVolume(value)
case map[string]interface{}:
return data, nil
default:
return data, errors.Errorf("invalid type %T for service volume", value)
}
}
func transformServiceNetworkMap(value interface{}) (interface{}, error) {
if list, ok := value.([]interface{}); ok {
mapValue := map[interface{}]interface{}{}
for _, name := range list {
mapValue[name] = nil
}
return mapValue, nil
}
return value, nil
}
func transformStringOrNumberList(value interface{}) (interface{}, error) {
list := value.([]interface{})
result := make([]string, len(list))
for i, item := range list {
result[i] = fmt.Sprint(item)
}
return result, nil
}
func transformStringList(data interface{}) (interface{}, error) {
switch value := data.(type) {
case string:
return []string{value}, nil
case []interface{}:
return value, nil
default:
return data, errors.Errorf("invalid type %T for string list", value)
}
}
func transformMappingOrList(mappingOrList interface{}, sep string, allowNil bool) interface{} {
switch value := mappingOrList.(type) {
case map[string]interface{}:
return toMapStringString(value, allowNil)
case ([]interface{}):
result := make(map[string]interface{})
for _, value := range value {
parts := strings.SplitN(value.(string), sep, 2)
key := parts[0]
switch {
case len(parts) == 1 && allowNil:
result[key] = nil
case len(parts) == 1 && !allowNil:
result[key] = ""
default:
result[key] = parts[1]
}
}
return result
}
panic(errors.Errorf("expected a map or a list, got %T: %#v", mappingOrList, mappingOrList))
}
func transformShellCommand(value interface{}) (interface{}, error) {
if str, ok := value.(string); ok {
return shellwords.Parse(str)
}
return value, nil
}
func transformHealthCheckTest(data interface{}) (interface{}, error) {
switch value := data.(type) {
case string:
return append([]string{"CMD-SHELL"}, value), nil
case []interface{}:
return value, nil
default:
return value, errors.Errorf("invalid type %T for healthcheck.test", value)
}
}
func transformSize(value interface{}) (int64, error) {
switch value := value.(type) {
case int:
return int64(value), nil
case string:
return units.RAMInBytes(value)
}
panic(errors.Errorf("invalid type for size %T", value))
}
func toServicePortConfigs(value string) ([]interface{}, error) {
var portConfigs []interface{}
ports, portBindings, err := nat.ParsePortSpecs([]string{value})
if err != nil {
return nil, err
}
// We need to sort the key of the ports to make sure it is consistent
keys := []string{}
for port := range ports {
keys = append(keys, string(port))
}
sort.Strings(keys)
for _, key := range keys {
// Reuse ConvertPortToPortConfig so that it is consistent
portConfig, err := opts.ConvertPortToPortConfig(nat.Port(key), portBindings)
if err != nil {
return nil, err
}
for _, p := range portConfig {
portConfigs = append(portConfigs, types.ServicePortConfig{
Protocol: string(p.Protocol),
Target: p.TargetPort,
Published: p.PublishedPort,
Mode: string(p.PublishMode),
})
}
}
return portConfigs, nil
}
func toMapStringString(value map[string]interface{}, allowNil bool) map[string]interface{} {
output := make(map[string]interface{})
for key, value := range value {
output[key] = toString(value, allowNil)
}
return output
}
func toString(value interface{}, allowNil bool) interface{} {
switch {
case value != nil:
return fmt.Sprint(value)
case allowNil:
return nil
default:
return ""
}
}
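
ParseYAML and Load are the two entry points of this file: ParseYAML turns raw bytes into a string-keyed map (rejecting non-string keys), and Load then runs schema validation, interpolation, and conversion into the typed structures from the types package. Below is a minimal end-to-end sketch; the compose snippet, file name, and TAG variable are illustrative:

package main

import (
	"fmt"

	"github.com/docker/cli/cli/compose/loader"
	"github.com/docker/cli/cli/compose/types"
)

func main() {
	// Illustrative v3 Compose document.
	source := []byte(`
version: "3"
services:
  web:
    image: nginx:${TAG:-latest}
    ports:
      - "8080:80"
`)

	// Step 1: raw YAML -> string-keyed map.
	dict, err := loader.ParseYAML(source)
	if err != nil {
		panic(err)
	}

	// Step 2: wrap it in ConfigDetails and load. WorkingDir is used to
	// resolve env_file and bind-mount paths; Environment feeds ${VAR}
	// interpolation through ConfigDetails.LookupEnv.
	details := types.ConfigDetails{
		WorkingDir:  ".",
		ConfigFiles: []types.ConfigFile{{Filename: "docker-compose.yml", Config: dict}},
		Environment: map[string]string{"TAG": "1.13"},
	}

	cfg, err := loader.Load(details)
	if err != nil {
		panic(err)
	}
	for _, svc := range cfg.Services {
		// e.g. "web -> nginx:1.13 (1 ports)"
		fmt.Printf("%s -> %s (%d ports)\n", svc.Name, svc.Image, len(svc.Ports))
	}
}

GetUnsupportedProperties and GetDeprecatedProperties take the same ConfigDetails and report service keys (such as build or restart) that this v3 implementation does not handle.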

View File

@ -0,0 +1,121 @@
package loader
import (
"strings"
"unicode"
"unicode/utf8"
"github.com/docker/cli/cli/compose/types"
"github.com/docker/docker/api/types/mount"
"github.com/pkg/errors"
)
func parseVolume(spec string) (types.ServiceVolumeConfig, error) {
volume := types.ServiceVolumeConfig{}
switch len(spec) {
case 0:
return volume, errors.New("invalid empty volume spec")
case 1, 2:
volume.Target = spec
volume.Type = string(mount.TypeVolume)
return volume, nil
}
buffer := []rune{}
for _, char := range spec {
switch {
case isWindowsDrive(char, buffer, volume):
buffer = append(buffer, char)
case char == ':':
if err := populateFieldFromBuffer(char, buffer, &volume); err != nil {
return volume, errors.Wrapf(err, "invalid spec: %s", spec)
}
buffer = []rune{}
default:
buffer = append(buffer, char)
}
}
if err := populateFieldFromBuffer(rune(0), buffer, &volume); err != nil {
return volume, errors.Wrapf(err, "invalid spec: %s", spec)
}
populateType(&volume)
return volume, nil
}
func isWindowsDrive(char rune, buffer []rune, volume types.ServiceVolumeConfig) bool {
return char == ':' && len(buffer) == 1 && unicode.IsLetter(buffer[0])
}
func populateFieldFromBuffer(char rune, buffer []rune, volume *types.ServiceVolumeConfig) error {
strBuffer := string(buffer)
switch {
case len(buffer) == 0:
return errors.New("empty section between colons")
// Anonymous volume
case volume.Source == "" && char == rune(0):
volume.Target = strBuffer
return nil
case volume.Source == "":
volume.Source = strBuffer
return nil
case volume.Target == "":
volume.Target = strBuffer
return nil
case char == ':':
return errors.New("too many colons")
}
for _, option := range strings.Split(strBuffer, ",") {
switch option {
case "ro":
volume.ReadOnly = true
case "rw":
volume.ReadOnly = false
case "nocopy":
volume.Volume = &types.ServiceVolumeVolume{NoCopy: true}
default:
if isBindOption(option) {
volume.Bind = &types.ServiceVolumeBind{Propagation: option}
} else {
return errors.Errorf("unknown option: %s", option)
}
}
}
return nil
}
func isBindOption(option string) bool {
for _, propagation := range mount.Propagations {
if mount.Propagation(option) == propagation {
return true
}
}
return false
}
func populateType(volume *types.ServiceVolumeConfig) {
switch {
// Anonymous volume
case volume.Source == "":
volume.Type = string(mount.TypeVolume)
case isFilePath(volume.Source):
volume.Type = string(mount.TypeBind)
default:
volume.Type = string(mount.TypeVolume)
}
}
func isFilePath(source string) bool {
switch source[0] {
case '.', '/', '~':
return true
}
// Windows absolute path
first, next := utf8.DecodeRuneInString(source)
if unicode.IsLetter(first) && source[next] == ':' {
return true
}
return false
}
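
Since parseVolume is unexported, the easiest way to sketch the short volume syntax it accepts is a hypothetical in-package test; the cases below are illustrative and simply restate the bind/volume split implemented by populateType:

package loader

import "testing"

// Hypothetical sketch of how short-syntax volume strings are split
// into typed fields; not part of the vendored package.
func TestParseVolumeSketch(t *testing.T) {
	// Host path, container path and the "ro" option: a read-only bind mount.
	bind, err := parseVolume("./mysql-data:/var/lib/mysql:ro")
	if err != nil {
		t.Fatal(err)
	}
	if bind.Type != "bind" || !bind.ReadOnly || bind.Target != "/var/lib/mysql" {
		t.Fatalf("unexpected bind config: %+v", bind)
	}

	// A source that is not a path: a named volume.
	named, _ := parseVolume("datavolume:/var/lib/mysql")
	if named.Type != "volume" || named.Source != "datavolume" {
		t.Fatalf("unexpected named volume: %+v", named)
	}

	// A bare container path: an anonymous volume.
	anon, _ := parseVolume("/var/lib/mysql")
	if anon.Type != "volume" || anon.Source != "" {
		t.Fatalf("unexpected anonymous volume: %+v", anon)
	}
}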

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,168 @@
package schema
//go:generate go-bindata -pkg schema -nometadata data
import (
"fmt"
"strings"
"time"
"github.com/pkg/errors"
"github.com/xeipuuv/gojsonschema"
)
const (
defaultVersion = "1.0"
versionField = "version"
)
type portsFormatChecker struct{}
func (checker portsFormatChecker) IsFormat(input string) bool {
// TODO: implement this
return true
}
type durationFormatChecker struct{}
func (checker durationFormatChecker) IsFormat(input string) bool {
_, err := time.ParseDuration(input)
return err == nil
}
func init() {
gojsonschema.FormatCheckers.Add("expose", portsFormatChecker{})
gojsonschema.FormatCheckers.Add("ports", portsFormatChecker{})
gojsonschema.FormatCheckers.Add("duration", durationFormatChecker{})
}
// Version returns the version of the config, defaulting to version 1.0
func Version(config map[string]interface{}) string {
version, ok := config[versionField]
if !ok {
return defaultVersion
}
return normalizeVersion(fmt.Sprintf("%v", version))
}
func normalizeVersion(version string) string {
switch version {
case "3":
return "3.0"
default:
return version
}
}
// Validate uses the jsonschema to validate the configuration
func Validate(config map[string]interface{}, version string) error {
schemaData, err := Asset(fmt.Sprintf("data/config_schema_v%s.json", version))
if err != nil {
return errors.Errorf("unsupported Compose file version: %s", version)
}
schemaLoader := gojsonschema.NewStringLoader(string(schemaData))
dataLoader := gojsonschema.NewGoLoader(config)
result, err := gojsonschema.Validate(schemaLoader, dataLoader)
if err != nil {
return err
}
if !result.Valid() {
return toError(result)
}
return nil
}
func toError(result *gojsonschema.Result) error {
err := getMostSpecificError(result.Errors())
return err
}
const (
jsonschemaOneOf = "number_one_of"
jsonschemaAnyOf = "number_any_of"
)
func getDescription(err validationError) string {
switch err.parent.Type() {
case "invalid_type":
if expectedType, ok := err.parent.Details()["expected"].(string); ok {
return fmt.Sprintf("must be a %s", humanReadableType(expectedType))
}
case jsonschemaOneOf, jsonschemaAnyOf:
if err.child == nil {
return err.parent.Description()
}
return err.child.Description()
}
return err.parent.Description()
}
func humanReadableType(definition string) string {
if definition[0:1] == "[" {
allTypes := strings.Split(definition[1:len(definition)-1], ",")
for i, t := range allTypes {
allTypes[i] = humanReadableType(t)
}
return fmt.Sprintf(
"%s or %s",
strings.Join(allTypes[0:len(allTypes)-1], ", "),
allTypes[len(allTypes)-1],
)
}
if definition == "object" {
return "mapping"
}
if definition == "array" {
return "list"
}
return definition
}
type validationError struct {
parent gojsonschema.ResultError
child gojsonschema.ResultError
}
func (err validationError) Error() string {
description := getDescription(err)
return fmt.Sprintf("%s %s", err.parent.Field(), description)
}
func getMostSpecificError(errors []gojsonschema.ResultError) validationError {
mostSpecificError := 0
for i, err := range errors {
if specificity(err) > specificity(errors[mostSpecificError]) {
mostSpecificError = i
continue
}
if specificity(err) == specificity(errors[mostSpecificError]) {
// Invalid type errors win in a tie-breaker for most specific field name
if err.Type() == "invalid_type" && errors[mostSpecificError].Type() != "invalid_type" {
mostSpecificError = i
}
}
}
if mostSpecificError+1 == len(errors) {
return validationError{parent: errors[mostSpecificError]}
}
switch errors[mostSpecificError].Type() {
case "number_one_of", "number_any_of":
return validationError{
parent: errors[mostSpecificError],
child: errors[mostSpecificError+1],
}
default:
return validationError{parent: errors[mostSpecificError]}
}
}
func specificity(err gojsonschema.ResultError) int {
return len(strings.Split(err.Field(), "."))
}
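
Version and Validate are the exported pieces here: Version reads the version field, defaulting to "1.0" when it is absent and normalizing "3" to "3.0", and Validate checks the parsed map against the bundled JSON schema for that version, surfacing the most specific validation error. A short sketch, assuming loader.ParseYAML from the sibling package and an illustrative compose snippet:

package main

import (
	"fmt"

	"github.com/docker/cli/cli/compose/loader"
	"github.com/docker/cli/cli/compose/schema"
)

func main() {
	dict, err := loader.ParseYAML([]byte("version: \"3\"\nservices:\n  web:\n    image: nginx\n"))
	if err != nil {
		panic(err)
	}

	// "3" is normalized to "3.0" and matched against the bundled
	// data/config_schema_v3.0.json; unknown versions return an error.
	version := schema.Version(dict)
	fmt.Println("validating as version", version)

	if err := schema.Validate(dict, version); err != nil {
		fmt.Println("invalid compose file:", err)
		return
	}
	fmt.Println("compose file is valid")
}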

View File

@ -0,0 +1,101 @@
package template
import (
"fmt"
"regexp"
"strings"
)
var delimiter = "\\$"
var substitution = "[_a-z][_a-z0-9]*(?::?-[^}]+)?"
var patternString = fmt.Sprintf(
"%s(?i:(?P<escaped>%s)|(?P<named>%s)|{(?P<braced>%s)}|(?P<invalid>))",
delimiter, delimiter, substitution, substitution,
)
var pattern = regexp.MustCompile(patternString)
// InvalidTemplateError is returned when a variable template is not in a valid
// format
type InvalidTemplateError struct {
Template string
}
func (e InvalidTemplateError) Error() string {
return fmt.Sprintf("Invalid template: %#v", e.Template)
}
// Mapping is a user-supplied function which maps from variable names to values.
// Returns the value as a string and a bool indicating whether
// the value is present, to distinguish between an empty string
// and the absence of a value.
type Mapping func(string) (string, bool)
// Substitute variables in the string with their values
func Substitute(template string, mapping Mapping) (string, error) {
var err error
result := pattern.ReplaceAllStringFunc(template, func(substring string) string {
matches := pattern.FindStringSubmatch(substring)
groups := make(map[string]string)
for i, name := range pattern.SubexpNames() {
if i != 0 {
groups[name] = matches[i]
}
}
substitution := groups["named"]
if substitution == "" {
substitution = groups["braced"]
}
if substitution != "" {
// Soft default (fall back if unset or empty)
if strings.Contains(substitution, ":-") {
name, defaultValue := partition(substitution, ":-")
value, ok := mapping(name)
if !ok || value == "" {
return defaultValue
}
return value
}
// Hard default (fall back if-and-only-if empty)
if strings.Contains(substitution, "-") {
name, defaultValue := partition(substitution, "-")
value, ok := mapping(name)
if !ok {
return defaultValue
}
return value
}
// No default (fall back to empty string)
value, ok := mapping(substitution)
if !ok {
return ""
}
return value
}
if escaped := groups["escaped"]; escaped != "" {
return escaped
}
err = &InvalidTemplateError{Template: template}
return ""
})
return result, err
}
// Split the string at the first occurrence of sep, and return the part before the separator,
// and the part after the separator.
//
// If the separator is not found, return the string itself, followed by an empty string.
func partition(s, sep string) (string, string) {
if strings.Contains(s, sep) {
parts := strings.SplitN(s, sep, 2)
return parts[0], parts[1]
}
return s, ""
}
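
The forms handled by Substitute are $VAR and ${VAR}, the soft default ${VAR:-default} (applied when the variable is unset or empty), the hard default ${VAR-default} (applied only when the variable is unset), and $$ for a literal dollar sign. A small sketch, assuming an illustrative env map behind the Mapping:

package main

import (
	"fmt"

	"github.com/docker/cli/cli/compose/template"
)

func main() {
	env := map[string]string{"TAG": "", "PORT": "8080"}
	mapping := func(name string) (string, bool) {
		v, ok := env[name]
		return v, ok
	}

	// ${TAG:-latest} falls back because TAG is set but empty;
	// ${TAG-latest} keeps the empty value because TAG is set;
	// ${PORT} resolves normally; $$HOME becomes a literal $HOME.
	for _, s := range []string{
		"redis:${TAG:-latest}",
		"redis:${TAG-latest}",
		"listen on ${PORT}",
		"escaped $$HOME",
	} {
		out, err := template.Substitute(s, mapping)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-25s -> %s\n", s, out)
	}
}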

337
vendor/github.com/docker/cli/cli/compose/types/types.go generated vendored Normal file
View File

@ -0,0 +1,337 @@
package types
import (
"time"
)
// UnsupportedProperties not yet supported by this implementation of the compose file
var UnsupportedProperties = []string{
"build",
"cap_add",
"cap_drop",
"cgroup_parent",
"devices",
"domainname",
"external_links",
"ipc",
"links",
"mac_address",
"network_mode",
"privileged",
"restart",
"security_opt",
"shm_size",
"stop_signal",
"sysctls",
"tmpfs",
"userns_mode",
}
// DeprecatedProperties that were removed from the v3 format, but their
// use should not impact the behaviour of the application.
var DeprecatedProperties = map[string]string{
"container_name": "Setting the container name is not supported.",
"expose": "Exposing ports is unnecessary - services on the same network can access each other's containers on any port.",
}
// ForbiddenProperties that are not supported in this implementation of the
// compose file.
var ForbiddenProperties = map[string]string{
"extends": "Support for `extends` is not implemented yet.",
"volume_driver": "Instead of setting the volume driver on the service, define a volume using the top-level `volumes` option and specify the driver there.",
"volumes_from": "To share a volume between services, define it using the top-level `volumes` option and reference it from each service that shares it using the service-level `volumes` option.",
"cpu_quota": "Set resource limits using deploy.resources",
"cpu_shares": "Set resource limits using deploy.resources",
"cpuset": "Set resource limits using deploy.resources",
"mem_limit": "Set resource limits using deploy.resources",
"memswap_limit": "Set resource limits using deploy.resources",
}
// ConfigFile is a filename and the contents of the file as a Dict
type ConfigFile struct {
Filename string
Config map[string]interface{}
}
// ConfigDetails are the details about a group of ConfigFiles
type ConfigDetails struct {
WorkingDir string
ConfigFiles []ConfigFile
Environment map[string]string
}
// LookupEnv provides a lookup function for environment variables
func (cd ConfigDetails) LookupEnv(key string) (string, bool) {
v, ok := cd.Environment[key]
return v, ok
}
// Config is a full compose file configuration
type Config struct {
Services []ServiceConfig
Networks map[string]NetworkConfig
Volumes map[string]VolumeConfig
Secrets map[string]SecretConfig
Configs map[string]ConfigObjConfig
}
// ServiceConfig is the configuration of one service
type ServiceConfig struct {
Name string
CapAdd []string `mapstructure:"cap_add"`
CapDrop []string `mapstructure:"cap_drop"`
CgroupParent string `mapstructure:"cgroup_parent"`
Command ShellCommand
Configs []ServiceConfigObjConfig
ContainerName string `mapstructure:"container_name"`
CredentialSpec CredentialSpecConfig `mapstructure:"credential_spec"`
DependsOn []string `mapstructure:"depends_on"`
Deploy DeployConfig
Devices []string
DNS StringList
DNSSearch StringList `mapstructure:"dns_search"`
DomainName string `mapstructure:"domainname"`
Entrypoint ShellCommand
Environment MappingWithEquals
EnvFile StringList `mapstructure:"env_file"`
Expose StringOrNumberList
ExternalLinks []string `mapstructure:"external_links"`
ExtraHosts MappingWithColon `mapstructure:"extra_hosts"`
Hostname string
HealthCheck *HealthCheckConfig
Image string
Ipc string
Labels Labels
Links []string
Logging *LoggingConfig
MacAddress string `mapstructure:"mac_address"`
NetworkMode string `mapstructure:"network_mode"`
Networks map[string]*ServiceNetworkConfig
Pid string
Ports []ServicePortConfig
Privileged bool
ReadOnly bool `mapstructure:"read_only"`
Restart string
Secrets []ServiceSecretConfig
SecurityOpt []string `mapstructure:"security_opt"`
StdinOpen bool `mapstructure:"stdin_open"`
StopGracePeriod *time.Duration `mapstructure:"stop_grace_period"`
StopSignal string `mapstructure:"stop_signal"`
Tmpfs StringList
Tty bool `mapstructure:"tty"`
Ulimits map[string]*UlimitsConfig
User string
Volumes []ServiceVolumeConfig
WorkingDir string `mapstructure:"working_dir"`
}
// ShellCommand is a string or list of string args
type ShellCommand []string
// StringList is a type for fields that can be a string or list of strings
type StringList []string
// StringOrNumberList is a type for fields that can be a list of strings or
// numbers
type StringOrNumberList []string
// MappingWithEquals is a mapping type that can be converted from a list of
// key[=value] strings.
// For the key with an empty value (`key=`), the mapped value is set to a pointer to `""`.
// For the key without value (`key`), the mapped value is set to nil.
type MappingWithEquals map[string]*string
// Labels is a mapping type for labels
type Labels map[string]string
// MappingWithColon is a mapping type that can be converted from a list of
// 'key: value' strings
type MappingWithColon map[string]string
// LoggingConfig the logging configuration for a service
type LoggingConfig struct {
Driver string
Options map[string]string
}
// DeployConfig the deployment configuration for a service
type DeployConfig struct {
Mode string
Replicas *uint64
Labels Labels
UpdateConfig *UpdateConfig `mapstructure:"update_config"`
Resources Resources
RestartPolicy *RestartPolicy `mapstructure:"restart_policy"`
Placement Placement
EndpointMode string `mapstructure:"endpoint_mode"`
}
// HealthCheckConfig the healthcheck configuration for a service
type HealthCheckConfig struct {
Test HealthCheckTest
Timeout string
Interval string
Retries *uint64
StartPeriod string
Disable bool
}
// HealthCheckTest is the command run to test the health of a service
type HealthCheckTest []string
// UpdateConfig the service update configuration
type UpdateConfig struct {
Parallelism *uint64
Delay time.Duration
FailureAction string `mapstructure:"failure_action"`
Monitor time.Duration
MaxFailureRatio float32 `mapstructure:"max_failure_ratio"`
}
// Resources the resource limits and reservations
type Resources struct {
Limits *Resource
Reservations *Resource
}
// Resource is a resource to be limited or reserved
type Resource struct {
// TODO: types to convert from units and ratios
NanoCPUs string `mapstructure:"cpus"`
MemoryBytes UnitBytes `mapstructure:"memory"`
}
// UnitBytes is the bytes type
type UnitBytes int64
// RestartPolicy the service restart policy
type RestartPolicy struct {
Condition string
Delay *time.Duration
MaxAttempts *uint64 `mapstructure:"max_attempts"`
Window *time.Duration
}
// Placement constraints for the service
type Placement struct {
Constraints []string
Preferences []PlacementPreferences
}
// PlacementPreferences is the preferences for a service placement
type PlacementPreferences struct {
Spread string
}
// ServiceNetworkConfig is the network configuration for a service
type ServiceNetworkConfig struct {
Aliases []string
Ipv4Address string `mapstructure:"ipv4_address"`
Ipv6Address string `mapstructure:"ipv6_address"`
}
// ServicePortConfig is the port configuration for a service
type ServicePortConfig struct {
Mode string
Target uint32
Published uint32
Protocol string
}
// ServiceVolumeConfig are references to a volume used by a service
type ServiceVolumeConfig struct {
Type string
Source string
Target string
ReadOnly bool `mapstructure:"read_only"`
Consistency string
Bind *ServiceVolumeBind
Volume *ServiceVolumeVolume
}
// ServiceVolumeBind are options for a service volume of type bind
type ServiceVolumeBind struct {
Propagation string
}
// ServiceVolumeVolume are options for a service volume of type volume
type ServiceVolumeVolume struct {
NoCopy bool `mapstructure:"nocopy"`
}
type fileReferenceConfig struct {
Source string
Target string
UID string
GID string
Mode *uint32
}
// ServiceConfigObjConfig is the config obj configuration for a service
type ServiceConfigObjConfig fileReferenceConfig
// ServiceSecretConfig is the secret configuration for a service
type ServiceSecretConfig fileReferenceConfig
// UlimitsConfig the ulimit configuration
type UlimitsConfig struct {
Single int
Soft int
Hard int
}
// NetworkConfig for a network
type NetworkConfig struct {
Driver string
DriverOpts map[string]string `mapstructure:"driver_opts"`
Ipam IPAMConfig
External External
Internal bool
Attachable bool
Labels Labels
}
// IPAMConfig for a network
type IPAMConfig struct {
Driver string
Config []*IPAMPool
}
// IPAMPool for a network
type IPAMPool struct {
Subnet string
}
// VolumeConfig for a volume
type VolumeConfig struct {
Driver string
DriverOpts map[string]string `mapstructure:"driver_opts"`
External External
Labels Labels
}
// External identifies a Volume or Network as a reference to a resource that is
// not managed, and should already exist.
type External struct {
Name string
External bool
}
// CredentialSpecConfig for credential spec on Windows
type CredentialSpecConfig struct {
File string
Registry string
}
type fileObjectConfig struct {
File string
External External
Labels Labels
}
// SecretConfig for a secret
type SecretConfig fileObjectConfig
// ConfigObjConfig is the config for the swarm "Config" object
type ConfigObjConfig fileObjectConfig
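
For orientation only, a small hand-written sketch (not part of the vendored file) showing how the mapping and port types above represent v3 compose values; it assumes the vendored import path github.com/docker/cli/cli/compose/types and uses made-up literals:

package main

import (
	"fmt"

	composetypes "github.com/docker/cli/cli/compose/types"
)

func main() {
	bar, empty := "bar", ""
	// Per the MappingWithEquals comment: "FOO=bar" maps to "bar",
	// "EMPTY=" maps to a pointer to "", and a bare "UNSET" maps to nil.
	env := composetypes.MappingWithEquals{"FOO": &bar, "EMPTY": &empty, "UNSET": nil}
	for k, v := range env {
		if v == nil {
			fmt.Printf("%s (unset)\n", k)
			continue
		}
		fmt.Printf("%s=%q\n", k, *v)
	}

	// ServicePortConfig corresponds to the long-form port syntax of a v3 file.
	port := composetypes.ServicePortConfig{Mode: "ingress", Target: 80, Published: 8080, Protocol: "tcp"}
	fmt.Printf("%d:%d/%s\n", port.Published, port.Target, port.Protocol)
}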

98
vendor/github.com/docker/cli/opts/config.go generated vendored Normal file
View File

@ -0,0 +1,98 @@
package opts
import (
"encoding/csv"
"fmt"
"os"
"strconv"
"strings"
swarmtypes "github.com/docker/docker/api/types/swarm"
)
// ConfigOpt is a Value type for parsing configs
type ConfigOpt struct {
values []*swarmtypes.ConfigReference
}
// Set a new config value
func (o *ConfigOpt) Set(value string) error {
csvReader := csv.NewReader(strings.NewReader(value))
fields, err := csvReader.Read()
if err != nil {
return err
}
options := &swarmtypes.ConfigReference{
File: &swarmtypes.ConfigReferenceFileTarget{
UID: "0",
GID: "0",
Mode: 0444,
},
}
// support a simple syntax of --config foo
if len(fields) == 1 {
options.File.Name = fields[0]
options.ConfigName = fields[0]
o.values = append(o.values, options)
return nil
}
for _, field := range fields {
parts := strings.SplitN(field, "=", 2)
key := strings.ToLower(parts[0])
if len(parts) != 2 {
return fmt.Errorf("invalid field '%s' must be a key=value pair", field)
}
value := parts[1]
switch key {
case "source", "src":
options.ConfigName = value
case "target":
options.File.Name = value
case "uid":
options.File.UID = value
case "gid":
options.File.GID = value
case "mode":
m, err := strconv.ParseUint(value, 0, 32)
if err != nil {
return fmt.Errorf("invalid mode specified: %v", err)
}
options.File.Mode = os.FileMode(m)
default:
return fmt.Errorf("invalid field in config request: %s", key)
}
}
if options.ConfigName == "" {
return fmt.Errorf("source is required")
}
o.values = append(o.values, options)
return nil
}
// Type returns the type of this option
func (o *ConfigOpt) Type() string {
return "config"
}
// String returns a string repr of this option
func (o *ConfigOpt) String() string {
configs := []string{}
for _, config := range o.values {
repr := fmt.Sprintf("%s -> %s", config.ConfigName, config.File.Name)
configs = append(configs, repr)
}
return strings.Join(configs, ", ")
}
// Value returns the config requests
func (o *ConfigOpt) Value() []*swarmtypes.ConfigReference {
return o.values
}
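
A minimal usage sketch (not part of the vendored file) of the two syntaxes ConfigOpt.Set accepts; the config name and target path are made up, and the import path assumes the vendored layout:

package main

import (
	"fmt"

	"github.com/docker/cli/opts"
)

func main() {
	var cfg opts.ConfigOpt

	// Long syntax: comma-separated key=value pairs.
	if err := cfg.Set("source=app_config,target=/etc/app.conf,mode=0440"); err != nil {
		fmt.Println("set failed:", err)
		return
	}
	// Short syntax: just the config name; source and target default to it.
	_ = cfg.Set("app_config")

	fmt.Println(cfg.String()) // app_config -> /etc/app.conf, app_config -> app_config
}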

64
vendor/github.com/docker/cli/opts/duration.go generated vendored Normal file
View File

@ -0,0 +1,64 @@
package opts
import (
"time"
"github.com/pkg/errors"
)
// PositiveDurationOpt is an option type for time.Duration that uses a pointer.
// It behaves similarly to DurationOpt but only allows positive duration values.
type PositiveDurationOpt struct {
DurationOpt
}
// Set a new value on the option. Setting a negative duration value will cause
// an error to be returned.
func (d *PositiveDurationOpt) Set(s string) error {
err := d.DurationOpt.Set(s)
if err != nil {
return err
}
if *d.DurationOpt.value < 0 {
return errors.Errorf("duration cannot be negative")
}
return nil
}
// DurationOpt is an option type for time.Duration that uses a pointer. This
// allows us to get nil values outside, instead of defaulting to 0
type DurationOpt struct {
value *time.Duration
}
// NewDurationOpt creates a DurationOpt with the specified duration
func NewDurationOpt(value *time.Duration) *DurationOpt {
return &DurationOpt{
value: value,
}
}
// Set a new value on the option
func (d *DurationOpt) Set(s string) error {
v, err := time.ParseDuration(s)
d.value = &v
return err
}
// Type returns the type of this option, which will be displayed in `--help` output
func (d *DurationOpt) Type() string {
return "duration"
}
// String returns a string repr of this option
func (d *DurationOpt) String() string {
if d.value != nil {
return d.value.String()
}
return ""
}
// Value returns the time.Duration
func (d *DurationOpt) Value() *time.Duration {
return d.value
}
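
A brief sketch (not part of the vendored file) of DurationOpt's pointer semantics: Value is nil until Set succeeds, which is what distinguishes "unset" from a zero duration:

package main

import (
	"fmt"

	"github.com/docker/cli/opts"
)

func main() {
	var d opts.DurationOpt
	fmt.Println(d.Value() == nil) // true: nothing set yet

	if err := d.Set("90s"); err == nil {
		fmt.Println(*d.Value()) // 1m30s
	}

	var p opts.PositiveDurationOpt
	fmt.Println(p.Set("-5s")) // duration cannot be negative
}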

46
vendor/github.com/docker/cli/opts/env.go generated vendored Normal file
View File

@ -0,0 +1,46 @@
package opts
import (
"fmt"
"os"
"runtime"
"strings"
)
// ValidateEnv validates an environment variable and returns it.
// If no value is specified, it returns the current value using os.Getenv.
//
// As with ParseEnvFile and related to #16585, environment variable names
// are not validated whatsoever; it's up to the application inside Docker
// to validate them or not.
//
// The only validation here is to check if name is empty, per #25099
func ValidateEnv(val string) (string, error) {
arr := strings.Split(val, "=")
if arr[0] == "" {
return "", fmt.Errorf("invalid environment variable: %s", val)
}
if len(arr) > 1 {
return val, nil
}
if !doesEnvExist(val) {
return val, nil
}
return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil
}
func doesEnvExist(name string) bool {
for _, entry := range os.Environ() {
parts := strings.SplitN(entry, "=", 2)
if runtime.GOOS == "windows" {
// Environment variables are case-insensitive on Windows. PaTh, path and PATH are equivalent.
if strings.EqualFold(parts[0], name) {
return true
}
}
if parts[0] == name {
return true
}
}
return false
}
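
A small sketch (not part of the vendored file) of ValidateEnv's behavior; PATH is used only as an example of a variable that is typically set:

package main

import (
	"fmt"

	"github.com/docker/cli/opts"
)

func main() {
	// An explicit key=value pair is returned unchanged.
	v, _ := opts.ValidateEnv("FOO=bar")
	fmt.Println(v) // FOO=bar

	// A bare name is expanded from the current environment when it exists.
	p, _ := opts.ValidateEnv("PATH")
	fmt.Println(p) // PATH=<current value of $PATH>

	// An empty name is rejected.
	_, err := opts.ValidateEnv("=oops")
	fmt.Println(err) // invalid environment variable: =oops
}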

View File

@ -9,7 +9,7 @@ import (
)
var (
// DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp://
// DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. dockerd -H tcp://
// These are the IANA registered port numbers for use with Docker
// see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker
DefaultHTTPPort = 2375 // Default HTTP Port
@ -37,7 +37,7 @@ func ValidateHost(val string) (string, error) {
}
}
// Note: unlike most flag validators, we don't return the mutated value here
// we need to know what the user entered later (using ParseHost) to adjust for tls
// we need to know what the user entered later (using ParseHost) to adjust for TLS
return val, nil
}
@ -149,3 +149,17 @@ func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) {
return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil
}
// ValidateExtraHost validates that the specified string is a valid extrahost and returns it.
// ExtraHost is in the form of name:ip where the ip has to be a valid ip (IPv4 or IPv6).
func ValidateExtraHost(val string) (string, error) {
// allow for IPv6 addresses in extra hosts by only splitting on first ":"
arr := strings.SplitN(val, ":", 2)
if len(arr) != 2 || len(arr[0]) == 0 {
return "", fmt.Errorf("bad format for add-host: %q", val)
}
if _, err := ValidateIPAddress(arr[1]); err != nil {
return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1])
}
return val, nil
}
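
A short sketch (not part of the vendored file) of ValidateExtraHost, including the IPv6 case that motivates splitting only on the first colon; host names and addresses are illustrative:

package main

import (
	"fmt"

	"github.com/docker/cli/opts"
)

func main() {
	fmt.Println(opts.ValidateExtraHost("db:10.0.0.5"))     // db:10.0.0.5 <nil>
	fmt.Println(opts.ValidateExtraHost("db6:2001:db8::1")) // db6:2001:db8::1 <nil>
	fmt.Println(opts.ValidateExtraHost("db"))              // bad format for add-host: "db"
}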

174
vendor/github.com/docker/cli/opts/mount.go generated vendored Normal file
View File

@ -0,0 +1,174 @@
package opts
import (
"encoding/csv"
"fmt"
"os"
"strconv"
"strings"
mounttypes "github.com/docker/docker/api/types/mount"
"github.com/docker/go-units"
)
// MountOpt is a Value type for parsing mounts
type MountOpt struct {
values []mounttypes.Mount
}
// Set a new mount value
// nolint: gocyclo
func (m *MountOpt) Set(value string) error {
csvReader := csv.NewReader(strings.NewReader(value))
fields, err := csvReader.Read()
if err != nil {
return err
}
mount := mounttypes.Mount{}
volumeOptions := func() *mounttypes.VolumeOptions {
if mount.VolumeOptions == nil {
mount.VolumeOptions = &mounttypes.VolumeOptions{
Labels: make(map[string]string),
}
}
if mount.VolumeOptions.DriverConfig == nil {
mount.VolumeOptions.DriverConfig = &mounttypes.Driver{}
}
return mount.VolumeOptions
}
bindOptions := func() *mounttypes.BindOptions {
if mount.BindOptions == nil {
mount.BindOptions = new(mounttypes.BindOptions)
}
return mount.BindOptions
}
tmpfsOptions := func() *mounttypes.TmpfsOptions {
if mount.TmpfsOptions == nil {
mount.TmpfsOptions = new(mounttypes.TmpfsOptions)
}
return mount.TmpfsOptions
}
setValueOnMap := func(target map[string]string, value string) {
parts := strings.SplitN(value, "=", 2)
if len(parts) == 1 {
target[value] = ""
} else {
target[parts[0]] = parts[1]
}
}
mount.Type = mounttypes.TypeVolume // default to volume mounts
// Set writable as the default
for _, field := range fields {
parts := strings.SplitN(field, "=", 2)
key := strings.ToLower(parts[0])
if len(parts) == 1 {
switch key {
case "readonly", "ro":
mount.ReadOnly = true
continue
case "volume-nocopy":
volumeOptions().NoCopy = true
continue
}
}
if len(parts) != 2 {
return fmt.Errorf("invalid field '%s' must be a key=value pair", field)
}
value := parts[1]
switch key {
case "type":
mount.Type = mounttypes.Type(strings.ToLower(value))
case "source", "src":
mount.Source = value
case "target", "dst", "destination":
mount.Target = value
case "readonly", "ro":
mount.ReadOnly, err = strconv.ParseBool(value)
if err != nil {
return fmt.Errorf("invalid value for %s: %s", key, value)
}
case "consistency":
mount.Consistency = mounttypes.Consistency(strings.ToLower(value))
case "bind-propagation":
bindOptions().Propagation = mounttypes.Propagation(strings.ToLower(value))
case "volume-nocopy":
volumeOptions().NoCopy, err = strconv.ParseBool(value)
if err != nil {
return fmt.Errorf("invalid value for volume-nocopy: %s", value)
}
case "volume-label":
setValueOnMap(volumeOptions().Labels, value)
case "volume-driver":
volumeOptions().DriverConfig.Name = value
case "volume-opt":
if volumeOptions().DriverConfig.Options == nil {
volumeOptions().DriverConfig.Options = make(map[string]string)
}
setValueOnMap(volumeOptions().DriverConfig.Options, value)
case "tmpfs-size":
sizeBytes, err := units.RAMInBytes(value)
if err != nil {
return fmt.Errorf("invalid value for %s: %s", key, value)
}
tmpfsOptions().SizeBytes = sizeBytes
case "tmpfs-mode":
ui64, err := strconv.ParseUint(value, 8, 32)
if err != nil {
return fmt.Errorf("invalid value for %s: %s", key, value)
}
tmpfsOptions().Mode = os.FileMode(ui64)
default:
return fmt.Errorf("unexpected key '%s' in '%s'", key, field)
}
}
if mount.Type == "" {
return fmt.Errorf("type is required")
}
if mount.Target == "" {
return fmt.Errorf("target is required")
}
if mount.VolumeOptions != nil && mount.Type != mounttypes.TypeVolume {
return fmt.Errorf("cannot mix 'volume-*' options with mount type '%s'", mount.Type)
}
if mount.BindOptions != nil && mount.Type != mounttypes.TypeBind {
return fmt.Errorf("cannot mix 'bind-*' options with mount type '%s'", mount.Type)
}
if mount.TmpfsOptions != nil && mount.Type != mounttypes.TypeTmpfs {
return fmt.Errorf("cannot mix 'tmpfs-*' options with mount type '%s'", mount.Type)
}
m.values = append(m.values, mount)
return nil
}
// Type returns the type of this option
func (m *MountOpt) Type() string {
return "mount"
}
// String returns a string repr of this option
func (m *MountOpt) String() string {
mounts := []string{}
for _, mount := range m.values {
repr := fmt.Sprintf("%s %s %s", mount.Type, mount.Source, mount.Target)
mounts = append(mounts, repr)
}
return strings.Join(mounts, ", ")
}
// Value returns the mounts
func (m *MountOpt) Value() []mounttypes.Mount {
return m.values
}
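
A minimal sketch (not part of the vendored file) of MountOpt.Set with the CSV --mount syntax; paths and sizes are made up:

package main

import (
	"fmt"

	"github.com/docker/cli/opts"
)

func main() {
	var m opts.MountOpt

	// A read-only bind mount ("readonly" alone is treated as readonly=true).
	if err := m.Set("type=bind,source=/tmp,target=/data,readonly"); err != nil {
		fmt.Println(err)
		return
	}
	// A tmpfs mount with a size limit; tmpfs-size accepts human-readable units.
	if err := m.Set("type=tmpfs,target=/scratch,tmpfs-size=64m"); err != nil {
		fmt.Println(err)
		return
	}

	fmt.Println(m.String()) // e.g. bind /tmp /data, tmpfs  /scratch
}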

106
vendor/github.com/docker/cli/opts/network.go generated vendored Normal file
View File

@ -0,0 +1,106 @@
package opts
import (
"encoding/csv"
"fmt"
"regexp"
"strings"
)
const (
networkOptName = "name"
networkOptAlias = "alias"
driverOpt = "driver-opt"
)
// NetworkAttachmentOpts represents the network options for endpoint creation
type NetworkAttachmentOpts struct {
Target string
Aliases []string
DriverOpts map[string]string
}
// NetworkOpt represents a network config in swarm mode.
type NetworkOpt struct {
options []NetworkAttachmentOpts
}
// Set networkopts value
func (n *NetworkOpt) Set(value string) error {
longSyntax, err := regexp.MatchString(`\w+=\w+(,\w+=\w+)*`, value)
if err != nil {
return err
}
var netOpt NetworkAttachmentOpts
if longSyntax {
csvReader := csv.NewReader(strings.NewReader(value))
fields, err := csvReader.Read()
if err != nil {
return err
}
netOpt.Aliases = []string{}
for _, field := range fields {
parts := strings.SplitN(field, "=", 2)
if len(parts) < 2 {
return fmt.Errorf("invalid field %s", field)
}
key := strings.TrimSpace(strings.ToLower(parts[0]))
value := strings.TrimSpace(strings.ToLower(parts[1]))
switch key {
case networkOptName:
netOpt.Target = value
case networkOptAlias:
netOpt.Aliases = append(netOpt.Aliases, value)
case driverOpt:
key, value, err = parseDriverOpt(value)
if err == nil {
if netOpt.DriverOpts == nil {
netOpt.DriverOpts = make(map[string]string)
}
netOpt.DriverOpts[key] = value
} else {
return err
}
default:
return fmt.Errorf("invalid field key %s", key)
}
}
if len(netOpt.Target) == 0 {
return fmt.Errorf("network name/id is not specified")
}
} else {
netOpt.Target = value
}
n.options = append(n.options, netOpt)
return nil
}
// Type returns the type of this option
func (n *NetworkOpt) Type() string {
return "network"
}
// Value returns the networkopts
func (n *NetworkOpt) Value() []NetworkAttachmentOpts {
return n.options
}
// String returns the network opts as a string
func (n *NetworkOpt) String() string {
return ""
}
func parseDriverOpt(driverOpt string) (key string, value string, err error) {
parts := strings.SplitN(driverOpt, "=", 2)
if len(parts) != 2 {
		err = fmt.Errorf("invalid key value pair format in driver options")
		// return early so we do not index a value that is not there
		return
	}
key = strings.TrimSpace(strings.ToLower(parts[0]))
value = strings.TrimSpace(strings.ToLower(parts[1]))
return
}
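
A short sketch (not part of the vendored file) of the long and short network syntaxes handled by NetworkOpt.Set; the network names and the driver option are made up:

package main

import (
	"fmt"

	"github.com/docker/cli/opts"
)

func main() {
	var n opts.NetworkOpt

	// Long syntax: name plus optional aliases and driver options.
	if err := n.Set("name=backend,alias=db,driver-opt=com.example.weight=1"); err != nil {
		fmt.Println(err)
		return
	}
	// Short syntax: just the network name or ID.
	_ = n.Set("frontend")

	for _, opt := range n.Value() {
		fmt.Println(opt.Target, opt.Aliases, opt.DriverOpts)
	}
}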

View File

@ -2,11 +2,14 @@ package opts
import (
"fmt"
"math/big"
"net"
"path"
"regexp"
"strings"
"github.com/docker/docker/api/types/filters"
units "github.com/docker/go-units"
)
var (
@ -35,7 +38,10 @@ func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {
}
func (opts *ListOpts) String() string {
return fmt.Sprintf("%v", []string((*opts.values)))
if len(*opts.values) == 0 {
return ""
}
return fmt.Sprintf("%v", *opts.values)
}
// Set validates if needed the input value and adds it to the
@ -107,6 +113,12 @@ func (opts *ListOpts) Type() string {
return "list"
}
// WithValidator returns the ListOpts with validator set.
func (opts *ListOpts) WithValidator(validator ValidatorFctType) *ListOpts {
opts.validator = validator
return opts
}
// NamedOption is an interface that list and map options
// with names implement.
type NamedOption interface {
@ -224,6 +236,15 @@ func ValidateIPAddress(val string) (string, error) {
return "", fmt.Errorf("%s is not an ip address", val)
}
// ValidateMACAddress validates a MAC address.
func ValidateMACAddress(val string) (string, error) {
_, err := net.ParseMAC(strings.TrimSpace(val))
if err != nil {
return "", err
}
return val, nil
}
// ValidateDNSSearch validates domain for resolvconf search configuration.
// A zero length domain is represented by a dot (.).
func ValidateDNSSearch(val string) (string, error) {
@ -319,3 +340,149 @@ func (o *FilterOpt) Type() string {
func (o *FilterOpt) Value() filters.Args {
return o.filter
}
// NanoCPUs is a type for fixed point fractional number.
type NanoCPUs int64
// String returns the string format of the number
func (c *NanoCPUs) String() string {
if *c == 0 {
return ""
}
return big.NewRat(c.Value(), 1e9).FloatString(3)
}
// Set sets the value of the NanoCPU by passing a string
func (c *NanoCPUs) Set(value string) error {
cpus, err := ParseCPUs(value)
*c = NanoCPUs(cpus)
return err
}
// Type returns the type
func (c *NanoCPUs) Type() string {
return "decimal"
}
// Value returns the value in int64
func (c *NanoCPUs) Value() int64 {
return int64(*c)
}
// ParseCPUs takes a string ratio and returns an integer value of nano cpus
func ParseCPUs(value string) (int64, error) {
cpu, ok := new(big.Rat).SetString(value)
if !ok {
return 0, fmt.Errorf("failed to parse %v as a rational number", value)
}
nano := cpu.Mul(cpu, big.NewRat(1e9, 1))
if !nano.IsInt() {
return 0, fmt.Errorf("value is too precise")
}
return nano.Num().Int64(), nil
}
// ParseLink parses and validates the specified string as a link format (name:alias)
func ParseLink(val string) (string, string, error) {
if val == "" {
return "", "", fmt.Errorf("empty string specified for links")
}
arr := strings.Split(val, ":")
if len(arr) > 2 {
return "", "", fmt.Errorf("bad format for links: %s", val)
}
if len(arr) == 1 {
return val, val, nil
}
// This is kept because we can actually get a HostConfig with links
// from an already created container and the format is not `foo:bar`
// but `/foo:/c1/bar`
if strings.HasPrefix(arr[0], "/") {
_, alias := path.Split(arr[1])
return arr[0][1:], alias, nil
}
return arr[0], arr[1], nil
}
// ValidateLink validates that the specified string has a valid link format (containerName:alias).
func ValidateLink(val string) (string, error) {
_, _, err := ParseLink(val)
return val, err
}
// MemBytes is a type for human readable memory bytes (like 128M, 2g, etc)
type MemBytes int64
// String returns the string format of the human readable memory bytes
func (m *MemBytes) String() string {
// NOTE: In spf13/pflag/flag.go, "0" is considered as "zero value" while "0 B" is not.
// We return "0" in case value is 0 here so that the default value is hidden.
// (Sometimes "default 0 B" is actually misleading)
if m.Value() != 0 {
return units.BytesSize(float64(m.Value()))
}
return "0"
}
// Set sets the value of the MemBytes by passing a string
func (m *MemBytes) Set(value string) error {
val, err := units.RAMInBytes(value)
*m = MemBytes(val)
return err
}
// Type returns the type
func (m *MemBytes) Type() string {
return "bytes"
}
// Value returns the value in int64
func (m *MemBytes) Value() int64 {
return int64(*m)
}
// UnmarshalJSON is the customized unmarshaler for MemBytes
func (m *MemBytes) UnmarshalJSON(s []byte) error {
if len(s) <= 2 || s[0] != '"' || s[len(s)-1] != '"' {
return fmt.Errorf("invalid size: %q", s)
}
val, err := units.RAMInBytes(string(s[1 : len(s)-1]))
*m = MemBytes(val)
return err
}
// MemSwapBytes is a type for human readable memory bytes (like 128M, 2g, etc).
// It differs from MemBytes in that -1 is valid and the default.
type MemSwapBytes int64
// Set sets the value of the MemSwapBytes by passing a string
func (m *MemSwapBytes) Set(value string) error {
if value == "-1" {
*m = MemSwapBytes(-1)
return nil
}
val, err := units.RAMInBytes(value)
*m = MemSwapBytes(val)
return err
}
// Type returns the type
func (m *MemSwapBytes) Type() string {
return "bytes"
}
// Value returns the value in int64
func (m *MemSwapBytes) Value() int64 {
return int64(*m)
}
func (m *MemSwapBytes) String() string {
b := MemBytes(*m)
return b.String()
}
// UnmarshalJSON is the customized unmarshaler for MemSwapBytes
func (m *MemSwapBytes) UnmarshalJSON(s []byte) error {
	b := MemBytes(*m)
	if err := b.UnmarshalJSON(s); err != nil {
		return err
	}
	// propagate the parsed value back to the receiver
	*m = MemSwapBytes(b)
	return nil
}
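
A brief sketch (not part of the vendored file) of the numeric option types above; the figures simply restate the conversions performed by ParseCPUs and units.RAMInBytes:

package main

import (
	"fmt"

	"github.com/docker/cli/opts"
)

func main() {
	// 0.5 CPUs expressed as nano-CPUs.
	nanos, err := opts.ParseCPUs("0.5")
	fmt.Println(nanos, err) // 500000000 <nil>

	// Human-readable memory sizes parse to bytes (128m = 128 MiB).
	var mem opts.MemBytes
	_ = mem.Set("128m")
	fmt.Println(mem.Value(), mem.String()) // 134217728 128MiB

	// MemSwapBytes additionally treats -1 (unlimited) as valid.
	var swap opts.MemSwapBytes
	_ = swap.Set("-1")
	fmt.Println(swap.Value()) // -1
}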

View File

@ -2,5 +2,5 @@
package opts
// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080
// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080
const DefaultHTTPHost = "localhost"

View File

@ -52,5 +52,5 @@ package opts
// to the delay if a user were to do 'docker run -H=tcp://localhost:2375...'
// explicitly.
// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080
// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080
const DefaultHTTPHost = "127.0.0.1"

163
vendor/github.com/docker/cli/opts/port.go generated vendored Normal file
View File

@ -0,0 +1,163 @@
package opts
import (
"encoding/csv"
"fmt"
"regexp"
"strconv"
"strings"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/go-connections/nat"
)
const (
portOptTargetPort = "target"
portOptPublishedPort = "published"
portOptProtocol = "protocol"
portOptMode = "mode"
)
// PortOpt represents a port config in swarm mode.
type PortOpt struct {
ports []swarm.PortConfig
}
// Set a new port value
// nolint: gocyclo
func (p *PortOpt) Set(value string) error {
longSyntax, err := regexp.MatchString(`\w+=\w+(,\w+=\w+)*`, value)
if err != nil {
return err
}
if longSyntax {
csvReader := csv.NewReader(strings.NewReader(value))
fields, err := csvReader.Read()
if err != nil {
return err
}
pConfig := swarm.PortConfig{}
for _, field := range fields {
parts := strings.SplitN(field, "=", 2)
if len(parts) != 2 {
return fmt.Errorf("invalid field %s", field)
}
key := strings.ToLower(parts[0])
value := strings.ToLower(parts[1])
switch key {
case portOptProtocol:
if value != string(swarm.PortConfigProtocolTCP) && value != string(swarm.PortConfigProtocolUDP) {
return fmt.Errorf("invalid protocol value %s", value)
}
pConfig.Protocol = swarm.PortConfigProtocol(value)
case portOptMode:
if value != string(swarm.PortConfigPublishModeIngress) && value != string(swarm.PortConfigPublishModeHost) {
return fmt.Errorf("invalid publish mode value %s", value)
}
pConfig.PublishMode = swarm.PortConfigPublishMode(value)
case portOptTargetPort:
tPort, err := strconv.ParseUint(value, 10, 16)
if err != nil {
return err
}
pConfig.TargetPort = uint32(tPort)
case portOptPublishedPort:
pPort, err := strconv.ParseUint(value, 10, 16)
if err != nil {
return err
}
pConfig.PublishedPort = uint32(pPort)
default:
return fmt.Errorf("invalid field key %s", key)
}
}
if pConfig.TargetPort == 0 {
return fmt.Errorf("missing mandatory field %q", portOptTargetPort)
}
if pConfig.PublishMode == "" {
pConfig.PublishMode = swarm.PortConfigPublishModeIngress
}
if pConfig.Protocol == "" {
pConfig.Protocol = swarm.PortConfigProtocolTCP
}
p.ports = append(p.ports, pConfig)
} else {
// short syntax
portConfigs := []swarm.PortConfig{}
ports, portBindingMap, err := nat.ParsePortSpecs([]string{value})
if err != nil {
return err
}
for _, portBindings := range portBindingMap {
for _, portBinding := range portBindings {
if portBinding.HostIP != "" {
return fmt.Errorf("hostip is not supported")
}
}
}
for port := range ports {
portConfig, err := ConvertPortToPortConfig(port, portBindingMap)
if err != nil {
return err
}
portConfigs = append(portConfigs, portConfig...)
}
p.ports = append(p.ports, portConfigs...)
}
return nil
}
// Type returns the type of this option
func (p *PortOpt) Type() string {
return "port"
}
// String returns a string repr of this option
func (p *PortOpt) String() string {
ports := []string{}
for _, port := range p.ports {
repr := fmt.Sprintf("%v:%v/%s/%s", port.PublishedPort, port.TargetPort, port.Protocol, port.PublishMode)
ports = append(ports, repr)
}
return strings.Join(ports, ", ")
}
// Value returns the ports
func (p *PortOpt) Value() []swarm.PortConfig {
return p.ports
}
// ConvertPortToPortConfig converts ports to the swarm type
func ConvertPortToPortConfig(
port nat.Port,
portBindings map[nat.Port][]nat.PortBinding,
) ([]swarm.PortConfig, error) {
ports := []swarm.PortConfig{}
for _, binding := range portBindings[port] {
hostPort, err := strconv.ParseUint(binding.HostPort, 10, 16)
if err != nil && binding.HostPort != "" {
return nil, fmt.Errorf("invalid hostport binding (%s) for port (%s)", binding.HostPort, port.Port())
}
ports = append(ports, swarm.PortConfig{
//TODO Name: ?
Protocol: swarm.PortConfigProtocol(strings.ToLower(port.Proto())),
TargetPort: uint32(port.Int()),
PublishedPort: uint32(hostPort),
PublishMode: swarm.PortConfigPublishModeIngress,
})
}
return ports, nil
}
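
A short sketch (not part of the vendored file) of the long and short publish syntaxes PortOpt.Set understands; the port numbers are arbitrary:

package main

import (
	"fmt"

	"github.com/docker/cli/opts"
)

func main() {
	var p opts.PortOpt

	// Long syntax: explicit key=value pairs.
	if err := p.Set("published=8080,target=80,protocol=tcp,mode=ingress"); err != nil {
		fmt.Println(err)
		return
	}
	// Short syntax: handled via nat.ParsePortSpecs; defaults to tcp/ingress.
	if err := p.Set("9090:90"); err != nil {
		fmt.Println(err)
		return
	}

	fmt.Println(p.String()) // 8080:80/tcp/ingress, 9090:90/tcp/ingress
}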

37
vendor/github.com/docker/cli/opts/quotedstring.go generated vendored Normal file
View File

@ -0,0 +1,37 @@
package opts
// QuotedString is a string that may have extra quotes around the value. The
// quotes are stripped from the value.
type QuotedString struct {
value *string
}
// Set sets a new value
func (s *QuotedString) Set(val string) error {
*s.value = trimQuotes(val)
return nil
}
// Type returns the type of the value
func (s *QuotedString) Type() string {
return "string"
}
func (s *QuotedString) String() string {
return string(*s.value)
}
func trimQuotes(value string) string {
	if len(value) < 2 {
		// too short to be quoted; also guards against indexing an empty string
		return value
	}
	lastIndex := len(value) - 1
for _, char := range []byte{'\'', '"'} {
if value[0] == char && value[lastIndex] == char {
return value[1:lastIndex]
}
}
return value
}
// NewQuotedString returns a new quoted string option
func NewQuotedString(value *string) *QuotedString {
return &QuotedString{value: value}
}
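
A tiny sketch (not part of the vendored file) of QuotedString stripping a matching pair of surrounding quotes:

package main

import (
	"fmt"

	"github.com/docker/cli/opts"
)

func main() {
	var s string
	q := opts.NewQuotedString(&s)

	_ = q.Set(`"secret value"`) // surrounding double quotes are stripped
	fmt.Println(s)              // secret value

	_ = q.Set(`it's fine`) // unmatched quotes are left alone
	fmt.Println(s)         // it's fine
}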

98
vendor/github.com/docker/cli/opts/secret.go generated vendored Normal file
View File

@ -0,0 +1,98 @@
package opts
import (
"encoding/csv"
"fmt"
"os"
"strconv"
"strings"
swarmtypes "github.com/docker/docker/api/types/swarm"
)
// SecretOpt is a Value type for parsing secrets
type SecretOpt struct {
values []*swarmtypes.SecretReference
}
// Set a new secret value
func (o *SecretOpt) Set(value string) error {
csvReader := csv.NewReader(strings.NewReader(value))
fields, err := csvReader.Read()
if err != nil {
return err
}
options := &swarmtypes.SecretReference{
File: &swarmtypes.SecretReferenceFileTarget{
UID: "0",
GID: "0",
Mode: 0444,
},
}
// support a simple syntax of --secret foo
if len(fields) == 1 {
options.File.Name = fields[0]
options.SecretName = fields[0]
o.values = append(o.values, options)
return nil
}
for _, field := range fields {
parts := strings.SplitN(field, "=", 2)
key := strings.ToLower(parts[0])
if len(parts) != 2 {
return fmt.Errorf("invalid field '%s' must be a key=value pair", field)
}
value := parts[1]
switch key {
case "source", "src":
options.SecretName = value
case "target":
options.File.Name = value
case "uid":
options.File.UID = value
case "gid":
options.File.GID = value
case "mode":
m, err := strconv.ParseUint(value, 0, 32)
if err != nil {
return fmt.Errorf("invalid mode specified: %v", err)
}
options.File.Mode = os.FileMode(m)
default:
return fmt.Errorf("invalid field in secret request: %s", key)
}
}
if options.SecretName == "" {
return fmt.Errorf("source is required")
}
o.values = append(o.values, options)
return nil
}
// Type returns the type of this option
func (o *SecretOpt) Type() string {
return "secret"
}
// String returns a string repr of this option
func (o *SecretOpt) String() string {
secrets := []string{}
for _, secret := range o.values {
repr := fmt.Sprintf("%s -> %s", secret.SecretName, secret.File.Name)
secrets = append(secrets, repr)
}
return strings.Join(secrets, ", ")
}
// Value returns the secret requests
func (o *SecretOpt) Value() []*swarmtypes.SecretReference {
return o.values
}

View File

@ -100,14 +100,12 @@ func (opt *ThrottledeviceOpt) String() string {
// GetList returns a slice of pointers to ThrottleDevices.
func (opt *ThrottledeviceOpt) GetList() []*blkiodev.ThrottleDevice {
var throttledevice []*blkiodev.ThrottleDevice
for _, v := range opt.values {
throttledevice = append(throttledevice, v)
}
throttledevice = append(throttledevice, opt.values...)
return throttledevice
}
// Type returns the option type
func (opt *ThrottledeviceOpt) Type() string {
return "throttled-device"
return "list"
}

View File

@ -85,5 +85,5 @@ func (opt *WeightdeviceOpt) GetList() []*blkiodev.WeightDevice {
// Type returns the option type
func (opt *WeightdeviceOpt) Type() string {
return "weighted-device"
return "list"
}

View File

@ -176,7 +176,7 @@
END OF TERMS AND CONDITIONS
Copyright 2013-2016 Docker, Inc.
Copyright 2013-2017 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
Docker
Copyright 2012-2016 Docker, Inc.
Copyright 2012-2017 Docker, Inc.
This product includes software developed at Docker, Inc. (https://www.docker.com).

View File

@ -13,9 +13,21 @@ import (
// CheckpointCreateOptions holds parameters to create a checkpoint from a container
type CheckpointCreateOptions struct {
CheckpointID string
CheckpointDir string
Exit bool
}
// CheckpointListOptions holds parameters to list checkpoints for a container
type CheckpointListOptions struct {
CheckpointDir string
}
// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container
type CheckpointDeleteOptions struct {
CheckpointID string
CheckpointDir string
}
// ContainerAttachOptions holds parameters to attach to a container.
type ContainerAttachOptions struct {
Stream bool
@ -23,6 +35,7 @@ type ContainerAttachOptions struct {
Stdout bool
Stderr bool
DetachKeys string
Logs bool
}
// ContainerCommitOptions holds parameters to commit changes into a container.
@ -41,6 +54,7 @@ type ContainerExecInspect struct {
ContainerID string
Running bool
ExitCode int
Pid int
}
// ContainerListOptions holds parameters to list containers with.
@ -52,7 +66,7 @@ type ContainerListOptions struct {
Since string
Before string
Limit int
Filter filters.Args
Filters filters.Args
}
// ContainerLogsOptions holds parameters to filter logs with.
@ -76,12 +90,14 @@ type ContainerRemoveOptions struct {
// ContainerStartOptions holds parameters to start containers.
type ContainerStartOptions struct {
CheckpointID string
CheckpointDir string
}
// CopyToContainerOptions holds information
// about files to copy into a container
type CopyToContainerOptions struct {
AllowOverwriteDirWithFile bool
CopyUIDGID bool
}
// EventsOptions holds parameters to filter events with.
@ -140,10 +156,15 @@ type ImageBuildOptions struct {
Memory int64
MemorySwap int64
CgroupParent string
NetworkMode string
ShmSize int64
Dockerfile string
Ulimits []*units.Ulimit
BuildArgs map[string]string
// BuildArgs needs to be a *string instead of just a string so that
// we can tell the difference between "" (empty string) and no value
// at all (nil). See the parsing of buildArgs in
// api/server/router/build/build_routes.go for even more info.
BuildArgs map[string]*string
AuthConfigs map[string]AuthConfig
Context io.Reader
Labels map[string]string
@ -154,6 +175,9 @@ type ImageBuildOptions struct {
// CacheFrom specifies images that are used for matching cache. Images
// specified here do not need to have a valid parent chain to match cache.
CacheFrom []string
SecurityOpt []string
ExtraHosts []string // List of extra hosts
Target string
}
// ImageBuildResponse holds information
@ -171,8 +195,8 @@ type ImageCreateOptions struct {
// ImageImportSource holds source information for ImageImport
type ImageImportSource struct {
Source io.Reader // Source is the data to send to the server to create this image from (mutually exclusive with SourceName)
SourceName string // SourceName is the name of the image to pull (mutually exclusive with Source)
Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this.
SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute.
}
// ImageImportOptions holds information to import images from the client host.
@ -184,7 +208,6 @@ type ImageImportOptions struct {
// ImageListOptions holds parameters to filter the list of images with.
type ImageListOptions struct {
MatchName string
All bool
Filters filters.Args
}
@ -236,21 +259,9 @@ type ResizeOptions struct {
Width uint
}
// VersionResponse holds version information for the client and the server
type VersionResponse struct {
Client *Version
Server *Version
}
// ServerOK returns true when the client could connect to the docker server
// and parse the information received. It returns false otherwise.
func (v VersionResponse) ServerOK() bool {
return v.Server != nil
}
// NodeListOptions holds parameters to list nodes with.
type NodeListOptions struct {
Filter filters.Args
Filters filters.Args
}
// NodeRemoveOptions holds parameters to remove nodes with.
@ -265,6 +276,12 @@ type ServiceCreateOptions struct {
//
// This field follows the format of the X-Registry-Auth header.
EncodedRegistryAuth string
// QueryRegistry indicates whether the service update requires
// contacting a registry. A registry may be contacted to retrieve
// the image digest and manifest, which in turn can be used to update
// platform or other information about the service.
QueryRegistry bool
}
// ServiceCreateResponse contains the information returned to a client
@ -272,8 +289,16 @@ type ServiceCreateOptions struct {
type ServiceCreateResponse struct {
// ID is the ID of the created service.
ID string
// Warnings is a set of non-fatal warning messages to pass on to the user.
Warnings []string `json:",omitempty"`
}
// Values for RegistryAuthFrom in ServiceUpdateOptions
const (
RegistryAuthFromSpec = "spec"
RegistryAuthFromPreviousSpec = "previous-spec"
)
// ServiceUpdateOptions contains the options to be used for updating services.
type ServiceUpdateOptions struct {
// EncodedRegistryAuth is the encoded registry authorization credentials to
@ -285,19 +310,75 @@ type ServiceUpdateOptions struct {
// TODO(stevvooe): Consider moving the version parameter of ServiceUpdate
// into this field. While it does open API users up to racy writes, most
// users may not need that level of consistency in practice.
// RegistryAuthFrom specifies where to find the registry authorization
// credentials if they are not given in EncodedRegistryAuth. Valid
// values are "spec" and "previous-spec".
RegistryAuthFrom string
// Rollback indicates whether a server-side rollback should be
// performed. When this is set, the provided spec will be ignored.
// The valid values are "previous" and "none". An empty value is the
// same as "none".
Rollback string
// QueryRegistry indicates whether the service update requires
// contacting a registry. A registry may be contacted to retrieve
// the image digest and manifest, which in turn can be used to update
// platform or other information about the service.
QueryRegistry bool
}
// ServiceListOptions holds parameters to list services with.
type ServiceListOptions struct {
Filter filters.Args
Filters filters.Args
}
// ServiceInspectOptions holds parameters related to the "service inspect"
// operation.
type ServiceInspectOptions struct {
InsertDefaults bool
}
// TaskListOptions holds parameters to list tasks with.
type TaskListOptions struct {
Filter filters.Args
Filters filters.Args
}
// PluginRemoveOptions holds parameters to remove plugins.
type PluginRemoveOptions struct {
Force bool
}
// PluginEnableOptions holds parameters to enable plugins.
type PluginEnableOptions struct {
Timeout int
}
// PluginDisableOptions holds parameters to disable plugins.
type PluginDisableOptions struct {
Force bool
}
// PluginInstallOptions holds parameters to install a plugin.
type PluginInstallOptions struct {
Disabled bool
AcceptAllPermissions bool
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
RemoteRef string // RemoteRef is the plugin name on the registry
PrivilegeFunc RequestPrivilegeFunc
AcceptPermissionsFunc func(PluginPrivileges) (bool, error)
Args []string
}
// SwarmUnlockKeyResponse contains the response for Engine API:
// GET /swarm/unlockkey
type SwarmUnlockKeyResponse struct {
// UnlockKey is the unlock key in ASCII-armored format.
UnlockKey string
}
// PluginCreateOptions holds all options to plugin create.
type PluginCreateOptions struct {
RepoName string
}
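
For illustration, a hand-written sketch (not part of the vendored file) of how the new service update fields fit together; it only constructs the options struct and does not call the Engine API:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	updateOpts := types.ServiceUpdateOptions{
		// Reuse the registry credentials recorded in the previous service spec.
		RegistryAuthFrom: types.RegistryAuthFromPreviousSpec,
		// Ask the daemon to roll back to the previous spec server-side.
		Rollback: "previous",
		// Allow the daemon to contact the registry to resolve the image digest.
		QueryRegistry: true,
	}
	fmt.Printf("%+v\n", updateOpts)
}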

View File

@ -53,9 +53,17 @@ type ExecConfig struct {
Cmd []string // Execution commands and args
}
// PluginRmConfig holds arguments for the plugin remove
// operation. This struct is used to tell the backend what operations
// to perform.
// PluginRmConfig holds arguments for plugin remove.
type PluginRmConfig struct {
ForceRemove bool
}
// PluginEnableConfig holds arguments for plugin enable
type PluginEnableConfig struct {
Timeout int
}
// PluginDisableConfig holds arguments for plugin disable.
type PluginDisableConfig struct {
ForceDisable bool
}

View File

@ -7,6 +7,12 @@ import (
"github.com/docker/go-connections/nat"
)
// MinimumDuration puts a minimum on user configured duration.
// This is to prevent API errors caused by the time unit. For example,
// a caller may set 3 as the healthcheck interval intending 3 seconds,
// but Docker would interpret it as 3 nanoseconds.
const MinimumDuration = 1 * time.Millisecond
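
A short sketch (not part of the vendored file) of the nanosecond pitfall MinimumDuration guards against, using the HealthConfig type defined just below; the probe command is made up:

package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// Durations are integer nanoseconds: a bare 3 means 3ns, not 3s.
	fmt.Println(time.Duration(3), 3*time.Second) // 3ns 3s

	hc := container.HealthConfig{
		Test:        []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
		Interval:    30 * time.Second,
		Timeout:     5 * time.Second,
		StartPeriod: 10 * time.Second,
	}
	fmt.Printf("%+v\n", hc)
}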
// HealthConfig holds configuration settings for the HEALTHCHECK feature.
type HealthConfig struct {
// Test is the test to perform to check that the container is healthy.
@ -21,6 +27,7 @@ type HealthConfig struct {
// Zero means to inherit. Durations are expressed as integer nanoseconds.
Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down.
// Retries is the number of consecutive failures needed to consider a container as unhealthy.
// Zero means inherit.
@ -40,7 +47,7 @@ type Config struct {
AttachStdin bool // Attach the standard input, makes possible user interaction
AttachStdout bool // Attach the standard output
AttachStderr bool // Attach the standard error
ExposedPorts map[nat.Port]struct{} `json:",omitempty"` // List of exposed ports
ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports
Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
OpenStdin bool // Open stdin
StdinOnce bool // If true, close stdin after the 1 attached client disconnects.

View File

@ -0,0 +1,21 @@
package container
// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------
// ContainerChangeResponseItem container change response item
// swagger:model ContainerChangeResponseItem
type ContainerChangeResponseItem struct {
// Kind of change
// Required: true
Kind uint8 `json:"Kind"`
// Path to file that has changed
// Required: true
Path string `json:"Path"`
}

View File

@ -0,0 +1,21 @@
package container
// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------
// ContainerCreateCreatedBody container create created body
// swagger:model ContainerCreateCreatedBody
type ContainerCreateCreatedBody struct {
// The ID of the created container
// Required: true
ID string `json:"Id"`
// Warnings encountered when creating the container
// Required: true
Warnings []string `json:"Warnings"`
}

View File

@ -0,0 +1,21 @@
package container
// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------
// ContainerTopOKBody container top o k body
// swagger:model ContainerTopOKBody
type ContainerTopOKBody struct {
// Each process running in the container, where each process is an array of values corresponding to the titles
// Required: true
Processes [][]string `json:"Processes"`
// The ps column titles
// Required: true
Titles []string `json:"Titles"`
}

View File

@ -0,0 +1,17 @@
package container
// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------
// ContainerUpdateOKBody container update o k body
// swagger:model ContainerUpdateOKBody
type ContainerUpdateOKBody struct {
// warnings
// Required: true
Warnings []string `json:"Warnings"`
}

View File

@ -0,0 +1,17 @@
package container
// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------
// ContainerWaitOKBody container wait o k body
// swagger:model ContainerWaitOKBody
type ContainerWaitOKBody struct {
// Exit code of the container
// Required: true
StatusCode int64 `json:"StatusCode"`
}

View File

@ -10,9 +10,6 @@ import (
"github.com/docker/go-units"
)
// NetworkMode represents the container network stack.
type NetworkMode string
// Isolation represents the isolation technology of a container. The supported
// values are platform specific
type Isolation string
@ -66,6 +63,47 @@ func (n IpcMode) Container() string {
return ""
}
// NetworkMode represents the container network stack.
type NetworkMode string
// IsNone indicates whether container isn't using a network stack.
func (n NetworkMode) IsNone() bool {
return n == "none"
}
// IsDefault indicates whether container uses the default network stack.
func (n NetworkMode) IsDefault() bool {
return n == "default"
}
// IsPrivate indicates whether container uses its private network stack.
func (n NetworkMode) IsPrivate() bool {
return !(n.IsHost() || n.IsContainer())
}
// IsContainer indicates whether container uses a container network stack.
func (n NetworkMode) IsContainer() bool {
parts := strings.SplitN(string(n), ":", 2)
return len(parts) > 1 && parts[0] == "container"
}
// ConnectedContainer is the id of the container whose network this container is connected to.
func (n NetworkMode) ConnectedContainer() string {
parts := strings.SplitN(string(n), ":", 2)
if len(parts) > 1 {
return parts[1]
}
return ""
}
//UserDefined indicates user-created network
func (n NetworkMode) UserDefined() string {
if n.IsUserDefined() {
return string(n)
}
return ""
}
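
A tiny sketch (not part of the vendored file) of the container network mode helpers above; the container ID is made up:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	n := container.NetworkMode("container:abc123")
	fmt.Println(n.IsContainer())        // true
	fmt.Println(n.ConnectedContainer()) // abc123
	fmt.Println(n.IsPrivate())          // false: it shares another container's stack

	fmt.Println(container.NetworkMode("none").IsNone()) // true
}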
// UsernsMode represents userns mode in the container.
type UsernsMode string
@ -223,6 +261,17 @@ func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool {
return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount
}
// LogMode is a type to define the available modes for logging
// These modes affect how logs are handled when log messages start piling up.
type LogMode string
// Available logging modes
const (
LogModeUnset = ""
LogModeBlocking LogMode = "blocking"
LogModeNonBlock LogMode = "non-blocking"
)
// LogConfig represents the logging configuration of the container.
type LogConfig struct {
Type string
@ -234,6 +283,7 @@ type Resources struct {
// Applicable to all platforms
CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers)
Memory int64 // Memory limit (in bytes)
NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10^-9 CPUs.
// Applicable to UNIX platforms
CgroupParent string // Parent cgroup.
@ -245,9 +295,12 @@ type Resources struct {
BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice
CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period
CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota
CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period
CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime
CpusetCpus string // CpusetCpus 0-2, 0,1
CpusetMems string // CpusetMems 0-2, 0,1
Devices []DeviceMapping // List of devices to map inside the container
DeviceCgroupRules []string // List of rule to be added to the device cgroup
DiskQuota int64 // Disk limit (in bytes)
KernelMemory int64 // Kernel memory limit (in bytes)
MemoryReservation int64 // Memory soft limit (in bytes)
@ -314,7 +367,7 @@ type HostConfig struct {
// Applicable to Windows
ConsoleSize [2]uint // Initial console size (height,width)
Isolation Isolation // Isolation technology of the container (eg default, hyperv)
Isolation Isolation // Isolation technology of the container (e.g. default, hyperv)
// Contains container's resources (cgroups, ulimits)
Resources
@ -324,7 +377,4 @@ type HostConfig struct {
// Run a custom init inside the container, if null, use the daemon's configured settings
Init *bool `json:",omitempty"`
// Custom init path
InitPath string `json:",omitempty"`
}

View File

@ -2,23 +2,11 @@
package container
import "strings"
// IsValid indicates if an isolation technology is valid
func (i Isolation) IsValid() bool {
return i.IsDefault()
}
// IsPrivate indicates whether container uses its private network stack.
func (n NetworkMode) IsPrivate() bool {
return !(n.IsHost() || n.IsContainer())
}
// IsDefault indicates whether container uses the default network stack.
func (n NetworkMode) IsDefault() bool {
return n == "default"
}
// NetworkName returns the name of the network stack.
func (n NetworkMode) NetworkName() string {
if n.IsBridge() {
@ -47,35 +35,7 @@ func (n NetworkMode) IsHost() bool {
return n == "host"
}
// IsContainer indicates whether container uses a container network stack.
func (n NetworkMode) IsContainer() bool {
parts := strings.SplitN(string(n), ":", 2)
return len(parts) > 1 && parts[0] == "container"
}
// IsNone indicates whether container isn't using a network stack.
func (n NetworkMode) IsNone() bool {
return n == "none"
}
// ConnectedContainer is the id of the container whose network this container is connected to.
func (n NetworkMode) ConnectedContainer() string {
parts := strings.SplitN(string(n), ":", 2)
if len(parts) > 1 {
return parts[1]
}
return ""
}
// IsUserDefined indicates user-created network
func (n NetworkMode) IsUserDefined() bool {
return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()
}
//UserDefined indicates user-created network
func (n NetworkMode) UserDefined() string {
if n.IsUserDefined() {
return string(n)
}
return ""
}

View File

@ -4,22 +4,6 @@ import (
"strings"
)
// IsDefault indicates whether container uses the default network stack.
func (n NetworkMode) IsDefault() bool {
return n == "default"
}
// IsNone indicates whether container isn't using a network stack.
func (n NetworkMode) IsNone() bool {
return n == "none"
}
// IsContainer indicates whether container uses a container network stack.
// Returns false as windows doesn't support this mode
func (n NetworkMode) IsContainer() bool {
return false
}
// IsBridge indicates whether container uses the bridge network stack
// in windows it is given the name NAT
func (n NetworkMode) IsBridge() bool {
@ -32,20 +16,9 @@ func (n NetworkMode) IsHost() bool {
return false
}
// IsPrivate indicates whether container uses its private network stack.
func (n NetworkMode) IsPrivate() bool {
return !(n.IsHost() || n.IsContainer())
}
// ConnectedContainer is the id of the container whose network this container is connected to.
// Returns blank string on windows
func (n NetworkMode) ConnectedContainer() string {
return ""
}
// IsUserDefined indicates user-created network
func (n NetworkMode) IsUserDefined() bool {
return !n.IsDefault() && !n.IsNone() && !n.IsBridge()
return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer()
}
// IsHyperV indicates the use of a Hyper-V partition for isolation
@ -71,17 +44,11 @@ func (n NetworkMode) NetworkName() string {
return "nat"
} else if n.IsNone() {
return "none"
} else if n.IsContainer() {
return "container"
} else if n.IsUserDefined() {
return n.UserDefined()
}
return ""
}
//UserDefined indicates user-created network
func (n NetworkMode) UserDefined() string {
if n.IsUserDefined() {
return string(n)
}
return ""
}

View File

@ -0,0 +1,22 @@
package container
// WaitCondition is a type used to specify a container state for which
// to wait.
type WaitCondition string
// Possible WaitCondition Values.
//
// WaitConditionNotRunning (default) is used to wait for any of the non-running
// states: "created", "exited", "dead", "removing", or "removed".
//
// WaitConditionNextExit is used to wait for the next time the state changes
// to a non-running state. If the state is currently "created" or "exited",
// this would cause Wait() to block until either the container runs and exits
// or is removed.
//
// WaitConditionRemoved is used to wait for the container to be removed.
const (
WaitConditionNotRunning WaitCondition = "not-running"
WaitConditionNextExit WaitCondition = "next-exit"
WaitConditionRemoved WaitCondition = "removed"
)

View File

@ -0,0 +1,13 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// ErrorResponse Represents an error.
// swagger:model ErrorResponse
type ErrorResponse struct {
// The error message.
// Required: true
Message string `json:"message"`
}

View File

@ -1,6 +0,0 @@
package types
// ErrorResponse is the response body of API errors.
type ErrorResponse struct {
Message string `json:"message"`
}

View File

@ -79,8 +79,8 @@ func ToParamWithVersion(version string, a Args) (string, error) {
}
// for daemons older than v1.10, filter must be of the form map[string][]string
buf := []byte{}
err := errors.New("")
var buf []byte
var err error
if version != "" && versions.LessThan(version, "1.22") {
buf, err = json.Marshal(convertArgsToSlice(a.fields))
} else {
@ -144,6 +144,9 @@ func (filters Args) Add(name, value string) {
func (filters Args) Del(name, value string) {
if _, ok := filters.fields[name]; ok {
delete(filters.fields[name], value)
if len(filters.fields[name]) == 0 {
delete(filters.fields, name)
}
}
}
@ -165,7 +168,7 @@ func (filters Args) MatchKVList(field string, sources map[string]string) bool {
return true
}
if sources == nil || len(sources) == 0 {
if len(sources) == 0 {
return false
}
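
A small sketch (not part of the vendored file) of the new Del behavior: once the last value for a key is removed, the key itself disappears from the filter set. Include is used only to observe the result:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	args := filters.NewArgs()
	args.Add("label", "tier=frontend")
	fmt.Println(args.Include("label")) // true

	args.Del("label", "tier=frontend")
	fmt.Println(args.Include("label")) // false: the now-empty key was dropped as well
}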

View File

@ -0,0 +1,17 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// GraphDriverData Information about a container's graph driver.
// swagger:model GraphDriverData
type GraphDriverData struct {
// data
// Required: true
Data map[string]string `json:"Data"`
// name
// Required: true
Name string `json:"Name"`
}

View File

@ -0,0 +1,13 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// IDResponse Response to an API call that returns just an Id
// swagger:model IdResponse
type IDResponse struct {
// The id of the newly created object.
// Required: true
ID string `json:"Id"`
}

View File

@ -0,0 +1,15 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// ImageDeleteResponseItem image delete response item
// swagger:model ImageDeleteResponseItem
type ImageDeleteResponseItem struct {
// The image ID of an image that was deleted
Deleted string `json:"Deleted,omitempty"`
// The image ID of an image that was untagged
Untagged string `json:"Untagged,omitempty"`
}

View File

@ -0,0 +1,49 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// ImageSummary image summary
// swagger:model ImageSummary
type ImageSummary struct {
// containers
// Required: true
Containers int64 `json:"Containers"`
// created
// Required: true
Created int64 `json:"Created"`
// Id
// Required: true
ID string `json:"Id"`
// labels
// Required: true
Labels map[string]string `json:"Labels"`
// parent Id
// Required: true
ParentID string `json:"ParentId"`
// repo digests
// Required: true
RepoDigests []string `json:"RepoDigests"`
// repo tags
// Required: true
RepoTags []string `json:"RepoTags"`
// shared size
// Required: true
SharedSize int64 `json:"SharedSize"`
// size
// Required: true
Size int64 `json:"Size"`
// virtual size
// Required: true
VirtualSize int64 `json:"VirtualSize"`
}

View File

@ -1,24 +1,36 @@
package mount
import (
"os"
)
// Type represents the type of a mount.
type Type string
// Type constants
const (
// TypeBind BIND
// TypeBind is the type for mounting host dir
TypeBind Type = "bind"
// TypeVolume VOLUME
// TypeVolume is the type for remote storage volumes
TypeVolume Type = "volume"
// TypeTmpfs is the type for mounting tmpfs
TypeTmpfs Type = "tmpfs"
)
// Mount represents a mount (volume).
type Mount struct {
Type Type `json:",omitempty"`
// Source specifies the name of the mount. Depending on mount type, this
// may be a volume name or a host path, or even ignored.
// Source is not supported for tmpfs (must be an empty value)
Source string `json:",omitempty"`
Target string `json:",omitempty"`
ReadOnly bool `json:",omitempty"`
Consistency Consistency `json:",omitempty"`
BindOptions *BindOptions `json:",omitempty"`
VolumeOptions *VolumeOptions `json:",omitempty"`
TmpfsOptions *TmpfsOptions `json:",omitempty"`
}
// Propagation represents the propagation of a mount.
@ -39,6 +51,30 @@ const (
PropagationSlave Propagation = "slave"
)
// Propagations is the list of all valid mount propagations
var Propagations = []Propagation{
PropagationRPrivate,
PropagationPrivate,
PropagationRShared,
PropagationShared,
PropagationRSlave,
PropagationSlave,
}
// Consistency represents the consistency requirements of a mount.
type Consistency string
const (
// ConsistencyFull guarantees bind-mount-like consistency
ConsistencyFull Consistency = "consistent"
// ConsistencyCached mounts can cache read data and FS structure
ConsistencyCached Consistency = "cached"
// ConsistencyDelegated mounts can cache read and written data and structure
ConsistencyDelegated Consistency = "delegated"
// ConsistencyDefault provides "consistent" behavior unless overridden
ConsistencyDefault Consistency = "default"
)
// BindOptions defines options specific to mounts of type "bind".
type BindOptions struct {
Propagation Propagation `json:",omitempty"`
@ -56,3 +92,37 @@ type Driver struct {
Name string `json:",omitempty"`
Options map[string]string `json:",omitempty"`
}
// TmpfsOptions defines options specific to mounts of type "tmpfs".
type TmpfsOptions struct {
// Size sets the size of the tmpfs, in bytes.
//
// This will be converted to an operating system specific value
// depending on the host. For example, on linux, it will be converted to
// use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with
// docker, uses a straight byte value.
//
// Percentages are not supported.
SizeBytes int64 `json:",omitempty"`
// Mode of the tmpfs upon creation
Mode os.FileMode `json:",omitempty"`
// TODO(stevvooe): There are several more tmpfs flags, specified in the
// daemon, that are accepted. Only the most basic are added for now.
//
// From docker/docker/pkg/mount/flags.go:
//
// var validFlags = map[string]bool{
// "": true,
// "size": true, X
// "mode": true, X
// "uid": true,
// "gid": true,
// "nr_inodes": true,
// "nr_blocks": true,
// "mpol": true,
// }
//
// Some of these may be straightforward to add, but others, such as
// uid/gid have implications in a clustered system.
}
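
A minimal sketch (not part of the vendored file) of a tmpfs mount built from the types above; the target path and size are arbitrary:

package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/api/types/mount"
)

func main() {
	m := mount.Mount{
		Type:   mount.TypeTmpfs,
		Target: "/scratch", // tmpfs mounts take no Source
		TmpfsOptions: &mount.TmpfsOptions{
			SizeBytes: 64 * 1024 * 1024,  // 64 MiB
			Mode:      os.FileMode(0700), // mode of the tmpfs upon creation
		},
	}
	fmt.Printf("%+v\n", m)
}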

View File

@ -28,6 +28,20 @@ type EndpointIPAMConfig struct {
LinkLocalIPs []string `json:",omitempty"`
}
// Copy makes a copy of the endpoint ipam config
func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig {
cfgCopy := *cfg
cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs))
cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...)
return &cfgCopy
}
// PeerInfo represents one peer of an overlay network
type PeerInfo struct {
Name string
IP string
}
// EndpointSettings stores the network endpoint details
type EndpointSettings struct {
// Configurations
@ -44,6 +58,42 @@ type EndpointSettings struct {
GlobalIPv6Address string
GlobalIPv6PrefixLen int
MacAddress string
DriverOpts map[string]string
}
// Task carries the information about one backend task
type Task struct {
Name string
EndpointID string
EndpointIP string
Info map[string]string
}
// ServiceInfo represents service parameters with the list of service's tasks
type ServiceInfo struct {
VIP string
Ports []string
LocalLBIndex int
Tasks []Task
}
// Copy makes a deep copy of `EndpointSettings`
func (es *EndpointSettings) Copy() *EndpointSettings {
epCopy := *es
if es.IPAMConfig != nil {
epCopy.IPAMConfig = es.IPAMConfig.Copy()
}
if es.Links != nil {
links := make([]string, 0, len(es.Links))
epCopy.Links = append(links, es.Links...)
}
if es.Aliases != nil {
aliases := make([]string, 0, len(es.Aliases))
epCopy.Aliases = append(aliases, es.Aliases...)
}
return &epCopy
}
// NetworkingConfig represents the container's networking configuration for each of its interfaces
@ -51,3 +101,8 @@ type EndpointSettings struct {
type NetworkingConfig struct {
EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network
}
// ConfigReference specifies the source which provides a network's configuration
type ConfigReference struct {
Network string
}
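A small usage sketch for the Copy method added above, assuming the vendored network package; the alias and MAC values are made up for illustration.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/network"
)

func main() {
	orig := &network.EndpointSettings{
		Aliases:    []string{"web", "frontend"},
		MacAddress: "02:42:ac:11:00:02",
	}

	// Copy duplicates the nested slices, so mutating the copy
	// leaves the original settings untouched.
	cp := orig.Copy()
	cp.Aliases = append(cp.Aliases, "api")

	fmt.Println(len(orig.Aliases), len(cp.Aliases)) // 2 3
}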

View File

@ -1,168 +1,200 @@
package types
import (
"encoding/json"
"fmt"
)
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// PluginInstallOptions holds parameters to install a plugin.
type PluginInstallOptions struct {
Disabled bool
AcceptAllPermissions bool
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
PrivilegeFunc RequestPrivilegeFunc
AcceptPermissionsFunc func(PluginPrivileges) (bool, error)
}
// PluginConfig represents the values of settings potentially modifiable by a user
type PluginConfig struct {
Mounts []PluginMount
Env []string
Args []string
Devices []PluginDevice
}
// Plugin represents a Docker plugin for the remote API
// Plugin A plugin for the Engine API
// swagger:model Plugin
type Plugin struct {
// config
// Required: true
Config PluginConfig `json:"Config"`
// True when the plugin is running. False when the plugin is not running, only installed.
// Required: true
Enabled bool `json:"Enabled"`
// Id
ID string `json:"Id,omitempty"`
Name string
Tag string
// Enabled is true when the plugin is running, is false when the plugin is not running, only installed.
Enabled bool
Config PluginConfig
Manifest PluginManifest
// name
// Required: true
Name string `json:"Name"`
// plugin remote reference used to push/pull the plugin
PluginReference string `json:"PluginReference,omitempty"`
// settings
// Required: true
Settings PluginSettings `json:"Settings"`
}
// PluginsListResponse contains the response for the remote API
type PluginsListResponse []*Plugin
// PluginConfig The config of a plugin.
// swagger:model PluginConfig
type PluginConfig struct {
const (
authzDriver = "AuthzDriver"
graphDriver = "GraphDriver"
ipamDriver = "IpamDriver"
networkDriver = "NetworkDriver"
volumeDriver = "VolumeDriver"
)
// args
// Required: true
Args PluginConfigArgs `json:"Args"`
// PluginInterfaceType represents a type that a plugin implements.
type PluginInterfaceType struct {
Prefix string // This is always "docker"
Capability string // Capability should be validated against the above list.
Version string // Plugin API version. Depends on the capability
// description
// Required: true
Description string `json:"Description"`
// Docker Version used to create the plugin
DockerVersion string `json:"DockerVersion,omitempty"`
// documentation
// Required: true
Documentation string `json:"Documentation"`
// entrypoint
// Required: true
Entrypoint []string `json:"Entrypoint"`
// env
// Required: true
Env []PluginEnv `json:"Env"`
// interface
// Required: true
Interface PluginConfigInterface `json:"Interface"`
// ipc host
// Required: true
IpcHost bool `json:"IpcHost"`
// linux
// Required: true
Linux PluginConfigLinux `json:"Linux"`
// mounts
// Required: true
Mounts []PluginMount `json:"Mounts"`
// network
// Required: true
Network PluginConfigNetwork `json:"Network"`
// pid host
// Required: true
PidHost bool `json:"PidHost"`
// propagated mount
// Required: true
PropagatedMount string `json:"PropagatedMount"`
// user
User PluginConfigUser `json:"User,omitempty"`
// work dir
// Required: true
WorkDir string `json:"WorkDir"`
// rootfs
Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"`
}
// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType
func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error {
versionIndex := len(p)
prefixIndex := 0
if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' {
return fmt.Errorf("%q is not a plugin interface type", p)
}
p = p[1 : len(p)-1]
loop:
for i, b := range p {
switch b {
case '.':
prefixIndex = i
case '/':
versionIndex = i
break loop
}
}
t.Prefix = string(p[:prefixIndex])
t.Capability = string(p[prefixIndex+1 : versionIndex])
if versionIndex < len(p) {
t.Version = string(p[versionIndex+1:])
}
return nil
// PluginConfigArgs plugin config args
// swagger:model PluginConfigArgs
type PluginConfigArgs struct {
// description
// Required: true
Description string `json:"Description"`
// name
// Required: true
Name string `json:"Name"`
// settable
// Required: true
Settable []string `json:"Settable"`
// value
// Required: true
Value []string `json:"Value"`
}
// MarshalJSON implements json.Marshaler for PluginInterfaceType
func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) {
return json.Marshal(t.String())
// PluginConfigInterface The interface between Docker and the plugin
// swagger:model PluginConfigInterface
type PluginConfigInterface struct {
// socket
// Required: true
Socket string `json:"Socket"`
// types
// Required: true
Types []PluginInterfaceType `json:"Types"`
}
// String implements fmt.Stringer for PluginInterfaceType
func (t PluginInterfaceType) String() string {
return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version)
// PluginConfigLinux plugin config linux
// swagger:model PluginConfigLinux
type PluginConfigLinux struct {
// allow all devices
// Required: true
AllowAllDevices bool `json:"AllowAllDevices"`
// capabilities
// Required: true
Capabilities []string `json:"Capabilities"`
// devices
// Required: true
Devices []PluginDevice `json:"Devices"`
}
// PluginInterface describes the interface between Docker and plugin
type PluginInterface struct {
Types []PluginInterfaceType
Socket string
// PluginConfigNetwork plugin config network
// swagger:model PluginConfigNetwork
type PluginConfigNetwork struct {
// type
// Required: true
Type string `json:"Type"`
}
// PluginSetting is to be embedded in other structs, if they are supposed to be
// modifiable by the user.
type PluginSetting struct {
Name string
Description string
Settable []string
// PluginConfigRootfs plugin config rootfs
// swagger:model PluginConfigRootfs
type PluginConfigRootfs struct {
// diff ids
DiffIds []string `json:"diff_ids"`
// type
Type string `json:"type,omitempty"`
}
// PluginNetwork represents the network configuration for a plugin
type PluginNetwork struct {
Type string
// PluginConfigUser plugin config user
// swagger:model PluginConfigUser
type PluginConfigUser struct {
// g ID
GID uint32 `json:"GID,omitempty"`
// UID
UID uint32 `json:"UID,omitempty"`
}
// PluginMount represents the mount configuration for a plugin
type PluginMount struct {
PluginSetting
Source *string
Destination string
Type string
Options []string
}
// PluginSettings Settings that can be modified by users.
// swagger:model PluginSettings
type PluginSettings struct {
// PluginEnv represents an environment variable for a plugin
type PluginEnv struct {
PluginSetting
Value *string
}
// args
// Required: true
Args []string `json:"Args"`
// PluginArgs represents the command line arguments for a plugin
type PluginArgs struct {
PluginSetting
Value []string
}
// devices
// Required: true
Devices []PluginDevice `json:"Devices"`
// PluginDevice represents a device for a plugin
type PluginDevice struct {
PluginSetting
Path *string
}
// env
// Required: true
Env []string `json:"Env"`
// PluginUser represents the user for the plugin's process
type PluginUser struct {
UID uint32 `json:"Uid,omitempty"`
GID uint32 `json:"Gid,omitempty"`
// mounts
// Required: true
Mounts []PluginMount `json:"Mounts"`
}
// PluginManifest represents the manifest of a plugin
type PluginManifest struct {
ManifestVersion string
Description string
Documentation string
Interface PluginInterface
Entrypoint []string
Workdir string
User PluginUser `json:",omitempty"`
Network PluginNetwork
Capabilities []string
Mounts []PluginMount
Devices []PluginDevice
Env []PluginEnv
Args PluginArgs
}
// PluginPrivilege describes a permission the user has to accept
// upon installing a plugin.
type PluginPrivilege struct {
Name string
Description string
Value []string
}
// PluginPrivileges is a list of PluginPrivilege
type PluginPrivileges []PluginPrivilege

View File

@ -0,0 +1,25 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// PluginDevice plugin device
// swagger:model PluginDevice
type PluginDevice struct {
// description
// Required: true
Description string `json:"Description"`
// name
// Required: true
Name string `json:"Name"`
// path
// Required: true
Path *string `json:"Path"`
// settable
// Required: true
Settable []string `json:"Settable"`
}

View File

@ -0,0 +1,25 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// PluginEnv plugin env
// swagger:model PluginEnv
type PluginEnv struct {
// description
// Required: true
Description string `json:"Description"`
// name
// Required: true
Name string `json:"Name"`
// settable
// Required: true
Settable []string `json:"Settable"`
// value
// Required: true
Value *string `json:"Value"`
}

View File

@ -0,0 +1,21 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// PluginInterfaceType plugin interface type
// swagger:model PluginInterfaceType
type PluginInterfaceType struct {
// capability
// Required: true
Capability string `json:"Capability"`
// prefix
// Required: true
Prefix string `json:"Prefix"`
// version
// Required: true
Version string `json:"Version"`
}

View File

@ -0,0 +1,37 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// PluginMount plugin mount
// swagger:model PluginMount
type PluginMount struct {
// description
// Required: true
Description string `json:"Description"`
// destination
// Required: true
Destination string `json:"Destination"`
// name
// Required: true
Name string `json:"Name"`
// options
// Required: true
Options []string `json:"Options"`
// settable
// Required: true
Settable []string `json:"Settable"`
// source
// Required: true
Source *string `json:"Source"`
// type
// Required: true
Type string `json:"Type"`
}

View File

@ -0,0 +1,79 @@
package types
import (
"encoding/json"
"fmt"
"sort"
)
// PluginsListResponse contains the response for the Engine API
type PluginsListResponse []*Plugin
const (
authzDriver = "AuthzDriver"
graphDriver = "GraphDriver"
ipamDriver = "IpamDriver"
networkDriver = "NetworkDriver"
volumeDriver = "VolumeDriver"
)
// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType
func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error {
versionIndex := len(p)
prefixIndex := 0
if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' {
return fmt.Errorf("%q is not a plugin interface type", p)
}
p = p[1 : len(p)-1]
loop:
for i, b := range p {
switch b {
case '.':
prefixIndex = i
case '/':
versionIndex = i
break loop
}
}
t.Prefix = string(p[:prefixIndex])
t.Capability = string(p[prefixIndex+1 : versionIndex])
if versionIndex < len(p) {
t.Version = string(p[versionIndex+1:])
}
return nil
}
// MarshalJSON implements json.Marshaler for PluginInterfaceType
func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) {
return json.Marshal(t.String())
}
// String implements fmt.Stringer for PluginInterfaceType
func (t PluginInterfaceType) String() string {
return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version)
}
// PluginPrivilege describes a permission the user has to accept
// upon installing a plugin.
type PluginPrivilege struct {
Name string
Description string
Value []string
}
// PluginPrivileges is a list of PluginPrivilege
type PluginPrivileges []PluginPrivilege
func (s PluginPrivileges) Len() int {
return len(s)
}
func (s PluginPrivileges) Less(i, j int) bool {
return s[i].Name < s[j].Name
}
func (s PluginPrivileges) Swap(i, j int) {
sort.Strings(s[i].Value)
sort.Strings(s[j].Value)
s[i], s[j] = s[j], s[i]
}
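The marshaling helpers above round-trip the compact "prefix.capability/version" wire form; a minimal sketch, using an illustrative capability string.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	// Parse the compact string form into its three components.
	var t types.PluginInterfaceType
	if err := json.Unmarshal([]byte(`"docker.volumedriver/1.0"`), &t); err != nil {
		panic(err)
	}
	fmt.Println(t.Prefix, t.Capability, t.Version) // docker volumedriver 1.0

	// Marshal back to the same compact form.
	out, _ := json.Marshal(&t)
	fmt.Println(string(out)) // "docker.volumedriver/1.0"
}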

23
vendor/github.com/docker/docker/api/types/port.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// Port An open port on a container
// swagger:model Port
type Port struct {
// IP
IP string `json:"IP,omitempty"`
// Port on the container
// Required: true
PrivatePort uint16 `json:"PrivatePort"`
// Port exposed on the host
PublicPort uint16 `json:"PublicPort,omitempty"`
// type
// Required: true
Type string `json:"Type"`
}

View File

@ -0,0 +1,21 @@
package registry
// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------
// AuthenticateOKBody authenticate o k body
// swagger:model AuthenticateOKBody
type AuthenticateOKBody struct {
// An opaque token used to authenticate a user after a successful login
// Required: true
IdentityToken string `json:"IdentityToken"`
// The status of the authentication
// Required: true
Status string `json:"Status"`
}

View File

@ -3,10 +3,14 @@ package registry
import (
"encoding/json"
"net"
"github.com/opencontainers/image-spec/specs-go/v1"
)
// ServiceConfig stores daemon registry services configuration.
type ServiceConfig struct {
AllowNondistributableArtifactsCIDRs []*NetIPNet
AllowNondistributableArtifactsHostnames []string
InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"`
IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"`
Mirrors []string
@ -102,3 +106,14 @@ type SearchResults struct {
// Results is a slice containing the actual results for the search
Results []SearchResult `json:"results"`
}
// DistributionInspect describes the result obtained from contacting the
// registry to retrieve image metadata
type DistributionInspect struct {
// Descriptor contains information about the manifest, including
// the content addressable digest
Descriptor v1.Descriptor
// Platforms contains the list of platforms supported by the image,
// obtained by parsing the manifest
Platforms []v1.Platform
}

View File

@ -10,7 +10,7 @@ type Seccomp struct {
Syscalls []*Syscall `json:"syscalls"`
}
// Architecture is used to represent an specific architecture
// Architecture is used to represent a specific architecture
// and its sub-architectures
type Architecture struct {
Arch Arch `json:"architecture"`

View File

@ -0,0 +1,12 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// ServiceUpdateResponse service update response
// swagger:model ServiceUpdateResponse
type ServiceUpdateResponse struct {
// Optional warning messages
Warnings []string `json:"Warnings"`
}

View File

@ -47,6 +47,9 @@ type CPUStats struct {
// System Usage. Linux only.
SystemUsage uint64 `json:"system_cpu_usage,omitempty"`
// Online CPUs. Linux only.
OnlineCPUs uint32 `json:"online_cpus,omitempty"`
// Throttling Data. Linux only.
ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
}
@ -170,6 +173,9 @@ type Stats struct {
type StatsJSON struct {
Stats
Name string `json:"name,omitempty"`
ID string `json:"id,omitempty"`
// Networks request version >=1.21
Networks map[string]NetworkStats `json:"networks,omitempty"`
}

View File

@ -17,5 +17,24 @@ type Meta struct {
// Annotations represents how to describe an object.
type Annotations struct {
Name string `json:",omitempty"`
Labels map[string]string `json:",omitempty"`
Labels map[string]string `json:"Labels"`
}
// Driver represents a driver (network, logging).
type Driver struct {
Name string `json:",omitempty"`
Options map[string]string `json:",omitempty"`
}
// TLSInfo represents the TLS information about what CA certificate is trusted,
// and who the issuer for a TLS certificate is
type TLSInfo struct {
// TrustRoot is the trusted CA root certificate in PEM format
TrustRoot string `json:",omitempty"`
// CertIssuer is the raw subject bytes of the issuer
CertIssuerSubject []byte `json:",omitempty"`
// CertIssuerPublicKey is the raw public key bytes of the issuer
CertIssuerPublicKey []byte `json:",omitempty"`
}

View File

@ -0,0 +1,31 @@
package swarm
import "os"
// Config represents a config.
type Config struct {
ID string
Meta
Spec ConfigSpec
}
// ConfigSpec represents a config specification from a config in swarm
type ConfigSpec struct {
Annotations
Data []byte `json:",omitempty"`
}
// ConfigReferenceFileTarget is a file target in a config reference
type ConfigReferenceFileTarget struct {
Name string
UID string
GID string
Mode os.FileMode
}
// ConfigReference is a reference to a config in swarm
type ConfigReference struct {
File *ConfigReferenceFileTarget
ConfigID string
ConfigName string
}

View File

@ -3,20 +3,70 @@ package swarm
import (
"time"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount"
)
// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf)
// Detailed documentation is available in:
// http://man7.org/linux/man-pages/man5/resolv.conf.5.html
// `nameserver`, `search`, and `options` are supported.
// TODO: `domain` is not supported yet.
type DNSConfig struct {
// Nameservers specifies the IP addresses of the name servers
Nameservers []string `json:",omitempty"`
// Search specifies the search list for host-name lookup
Search []string `json:",omitempty"`
// Options allows certain internal resolver variables to be modified
Options []string `json:",omitempty"`
}
// SELinuxContext contains the SELinux labels of the container.
type SELinuxContext struct {
Disable bool
User string
Role string
Type string
Level string
}
// CredentialSpec for managed service account (Windows only)
type CredentialSpec struct {
File string
Registry string
}
// Privileges defines the security options for the container.
type Privileges struct {
CredentialSpec *CredentialSpec
SELinuxContext *SELinuxContext
}
// ContainerSpec represents the spec of a container.
type ContainerSpec struct {
Image string `json:",omitempty"`
Labels map[string]string `json:",omitempty"`
Command []string `json:",omitempty"`
Args []string `json:",omitempty"`
Hostname string `json:",omitempty"`
Env []string `json:",omitempty"`
Dir string `json:",omitempty"`
User string `json:",omitempty"`
Groups []string `json:",omitempty"`
Privileges *Privileges `json:",omitempty"`
StopSignal string `json:",omitempty"`
TTY bool `json:",omitempty"`
OpenStdin bool `json:",omitempty"`
ReadOnly bool `json:",omitempty"`
Mounts []mount.Mount `json:",omitempty"`
StopGracePeriod *time.Duration `json:",omitempty"`
Healthcheck *container.HealthConfig `json:",omitempty"`
// The format of extra hosts on swarmkit is specified in:
// http://man7.org/linux/man-pages/man5/hosts.5.html
// IP_address canonical_hostname [aliases...]
Hosts []string `json:",omitempty"`
DNSConfig *DNSConfig `json:",omitempty"`
Secrets []*SecretReference `json:",omitempty"`
Configs []*ConfigReference `json:",omitempty"`
}
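A sketch of a ContainerSpec that exercises the DNSConfig and mount.Mount fields added above; the image, volume, and resolver values are illustrative.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/mount"
	"github.com/docker/docker/api/types/swarm"
)

func main() {
	spec := swarm.ContainerSpec{
		Image: "nginx:alpine",
		Mounts: []mount.Mount{
			{Type: mount.TypeVolume, Source: "webdata", Target: "/usr/share/nginx/html"},
		},
		// Maps onto the resolv.conf directives described above.
		DNSConfig: &swarm.DNSConfig{
			Nameservers: []string{"8.8.8.8"},
			Search:      []string{"example.internal"},
			Options:     []string{"ndots:2"},
		},
	}
	fmt.Printf("%+v\n", spec)
}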

View File

@ -1,5 +1,9 @@
package swarm
import (
"github.com/docker/docker/api/types/network"
)
// Endpoint represents an endpoint.
type Endpoint struct {
Spec EndpointSpec `json:",omitempty"`
@ -31,8 +35,23 @@ type PortConfig struct {
TargetPort uint32 `json:",omitempty"`
// PublishedPort is the port on the swarm hosts
PublishedPort uint32 `json:",omitempty"`
// PublishMode is the mode in which port is published
PublishMode PortConfigPublishMode `json:",omitempty"`
}
// PortConfigPublishMode represents the mode in which the port is to
// be published.
type PortConfigPublishMode string
const (
// PortConfigPublishModeIngress is used for ports published
// for ingress load balancing using routing mesh.
PortConfigPublishModeIngress PortConfigPublishMode = "ingress"
// PortConfigPublishModeHost is used for ports published
// for direct host level access on the host where the task is running.
PortConfigPublishModeHost PortConfigPublishMode = "host"
)
// PortConfigProtocol represents the protocol of a port.
type PortConfigProtocol string
@ -67,13 +86,17 @@ type NetworkSpec struct {
IPv6Enabled bool `json:",omitempty"`
Internal bool `json:",omitempty"`
Attachable bool `json:",omitempty"`
Ingress bool `json:",omitempty"`
IPAMOptions *IPAMOptions `json:",omitempty"`
ConfigFrom *network.ConfigReference `json:",omitempty"`
Scope string `json:",omitempty"`
}
// NetworkAttachmentConfig represents the configuration of a network attachment.
type NetworkAttachmentConfig struct {
Target string `json:",omitempty"`
Aliases []string `json:",omitempty"`
DriverOpts map[string]string `json:",omitempty"`
}
// NetworkAttachment represents a network attachment.
@ -94,9 +117,3 @@ type IPAMConfig struct {
Range string `json:",omitempty"`
Gateway string `json:",omitempty"`
}
// Driver represents a network driver.
type Driver struct {
Name string `json:",omitempty"`
Options map[string]string `json:",omitempty"`
}

View File

@ -52,9 +52,10 @@ type NodeDescription struct {
Platform Platform `json:",omitempty"`
Resources Resources `json:",omitempty"`
Engine EngineDescription `json:",omitempty"`
TLSInfo TLSInfo `json:",omitempty"`
}
// Platform represents the platfrom (Arch/OS).
// Platform represents the platform (Arch/OS).
type Platform struct {
Architecture string `json:",omitempty"`
OS string `json:",omitempty"`
@ -77,6 +78,7 @@ type PluginDescription struct {
type NodeStatus struct {
State NodeState `json:",omitempty"`
Message string `json:",omitempty"`
Addr string `json:",omitempty"`
}
// Reachability represents the reachability of a node.

View File

@ -0,0 +1,19 @@
package swarm
// RuntimeType is the type of runtime used for the TaskSpec
type RuntimeType string
// RuntimeURL is the proto type url
type RuntimeURL string
const (
// RuntimeContainer is the container based runtime
RuntimeContainer RuntimeType = "container"
// RuntimePlugin is the plugin based runtime
RuntimePlugin RuntimeType = "plugin"
// RuntimeURLContainer is the proto url for the container type
RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer"
// RuntimeURLPlugin is the proto url for the plugin type
RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin"
)

View File

@ -0,0 +1,31 @@
package swarm
import "os"
// Secret represents a secret.
type Secret struct {
ID string
Meta
Spec SecretSpec
}
// SecretSpec represents a secret specification from a secret in swarm
type SecretSpec struct {
Annotations
Data []byte `json:",omitempty"`
}
// SecretReferenceFileTarget is a file target in a secret reference
type SecretReferenceFileTarget struct {
Name string
UID string
GID string
Mode os.FileMode
}
// SecretReference is a reference to a secret in swarm
type SecretReference struct {
File *SecretReferenceFileTarget
SecretID string
SecretName string
}

View File

@ -7,8 +7,9 @@ type Service struct {
ID string
Meta
Spec ServiceSpec `json:",omitempty"`
PreviousSpec *ServiceSpec `json:",omitempty"`
Endpoint Endpoint `json:",omitempty"`
UpdateStatus UpdateStatus `json:",omitempty"`
UpdateStatus *UpdateStatus `json:",omitempty"`
}
// ServiceSpec represents the spec of a service.
@ -20,6 +21,7 @@ type ServiceSpec struct {
TaskTemplate TaskSpec `json:",omitempty"`
Mode ServiceMode `json:",omitempty"`
UpdateConfig *UpdateConfig `json:",omitempty"`
RollbackConfig *UpdateConfig `json:",omitempty"`
// Networks field in ServiceSpec is deprecated. The
// same field in TaskSpec should be used instead.
@ -44,13 +46,19 @@ const (
UpdateStatePaused UpdateState = "paused"
// UpdateStateCompleted is the completed state.
UpdateStateCompleted UpdateState = "completed"
// UpdateStateRollbackStarted is the state with a rollback in progress.
UpdateStateRollbackStarted UpdateState = "rollback_started"
// UpdateStateRollbackPaused is the state with a rollback in progress.
UpdateStateRollbackPaused UpdateState = "rollback_paused"
// UpdateStateRollbackCompleted is the state with a rollback in progress.
UpdateStateRollbackCompleted UpdateState = "rollback_completed"
)
// UpdateStatus reports the status of a service update.
type UpdateStatus struct {
State UpdateState `json:",omitempty"`
StartedAt time.Time `json:",omitempty"`
CompletedAt time.Time `json:",omitempty"`
StartedAt *time.Time `json:",omitempty"`
CompletedAt *time.Time `json:",omitempty"`
Message string `json:",omitempty"`
}
@ -67,11 +75,50 @@ const (
UpdateFailureActionPause = "pause"
// UpdateFailureActionContinue CONTINUE
UpdateFailureActionContinue = "continue"
// UpdateFailureActionRollback ROLLBACK
UpdateFailureActionRollback = "rollback"
// UpdateOrderStopFirst STOP_FIRST
UpdateOrderStopFirst = "stop-first"
// UpdateOrderStartFirst START_FIRST
UpdateOrderStartFirst = "start-first"
)
// UpdateConfig represents the update configuration.
type UpdateConfig struct {
Parallelism uint64 `json:",omitempty"`
// Maximum number of tasks to be updated in one iteration.
// 0 means unlimited parallelism.
Parallelism uint64
// Amount of time between updates.
Delay time.Duration `json:",omitempty"`
// FailureAction is the action to take when an update fails.
FailureAction string `json:",omitempty"`
// Monitor indicates how long to monitor a task for failure after it is
// created. If the task fails by ending up in one of the states
// REJECTED, COMPLETED, or FAILED, within Monitor from its creation,
// this counts as a failure. If it fails after Monitor, it does not
// count as a failure. If Monitor is unspecified, a default value will
// be used.
Monitor time.Duration `json:",omitempty"`
// MaxFailureRatio is the fraction of tasks that may fail during
// an update before the failure action is invoked. Any task created by
// the current update which ends up in one of the states REJECTED,
// COMPLETED or FAILED within Monitor from its creation counts as a
// failure. The number of failures is divided by the number of tasks
// being updated, and if this fraction is greater than
// MaxFailureRatio, the failure action is invoked.
//
// If the failure action is CONTINUE, there is no effect.
// If the failure action is PAUSE, no more tasks will be updated until
// another update is started.
MaxFailureRatio float32
// Order indicates the order of operations when rolling out an updated
// task. Either the old task is shut down before the new task is
// started, or the new task is started before the old task is shut down.
Order string
}
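A sketch of an UpdateConfig using the rollback and ordering constants documented above; the numbers are illustrative.

package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	// Update two tasks at a time, watch each new task for 30s, and roll
	// back once more than 20% of the updated tasks have failed.
	cfg := swarm.UpdateConfig{
		Parallelism:     2,
		Delay:           5 * time.Second,
		FailureAction:   swarm.UpdateFailureActionRollback,
		Monitor:         30 * time.Second,
		MaxFailureRatio: 0.2,
		Order:           swarm.UpdateOrderStartFirst,
	}
	fmt.Printf("%+v\n", cfg)
}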

View File

@ -8,6 +8,8 @@ type ClusterInfo struct {
ID string
Meta
Spec Spec
TLSInfo TLSInfo
RootRotationInProgress bool
}
// Swarm represents a swarm.
@ -33,6 +35,7 @@ type Spec struct {
Dispatcher DispatcherConfig `json:",omitempty"`
CAConfig CAConfig `json:",omitempty"`
TaskDefaults TaskDefaults `json:",omitempty"`
EncryptionConfig EncryptionConfig `json:",omitempty"`
}
// OrchestrationConfig represents orchestration configuration.
@ -53,6 +56,14 @@ type TaskDefaults struct {
LogDriver *Driver `json:",omitempty"`
}
// EncryptionConfig controls at-rest encryption of data and keys.
type EncryptionConfig struct {
// AutoLockManagers specifies whether or not the managers' TLS keys and raft data
// should be encrypted at rest in such a way that they must be unlocked
// before the manager node starts up again.
AutoLockManagers bool
}
// RaftConfig represents raft configuration.
type RaftConfig struct {
// SnapshotInterval is the number of log entries between snapshots.
@ -60,7 +71,7 @@ type RaftConfig struct {
// KeepOldSnapshots is the number of snapshots to keep beyond the
// current snapshot.
KeepOldSnapshots uint64 `json:",omitempty"`
KeepOldSnapshots *uint64 `json:",omitempty"`
// LogEntriesForSlowFollowers is the number of log entries to keep
// around to sync up slow followers after a snapshot is created.
@ -98,6 +109,16 @@ type CAConfig struct {
// ExternalCAs is a list of CAs to which a manager node will make
// certificate signing requests for node certificates.
ExternalCAs []*ExternalCA `json:",omitempty"`
// SigningCACert and SigningCAKey specify the desired signing root CA and
// root CA key for the swarm. When inspecting the cluster, the key will
// be redacted.
SigningCACert string `json:",omitempty"`
SigningCAKey string `json:",omitempty"`
// If this value changes, and there is no specified signing cert and key,
// then the swarm is forced to generate a new root certificate and key.
ForceRotate uint64 `json:",omitempty"`
}
// ExternalCAProtocol represents type of external CA.
@ -117,22 +138,37 @@ type ExternalCA struct {
// Options is a set of additional key/value pairs whose interpretation
// depends on the specified CA type.
Options map[string]string `json:",omitempty"`
// CACert specifies which root CA is used by this external CA. This certificate must
// be in PEM format.
CACert string
}
// InitRequest is the request used to init a swarm.
type InitRequest struct {
ListenAddr string
AdvertiseAddr string
DataPathAddr string
ForceNewCluster bool
Spec Spec
AutoLockManagers bool
Availability NodeAvailability
}
// JoinRequest is the request used to join a swarm.
type JoinRequest struct {
ListenAddr string
AdvertiseAddr string
DataPathAddr string
RemoteAddrs []string
JoinToken string // accept by secret
Availability NodeAvailability
}
// UnlockRequest is the request used to unlock a swarm.
type UnlockRequest struct {
// UnlockKey is the unlock key in ASCII-armored format.
UnlockKey string
}
// LocalNodeState represents the state of the local node.
@ -147,6 +183,8 @@ const (
LocalNodeStateActive LocalNodeState = "active"
// LocalNodeStateError ERROR
LocalNodeStateError LocalNodeState = "error"
// LocalNodeStateLocked LOCKED
LocalNodeStateLocked LocalNodeState = "locked"
)
// Info represents generic information about swarm.
@ -159,10 +197,10 @@ type Info struct {
Error string
RemoteManagers []Peer
Nodes int
Managers int
Nodes int `json:",omitempty"`
Managers int `json:",omitempty"`
Cluster ClusterInfo
Cluster *ClusterInfo `json:",omitempty"`
}
// Peer represents a peer.
@ -175,4 +213,5 @@ type Peer struct {
type UpdateFlags struct {
RotateWorkerToken bool
RotateManagerToken bool
RotateManagerUnlockKey bool
}
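A sketch of an InitRequest that enables the at-rest encryption (autolock) described by EncryptionConfig above; the addresses are illustrative.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	// With autolock enabled, manager TLS keys and raft data are encrypted
	// at rest and must be unlocked after a manager restarts.
	req := swarm.InitRequest{
		ListenAddr:       "0.0.0.0:2377",
		AdvertiseAddr:    "192.168.1.10:2377",
		AutoLockManagers: true,
		Spec: swarm.Spec{
			EncryptionConfig: swarm.EncryptionConfig{AutoLockManagers: true},
		},
	}
	fmt.Printf("%+v\n", req)
}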

View File

@ -61,6 +61,15 @@ type TaskSpec struct {
// spec. If not present, the one on cluster default on swarm.Spec will be
// used, finally falling back to the engine default if not specified.
LogDriver *Driver `json:",omitempty"`
// ForceUpdate is a counter that triggers an update even if no relevant
// parameters have been changed.
ForceUpdate uint64
Runtime RuntimeType `json:",omitempty"`
// TODO (ehazlett): this should be removed and instead
// use struct tags (proto) for the runtimes
RuntimeData []byte `json:",omitempty"`
}
// Resources represents resources (CPU/Memory).
@ -78,6 +87,25 @@ type ResourceRequirements struct {
// Placement represents orchestration parameters.
type Placement struct {
Constraints []string `json:",omitempty"`
Preferences []PlacementPreference `json:",omitempty"`
// Platforms stores all the platforms that the image can run on.
// This field is used in the platform filter for scheduling. If empty,
// then the platform filter is off, meaning there are no scheduling restrictions.
Platforms []Platform `json:",omitempty"`
}
// PlacementPreference provides a way to make the scheduler aware of factors
// such as topology.
type PlacementPreference struct {
Spread *SpreadOver
}
// SpreadOver is a scheduling preference that instructs the scheduler to spread
// tasks evenly over groups of nodes identified by labels.
type SpreadOver struct {
// label descriptor, such as engine.labels.az
SpreadDescriptor string
}
// RestartPolicy represents the restart policy.
@ -107,6 +135,7 @@ type TaskStatus struct {
Message string `json:",omitempty"`
Err string `json:",omitempty"`
ContainerStatus ContainerStatus `json:",omitempty"`
PortStatus PortStatus `json:",omitempty"`
}
// ContainerStatus represents the status of a container.
@ -115,3 +144,9 @@ type ContainerStatus struct {
PID int `json:",omitempty"`
ExitCode int `json:",omitempty"`
}
// PortStatus reports the status of a task's host ports when its
// service has published host ports
type PortStatus struct {
Ports []PortConfig `json:",omitempty"`
}
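A sketch of a Placement carrying the new spread preference, using the engine.labels.az descriptor mentioned in the comment above; the constraint string is illustrative.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	// Spread tasks evenly over groups of worker nodes that share the
	// same value of the engine.labels.az label.
	placement := swarm.Placement{
		Constraints: []string{"node.role == worker"},
		Preferences: []swarm.PlacementPreference{
			{Spread: &swarm.SpreadOver{SpreadDescriptor: "engine.labels.az"}},
		},
	}
	fmt.Printf("%+v\n", placement)
}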

View File

@ -1,11 +1,15 @@
package types
import (
"errors"
"fmt"
"io"
"os"
"strings"
"time"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/registry"
@ -13,99 +17,6 @@ import (
"github.com/docker/go-connections/nat"
)
// ContainerCreateResponse contains the information returned to a client on the
// creation of a new container.
type ContainerCreateResponse struct {
// ID is the ID of the created container.
ID string `json:"Id"`
// Warnings are any warnings encountered during the creation of the container.
Warnings []string `json:"Warnings"`
}
// ContainerExecCreateResponse contains response of Remote API:
// POST "/containers/{name:.*}/exec"
type ContainerExecCreateResponse struct {
// ID is the exec ID.
ID string `json:"Id"`
}
// ContainerUpdateResponse contains response of Remote API:
// POST "/containers/{name:.*}/update"
type ContainerUpdateResponse struct {
// Warnings are any warnings encountered during the updating of the container.
Warnings []string `json:"Warnings"`
}
// AuthResponse contains response of Remote API:
// POST "/auth"
type AuthResponse struct {
// Status is the authentication status
Status string `json:"Status"`
// IdentityToken is an opaque token used for authenticating
// a user after a successful login.
IdentityToken string `json:"IdentityToken,omitempty"`
}
// ContainerWaitResponse contains response of Remote API:
// POST "/containers/"+containerID+"/wait"
type ContainerWaitResponse struct {
// StatusCode is the status code of the wait job
StatusCode int `json:"StatusCode"`
}
// ContainerCommitResponse contains response of Remote API:
// POST "/commit?container="+containerID
type ContainerCommitResponse struct {
ID string `json:"Id"`
}
// ContainerChange contains response of Remote API:
// GET "/containers/{name:.*}/changes"
type ContainerChange struct {
Kind int
Path string
}
// ImageHistory contains response of Remote API:
// GET "/images/{name:.*}/history"
type ImageHistory struct {
ID string `json:"Id"`
Created int64
CreatedBy string
Tags []string
Size int64
Comment string
}
// ImageDelete contains response of Remote API:
// DELETE "/images/{name:.*}"
type ImageDelete struct {
Untagged string `json:",omitempty"`
Deleted string `json:",omitempty"`
}
// Image contains response of Remote API:
// GET "/images/json"
type Image struct {
ID string `json:"Id"`
ParentID string `json:"ParentId"`
RepoTags []string
RepoDigests []string
Created int64
Size int64
VirtualSize int64
Labels map[string]string
}
// GraphDriverData returns Image's graph driver config info
// when calling inspect command
type GraphDriverData struct {
Name string
Data map[string]string
}
// RootFS returns Image's RootFS description including the layer IDs.
type RootFS struct {
Type string
@ -113,7 +24,7 @@ type RootFS struct {
BaseLayer string `json:",omitempty"`
}
// ImageInspect contains response of Remote API:
// ImageInspect contains response of Engine API:
// GET "/images/{name:.*}/json"
type ImageInspect struct {
ID string `json:"Id"`
@ -129,22 +40,14 @@ type ImageInspect struct {
Config *container.Config
Architecture string
Os string
OsVersion string `json:",omitempty"`
Size int64
VirtualSize int64
GraphDriver GraphDriverData
RootFS RootFS
}
// Port stores open ports info of container
// e.g. {"PrivatePort": 8080, "PublicPort": 80, "Type": "tcp"}
type Port struct {
IP string `json:",omitempty"`
PrivatePort int
PublicPort int `json:",omitempty"`
Type string
}
// Container contains response of Remote API:
// Container contains response of Engine API:
// GET "/containers/json"
type Container struct {
ID string `json:"Id"`
@ -166,7 +69,7 @@ type Container struct {
Mounts []MountPoint
}
// CopyConfig contains request body of Remote API:
// CopyConfig contains request body of Engine API:
// POST "/containers/"+containerID+"/copy"
type CopyConfig struct {
Resource string
@ -183,25 +86,27 @@ type ContainerPathStat struct {
LinkTarget string `json:"linkTarget"`
}
// ContainerStats contains response of Remote API:
// ContainerStats contains response of Engine API:
// GET "/stats"
type ContainerStats struct {
Body io.ReadCloser `json:"body"`
OSType string `json:"ostype"`
}
// ContainerProcessList contains response of Remote API:
// GET "/containers/{name:.*}/top"
type ContainerProcessList struct {
Processes [][]string
Titles []string
// Ping contains response of Engine API:
// GET "/_ping"
type Ping struct {
APIVersion string
OSType string
Experimental bool
}
// Version contains response of Remote API:
// Version contains response of Engine API:
// GET "/version"
type Version struct {
Version string
APIVersion string `json:"ApiVersion"`
MinAPIVersion string `json:"MinAPIVersion,omitempty"`
GitCommit string
GoVersion string
Os string
@ -211,7 +116,14 @@ type Version struct {
BuildTime string `json:",omitempty"`
}
// Info contains response of Remote API:
// Commit holds the Git-commit (SHA1) that a binary was built from, as reported
// in the version-string of external tools, such as containerd, or runC.
type Commit struct {
ID string // ID is the actual commit ID of external tool.
Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time.
}
// Info contains response of Engine API:
// GET "/info"
type Info struct {
ID string
@ -260,7 +172,6 @@ type Info struct {
ServerVersion string
ClusterStore string
ClusterAdvertise string
SecurityOptions []string
Runtimes map[string]Runtime
DefaultRuntime string
Swarm swarm.Info
@ -269,6 +180,53 @@ type Info struct {
// running containers are detected
LiveRestoreEnabled bool
Isolation container.Isolation
InitBinary string
ContainerdCommit Commit
RuncCommit Commit
InitCommit Commit
SecurityOptions []string
}
// KeyValue holds a key/value pair
type KeyValue struct {
Key, Value string
}
// SecurityOpt contains the name and options of a security option
type SecurityOpt struct {
Name string
Options []KeyValue
}
// DecodeSecurityOptions decodes a security options string slice to a type safe
// SecurityOpt
func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) {
so := []SecurityOpt{}
for _, opt := range opts {
// support output from a < 1.13 docker daemon
if !strings.Contains(opt, "=") {
so = append(so, SecurityOpt{Name: opt})
continue
}
secopt := SecurityOpt{}
split := strings.Split(opt, ",")
for _, s := range split {
kv := strings.SplitN(s, "=", 2)
if len(kv) != 2 {
return nil, fmt.Errorf("invalid security option %q", s)
}
if kv[0] == "" || kv[1] == "" {
return nil, errors.New("invalid empty security option")
}
if kv[0] == "name" {
secopt.Name = kv[1]
continue
}
secopt.Options = append(secopt.Options, KeyValue{Key: kv[0], Value: kv[1]})
}
so = append(so, secopt)
}
return so, nil
}
// PluginsInfo is a temp struct holding Plugins name
@ -280,6 +238,8 @@ type PluginsInfo struct {
Network []string
// List of Authorization plugins registered
Authorization []string
// List of Log plugins registered
Log []string
}
// ExecStartCheck is a temp struct used by execStart
@ -301,6 +261,7 @@ type HealthcheckResult struct {
// Health states
const (
NoHealthcheck = "none" // Indicates there is no healthcheck
Starting = "starting" // Starting indicates that the container is not yet ready
Healthy = "healthy" // Healthy indicates that the container is running correctly
Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem
@ -316,7 +277,7 @@ type Health struct {
// ContainerState stores container's running state
// it's part of ContainerJSONBase and will return by "inspect" command
type ContainerState struct {
Status string
Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead"
Running bool
Paused bool
Restarting bool
@ -342,7 +303,7 @@ type ContainerNode struct {
Labels map[string]string
}
// ContainerJSONBase contains response of Remote API:
// ContainerJSONBase contains response of Engine API:
// GET "/containers/{name:.*}/json"
type ContainerJSONBase struct {
ID string `json:"Id"`
@ -430,45 +391,25 @@ type MountPoint struct {
Propagation mount.Propagation
}
// Volume represents the configuration of a volume for the remote API
type Volume struct {
Name string // Name is the name of the volume
Driver string // Driver is the Driver name used to create the volume
Mountpoint string // Mountpoint is the location on disk of the volume
Status map[string]interface{} `json:",omitempty"` // Status provides low-level status information about the volume
Labels map[string]string // Labels is metadata specific to the volume
Scope string // Scope describes the level at which the volume exists (e.g. `global` for cluster-wide or `local` for machine level)
}
// VolumesListResponse contains the response for the remote API:
// GET "/volumes"
type VolumesListResponse struct {
Volumes []*Volume // Volumes is the list of volumes being returned
Warnings []string // Warnings is a list of warnings that occurred when getting the list from the volume drivers
}
// VolumeCreateRequest contains the request for the remote API:
// POST "/volumes/create"
type VolumeCreateRequest struct {
Name string // Name is the requested name of the volume
Driver string // Driver is the name of the driver that should be used to create the volume
DriverOpts map[string]string // DriverOpts holds the driver specific options to use for when creating the volume.
Labels map[string]string // Labels holds metadata specific to the volume being created.
}
// NetworkResource is the body of the "get network" http response message
type NetworkResource struct {
Name string // Name is the requested name of the network
ID string `json:"Id"` // ID uniquely identifies a network on a single machine
Scope string // Scope describes the level at which the network exists (e.g. `global` for cluster-wide or `local` for machine level)
Created time.Time // Created is the time the network was created
Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level)
Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`)
EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6
IPAM network.IPAM // IPAM is the network's IP Address Management
Internal bool // Internal represents if the network is for internal use only
Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network.
ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services.
Containers map[string]EndpointResource // Containers contains endpoints belonging to the network
Options map[string]string // Options holds the network specific options to use for when creating the network
Labels map[string]string // Labels holds metadata specific to the network being created
Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network
Services map[string]network.ServiceInfo `json:",omitempty"`
}
// EndpointResource contains network resources allocated and used for a container in a network
@ -482,12 +423,23 @@ type EndpointResource struct {
// NetworkCreate is the expected body of the "create network" http request message
type NetworkCreate struct {
// Check for networks with duplicate names.
// Network is primarily keyed based on a random ID and not on the name.
// Network name is strictly a user-friendly alias to the network
// which is uniquely identified using ID.
// And there is no guaranteed way to check for duplicates.
// Option CheckDuplicate is there to provide a best effort checking of any networks
// which have the same name, but it is not guaranteed to catch all name collisions.
CheckDuplicate bool
Driver string
Scope string
EnableIPv6 bool
IPAM *network.IPAM
Internal bool
Attachable bool
Ingress bool
ConfigOnly bool
ConfigFrom *network.ConfigReference
Options map[string]string
Labels map[string]string
}
@ -526,3 +478,77 @@ type Runtime struct {
Path string `json:"path"`
Args []string `json:"runtimeArgs,omitempty"`
}
// DiskUsage contains response of Engine API:
// GET "/system/df"
type DiskUsage struct {
LayersSize int64
Images []*ImageSummary
Containers []*Container
Volumes []*Volume
}
// ContainersPruneReport contains the response for Engine API:
// POST "/containers/prune"
type ContainersPruneReport struct {
ContainersDeleted []string
SpaceReclaimed uint64
}
// VolumesPruneReport contains the response for Engine API:
// POST "/volumes/prune"
type VolumesPruneReport struct {
VolumesDeleted []string
SpaceReclaimed uint64
}
// ImagesPruneReport contains the response for Engine API:
// POST "/images/prune"
type ImagesPruneReport struct {
ImagesDeleted []ImageDeleteResponseItem
SpaceReclaimed uint64
}
// NetworksPruneReport contains the response for Engine API:
// POST "/networks/prune"
type NetworksPruneReport struct {
NetworksDeleted []string
}
// SecretCreateResponse contains the information returned to a client
// on the creation of a new secret.
type SecretCreateResponse struct {
// ID is the id of the created secret.
ID string
}
// SecretListOptions holds parameters to list secrets
type SecretListOptions struct {
Filters filters.Args
}
// ConfigCreateResponse contains the information returned to a client
// on the creation of a new config.
type ConfigCreateResponse struct {
// ID is the id of the created config.
ID string
}
// ConfigListOptions holds parameters to list configs
type ConfigListOptions struct {
Filters filters.Args
}
// PushResult contains the tag, manifest digest, and manifest size from the
// push. It's used to signal this information to the trust code in the client
// so it can sign the manifest if necessary.
type PushResult struct {
Tag string
Digest string
Size int
}
// BuildResult contains the image id of a successful build
type BuildResult struct {
ID string
}
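A usage sketch for DecodeSecurityOptions above; the raw strings are illustrative examples of the `name=...,key=value` form the decoder parses.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	// Strings in the shape reported by the daemon's SecurityOptions field.
	raw := []string{
		"name=seccomp,profile=default",
		"name=apparmor",
		"selinux", // pre-1.13 daemons report bare names
	}

	opts, err := types.DecodeSecurityOptions(raw)
	if err != nil {
		panic(err)
	}
	for _, o := range opts {
		fmt.Println(o.Name, o.Options)
	}
}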

58
vendor/github.com/docker/docker/api/types/volume.go generated vendored Normal file
View File

@ -0,0 +1,58 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// Volume volume
// swagger:model Volume
type Volume struct {
// Name of the volume driver used by the volume.
// Required: true
Driver string `json:"Driver"`
// User-defined key/value metadata.
// Required: true
Labels map[string]string `json:"Labels"`
// Mount path of the volume on the host.
// Required: true
Mountpoint string `json:"Mountpoint"`
// Name of the volume.
// Required: true
Name string `json:"Name"`
// The driver specific options used when creating the volume.
// Required: true
Options map[string]string `json:"Options"`
// The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level.
// Required: true
Scope string `json:"Scope"`
// Low-level details about the volume, provided by the volume driver.
// Details are returned as a map with key/value pairs:
// `{"key":"value","key2":"value2"}`.
//
// The `Status` field is optional, and is omitted if the volume driver
// does not support this feature.
//
Status map[string]interface{} `json:"Status,omitempty"`
// usage data
UsageData *VolumeUsageData `json:"UsageData,omitempty"`
}
// VolumeUsageData volume usage data
// swagger:model VolumeUsageData
type VolumeUsageData struct {
// The number of containers referencing this volume.
// Required: true
RefCount int64 `json:"RefCount"`
// The disk space used by the volume (local driver only)
// Required: true
Size int64 `json:"Size"`
}

View File

@ -1,149 +0,0 @@
package mount
import (
"fmt"
"strings"
)
var flags = map[string]struct {
clear bool
flag int
}{
"defaults": {false, 0},
"ro": {false, RDONLY},
"rw": {true, RDONLY},
"suid": {true, NOSUID},
"nosuid": {false, NOSUID},
"dev": {true, NODEV},
"nodev": {false, NODEV},
"exec": {true, NOEXEC},
"noexec": {false, NOEXEC},
"sync": {false, SYNCHRONOUS},
"async": {true, SYNCHRONOUS},
"dirsync": {false, DIRSYNC},
"remount": {false, REMOUNT},
"mand": {false, MANDLOCK},
"nomand": {true, MANDLOCK},
"atime": {true, NOATIME},
"noatime": {false, NOATIME},
"diratime": {true, NODIRATIME},
"nodiratime": {false, NODIRATIME},
"bind": {false, BIND},
"rbind": {false, RBIND},
"unbindable": {false, UNBINDABLE},
"runbindable": {false, RUNBINDABLE},
"private": {false, PRIVATE},
"rprivate": {false, RPRIVATE},
"shared": {false, SHARED},
"rshared": {false, RSHARED},
"slave": {false, SLAVE},
"rslave": {false, RSLAVE},
"relatime": {false, RELATIME},
"norelatime": {true, RELATIME},
"strictatime": {false, STRICTATIME},
"nostrictatime": {true, STRICTATIME},
}
var validFlags = map[string]bool{
"": true,
"size": true,
"mode": true,
"uid": true,
"gid": true,
"nr_inodes": true,
"nr_blocks": true,
"mpol": true,
}
var propagationFlags = map[string]bool{
"bind": true,
"rbind": true,
"unbindable": true,
"runbindable": true,
"private": true,
"rprivate": true,
"shared": true,
"rshared": true,
"slave": true,
"rslave": true,
}
// MergeTmpfsOptions merge mount options to make sure there is no duplicate.
func MergeTmpfsOptions(options []string) ([]string, error) {
// We use collisions maps to remove duplicates.
// For flag, the key is the flag value (the key for propagation flag is -1)
// For data=value, the key is the data
flagCollisions := map[int]bool{}
dataCollisions := map[string]bool{}
var newOptions []string
// We process in reverse order
for i := len(options) - 1; i >= 0; i-- {
option := options[i]
if option == "defaults" {
continue
}
if f, ok := flags[option]; ok && f.flag != 0 {
// There is only one propagation mode
key := f.flag
if propagationFlags[option] {
key = -1
}
// Check to see if there is collision for flag
if !flagCollisions[key] {
// We prepend the option and add to collision map
newOptions = append([]string{option}, newOptions...)
flagCollisions[key] = true
}
continue
}
opt := strings.SplitN(option, "=", 2)
if len(opt) != 2 || !validFlags[opt[0]] {
return nil, fmt.Errorf("Invalid tmpfs option %q", opt)
}
if !dataCollisions[opt[0]] {
// We prepend the option and add to collision map
newOptions = append([]string{option}, newOptions...)
dataCollisions[opt[0]] = true
}
}
return newOptions, nil
}
// Parse fstab type mount options into mount() flags
// and device specific data
func parseOptions(options string) (int, string) {
var (
flag int
data []string
)
for _, o := range strings.Split(options, ",") {
// If the option does not exist in the flags table or the flag
// is not supported on the platform,
// then it is a data value for a specific fs type
if f, exists := flags[o]; exists && f.flag != 0 {
if f.clear {
flag &= ^f.flag
} else {
flag |= f.flag
}
} else {
data = append(data, o)
}
}
return flag, strings.Join(data, ",")
}
// ParseTmpfsOptions parse fstab type mount options into flags and data
func ParseTmpfsOptions(options string) (int, string, error) {
flags, data := parseOptions(options)
for _, o := range strings.Split(data, ",") {
opt := strings.SplitN(o, "=", 2)
if !validFlags[opt[0]] {
return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt)
}
}
return flags, data, nil
}

View File

@ -1,48 +0,0 @@
// +build freebsd,cgo
package mount
/*
#include <sys/mount.h>
*/
import "C"
const (
// RDONLY will mount the filesystem as read-only.
RDONLY = C.MNT_RDONLY
// NOSUID will not allow set-user-identifier or set-group-identifier bits to
// take effect.
NOSUID = C.MNT_NOSUID
// NOEXEC will not allow execution of any binaries on the mounted file system.
NOEXEC = C.MNT_NOEXEC
// SYNCHRONOUS will allow any I/O to the file system to be done synchronously.
SYNCHRONOUS = C.MNT_SYNCHRONOUS
// NOATIME will not update the file access time when reading from a file.
NOATIME = C.MNT_NOATIME
)
// These flags are unsupported.
const (
BIND = 0
DIRSYNC = 0
MANDLOCK = 0
NODEV = 0
NODIRATIME = 0
UNBINDABLE = 0
RUNBINDABLE = 0
PRIVATE = 0
RPRIVATE = 0
SHARED = 0
RSHARED = 0
SLAVE = 0
RSLAVE = 0
RBIND = 0
RELATIVE = 0
RELATIME = 0
REMOUNT = 0
STRICTATIME = 0
)

View File

@ -1,85 +0,0 @@
package mount
import (
"syscall"
)
const (
// RDONLY will mount the file system read-only.
RDONLY = syscall.MS_RDONLY
// NOSUID will not allow set-user-identifier or set-group-identifier bits to
// take effect.
NOSUID = syscall.MS_NOSUID
// NODEV will not interpret character or block special devices on the file
// system.
NODEV = syscall.MS_NODEV
// NOEXEC will not allow execution of any binaries on the mounted file system.
NOEXEC = syscall.MS_NOEXEC
// SYNCHRONOUS will allow I/O to the file system to be done synchronously.
SYNCHRONOUS = syscall.MS_SYNCHRONOUS
// DIRSYNC will force all directory updates within the file system to be done
// synchronously. This affects the following system calls: create, link,
// unlink, symlink, mkdir, rmdir, mknod and rename.
DIRSYNC = syscall.MS_DIRSYNC
// REMOUNT will attempt to remount an already-mounted file system. This is
// commonly used to change the mount flags for a file system, especially to
// make a readonly file system writeable. It does not change device or mount
// point.
REMOUNT = syscall.MS_REMOUNT
// MANDLOCK will force mandatory locks on a filesystem.
MANDLOCK = syscall.MS_MANDLOCK
// NOATIME will not update the file access time when reading from a file.
NOATIME = syscall.MS_NOATIME
// NODIRATIME will not update the directory access time.
NODIRATIME = syscall.MS_NODIRATIME
// BIND remounts a subtree somewhere else.
BIND = syscall.MS_BIND
// RBIND remounts a subtree and all possible submounts somewhere else.
RBIND = syscall.MS_BIND | syscall.MS_REC
// UNBINDABLE creates a mount which cannot be cloned through a bind operation.
UNBINDABLE = syscall.MS_UNBINDABLE
// RUNBINDABLE marks the entire mount tree as UNBINDABLE.
RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC
// PRIVATE creates a mount which carries no propagation abilities.
PRIVATE = syscall.MS_PRIVATE
// RPRIVATE marks the entire mount tree as PRIVATE.
RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC
// SLAVE creates a mount which receives propagation from its master, but not
// vice versa.
SLAVE = syscall.MS_SLAVE
// RSLAVE marks the entire mount tree as SLAVE.
RSLAVE = syscall.MS_SLAVE | syscall.MS_REC
// SHARED creates a mount which provides the ability to create mirrors of
// that mount such that mounts and unmounts within any of the mirrors
// propagate to the other mirrors.
SHARED = syscall.MS_SHARED
// RSHARED marks the entire mount tree as SHARED.
RSHARED = syscall.MS_SHARED | syscall.MS_REC
// RELATIME updates inode access times relative to modify or change time.
RELATIME = syscall.MS_RELATIME
// STRICTATIME allows to explicitly request full atime updates. This makes
// it possible for the kernel to default to relatime or noatime but still
// allow userspace to override it.
STRICTATIME = syscall.MS_STRICTATIME
)

View File

@ -1,30 +0,0 @@
// +build !linux,!freebsd freebsd,!cgo solaris,!cgo
package mount
// These flags are unsupported.
const (
BIND = 0
DIRSYNC = 0
MANDLOCK = 0
NOATIME = 0
NODEV = 0
NODIRATIME = 0
NOEXEC = 0
NOSUID = 0
UNBINDABLE = 0
RUNBINDABLE = 0
PRIVATE = 0
RPRIVATE = 0
SHARED = 0
RSHARED = 0
SLAVE = 0
RSLAVE = 0
RBIND = 0
RELATIME = 0
RELATIVE = 0
REMOUNT = 0
STRICTATIME = 0
SYNCHRONOUS = 0
RDONLY = 0
)

View File

@ -1,74 +0,0 @@
package mount
import (
"time"
)
// GetMounts retrieves a list of mounts for the current running process.
func GetMounts() ([]*Info, error) {
return parseMountTable()
}
// Mounted determines if a specified mountpoint has been mounted.
// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab.
func Mounted(mountpoint string) (bool, error) {
entries, err := parseMountTable()
if err != nil {
return false, err
}
// Search the table for the mountpoint
for _, e := range entries {
if e.Mountpoint == mountpoint {
return true, nil
}
}
return false, nil
}
// Mount will mount filesystem according to the specified configuration, on the
// condition that the target path is *not* already mounted. Options must be
// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
// flags.go for supported option flags.
func Mount(device, target, mType, options string) error {
flag, _ := parseOptions(options)
if flag&REMOUNT != REMOUNT {
if mounted, err := Mounted(target); err != nil || mounted {
return err
}
}
return ForceMount(device, target, mType, options)
}
// ForceMount will mount a filesystem according to the specified configuration,
// *regardless* if the target path is not already mounted. Options must be
// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
// flags.go for supported option flags.
func ForceMount(device, target, mType, options string) error {
flag, data := parseOptions(options)
if err := mount(device, target, mType, uintptr(flag), data); err != nil {
return err
}
return nil
}
// Unmount will unmount the target filesystem, so long as it is mounted.
func Unmount(target string) error {
if mounted, err := Mounted(target); err != nil || !mounted {
return err
}
return ForceUnmount(target)
}
// ForceUnmount will force an unmount of the target filesystem, regardless if
// it is mounted or not.
func ForceUnmount(target string) (err error) {
// Simple retry logic for unmount
for i := 0; i < 10; i++ {
if err = unmount(target, 0); err == nil {
return nil
}
time.Sleep(100 * time.Millisecond)
}
return
}

Some files were not shown because too many files have changed in this diff.