Merge remote-tracking branch 'upstream/master' into kompose_labels_test

Anush Shetty 2017-05-25 12:19:02 +05:30
commit 5f34c7368c
126 changed files with 2769 additions and 1944 deletions


@@ -12,18 +12,15 @@ import (
 var completion = &cobra.Command{
 Use: "completion SHELL",
-Short: "Generate a bash auto completion file",
-Long: `Generate a bash auto completion script.
-Auto completion supports both bash and zsh.
-The output is to stdout.
-Examples:
-$ source <(kompose completion bash)
-$ source <(kompose completion zsh)
-Will load the bash completion code.
+Short: "Output shell completion code",
+Long: `Generates shell completion code.
+Auto completion supports both bash and zsh. Output is to STDOUT.
+source <(kompose completion bash)
+source <(kompose completion zsh)
+Will load the shell completion code.
 `,
 RunE: func(cmd *cobra.Command, args []string) error {


@@ -35,7 +35,7 @@ This document outlines all the conversion details regarding `docker-compose.yaml
 | pid | | N | | |
 | ports | | Y | [Service.Spec.Ports](https://kubernetes.io/docs/api-reference/v1/definitions/#_v1_containerport) | |
 | security_opt | | N | | |
-| stop_grace_period | | N | | |
+| stop_grace_period | | Y | [Pod.Spec.TerminationGracePeriodSeconds](https://kubernetes.io/docs/resources-reference/v1.6/#podspec-v1-core) | |
 | stop_signal | | N | | |
 | sysctls | | N | | |
 | ulimits | | N | | See this [issue](https://github.com/kubernetes/kubernetes/issues/3595) on the k8s repo |


@@ -8,13 +8,13 @@ Kompose is released via GitHub on a three-week cycle, you can see all current re
 ```sh
 # Linux
-curl -L https://github.com/kubernetes-incubator/kompose/releases/download/v0.5.0/kompose-linux-amd64 -o kompose
+curl -L https://github.com/kubernetes-incubator/kompose/releases/download/v0.6.0/kompose-linux-amd64 -o kompose
 # macOS
-curl -L https://github.com/kubernetes-incubator/kompose/releases/download/v0.5.0/kompose-darwin-amd64 -o kompose
+curl -L https://github.com/kubernetes-incubator/kompose/releases/download/v0.6.0/kompose-darwin-amd64 -o kompose
 # Windows
-curl -L https://github.com/kubernetes-incubator/kompose/releases/download/v0.5.0/kompose-windows-amd64.exe -o kompose.exe
+curl -L https://github.com/kubernetes-incubator/kompose/releases/download/v0.6.0/kompose-windows-amd64.exe -o kompose.exe
 chmod +x kompose
 sudo mv ./kompose /usr/local/bin/kompose

glide.lock (generated)

@@ -1,5 +1,5 @@
-hash: 12a90ec6d0d5c29da6e19bfe5b4db322154c9d13777ef2c0c2174190ff1c1e79
-updated: 2017-04-21T16:45:28.958337712+05:30
+hash: 3036ae90e1ce5e101dedcf0270bae7f148557cbb7f94e537784bb28650d088bb
+updated: 2017-05-15T18:20:57.195406298+05:30
 imports:
 - name: cloud.google.com/go
 version: 3b1ae45394a234c385be014e9a488f2bb6eef821
@@ -190,7 +190,7 @@ imports:
 - name: github.com/docker/go-units
 version: 0bbddae09c5a5419a8c6dcdd7ff90da3d450393b
 - name: github.com/docker/libcompose
-version: c6a7d4679d065a4f50e08d4d1fe13776062cf1ec
+version: 01ff8920a57835fda41607e0f27252408e99cf08
 subpackages:
 - config
 - logger
@@ -332,7 +332,7 @@ imports:
 - runtime/internal
 - utilities
 - name: github.com/hashicorp/hcl
-version: 7fa7fff964d035e8a162cce3a164b3ad02ad651b
+version: 392dba7d905ed5d04a5794ba89f558b27e2ba1ca
 subpackages:
 - hcl/ast
 - hcl/parser
@@ -357,7 +357,7 @@ imports:
 subpackages:
 - pbutil
 - name: github.com/mitchellh/mapstructure
-version: 53818660ed4955e899c0bcafa97299a388bd7c8e
+version: cc8532a8e9a55ea36402aa21efdf403a60d34096
 - name: github.com/openshift/origin
 version: b4e0954faa4a0d11d9c1a536b76ad4a8c0206b7c
 subpackages:
@@ -405,9 +405,9 @@ imports:
 - name: github.com/pelletier/go-buffruneio
 version: c37440a7cf42ac63b919c752ca73a85067e05992
 - name: github.com/pelletier/go-toml
-version: fe206efb84b2bc8e8cfafe6b4c1826622be969e3
+version: 685a1f1cb7a66b9cadbe8f1ac49d9f8f567d6a9d
 - name: github.com/pkg/errors
-version: ff09b135c25aae272398c51a07235b90a75aa4f0
+version: c605e284fe17294bda444b34710735b29d1a9d90
 - name: github.com/prometheus/client_golang
 version: e51041b3fa41cece0dca035740ba6411905be473
 subpackages:
@@ -433,11 +433,11 @@ imports:
 - name: github.com/spf13/cast
 version: acbeb36b902d72a7a4c18e8f3241075e7ab763e4
 - name: github.com/spf13/cobra
-version: 10f6b9d7e1631a54ad07c5c0fb71c28a1abfd3c2
+version: 4cdb38c072b86bf795d2c81de50784d9fdd6eb77
 - name: github.com/spf13/jwalterweatherman
-version: fa7ca7e836cf3a8bb4ebf799f472c12d7e903d66
+version: 8f07c835e5cc1450c082fe3a439cf87b0cbb2d99
 - name: github.com/spf13/pflag
-version: 2300d0f8576fe575f71aaa5b9bbe4e1b0dc2eb51
+version: e57e3eeb33f795204c1ca35f56c44f83227c6e66
 - name: github.com/spf13/viper
 version: 0967fc9aceab2ce9da34061253ac10fb99bba5b2
 - name: github.com/ugorji/go
@@ -449,7 +449,7 @@ imports:
 - name: github.com/xeipuuv/gojsonreference
 version: e02fc20de94c78484cd5ffb007f8af96be030a45
 - name: github.com/xeipuuv/gojsonschema
-version: 702b404897d4364af44dc8dcabc9815947942325
+version: a55c211c418162597a32c74c7230f81adb5ad616
 - name: golang.org/x/net
 version: e90d6d0afc4c315a0d87a568ae68577cc15149a0
 subpackages:
@@ -473,7 +473,7 @@ imports:
 - jws
 - jwt
 - name: golang.org/x/sys
-version: ea9bcade75cb975a0b9738936568ab388b845617
+version: 1e99a4f9d247b28c670884b9a8d6801f39a47b77
 subpackages:
 - unix
 - name: golang.org/x/text


@@ -14,7 +14,7 @@ import:
 - package: github.com/pkg/errors
 - package: github.com/docker/libcompose
-version: c6a7d4679d065a4f50e08d4d1fe13776062cf1ec
+version: 01ff8920a57835fda41607e0f27252408e99cf08
 subpackages:
 - config
 - lookup


@@ -81,7 +81,9 @@ type ServiceConfig struct {
 User string `compose:"user" bundle:"User"`
 VolumesFrom []string `compose:"volumes_from" bundle:""`
 ServiceType string `compose:"kompose.service.type" bundle:""`
+StopGracePeriod string `compose:"stop_grace_period" bundle:""`
 Build string `compose:"build" bundle:""`
+BuildArgs map[string]*string `compose:"build-args" bundle:""`
 ExposeService string `compose:"kompose.service.expose" bundle:""`
 Stdin bool `compose:"stdin_open" bundle:""`
 Tty bool `compose:"tty" bundle:""`


@@ -317,10 +317,15 @@ func (c *Compose) LoadFile(files []string) (kobject.KomposeObject, error) {
 serviceConfig := kobject.ServiceConfig{}
 serviceConfig.Image = composeServiceConfig.Image
 serviceConfig.Build = composeServiceConfig.Build.Context
-serviceConfig.ContainerName = composeServiceConfig.ContainerName
+newName := normalizeServiceNames(composeServiceConfig.ContainerName)
+serviceConfig.ContainerName = newName
+if newName != composeServiceConfig.ContainerName {
+log.Infof("Container name in service %q has been changed from %q to %q", name, composeServiceConfig.ContainerName, newName)
+}
 serviceConfig.Command = composeServiceConfig.Entrypoint
 serviceConfig.Args = composeServiceConfig.Command
 serviceConfig.Dockerfile = composeServiceConfig.Build.Dockerfile
+serviceConfig.BuildArgs = composeServiceConfig.Build.Args
 envs := loadEnvVars(composeServiceConfig.Environment)
 serviceConfig.Environment = envs
@@ -372,6 +377,7 @@ func (c *Compose) LoadFile(files []string) (kobject.KomposeObject, error) {
 serviceConfig.Tty = composeServiceConfig.Tty
 serviceConfig.MemLimit = composeServiceConfig.MemLimit
 serviceConfig.TmpFs = composeServiceConfig.Tmpfs
+serviceConfig.StopGracePeriod = composeServiceConfig.StopGracePeriod
 komposeObject.ServiceConfigs[normalizeServiceNames(name)] = serviceConfig
 if normalizeServiceNames(name) != name {
 log.Infof("Service name in docker-compose has been changed from %q to %q", name, normalizeServiceNames(name))


@@ -27,6 +27,7 @@ import (
 "strconv"
 "strings"
 "text/template"
+"time"
 log "github.com/Sirupsen/logrus"
 "github.com/ghodss/yaml"
@@ -38,10 +39,11 @@ import (
 "k8s.io/kubernetes/pkg/apis/extensions"
 "k8s.io/kubernetes/pkg/runtime"
+"sort"
 deployapi "github.com/openshift/origin/pkg/deploy/api"
 "github.com/pkg/errors"
 "k8s.io/kubernetes/pkg/api/resource"
-"sort"
 )
 /**
@@ -382,6 +384,13 @@ func (k *Kubernetes) UpdateKubernetesObjects(name string, service kobject.Servic
 template.Spec.Containers[0].TTY = service.Tty
 template.Spec.Volumes = volumes
+if service.StopGracePeriod != "" {
+template.Spec.TerminationGracePeriodSeconds, err = DurationStrToSecondsInt(service.StopGracePeriod)
+if err != nil {
+log.Warningf("Failed to parse duration \"%v\" for service \"%v\"", service.StopGracePeriod, name)
+}
+}
 // Configure the resource limits
 if service.MemLimit != 0 {
 memoryResourceList := api.ResourceList{
@@ -553,3 +562,16 @@ func SortedKeys(komposeObject kobject.KomposeObject) []string {
 sort.Strings(sortedKeys)
 return sortedKeys
 }
+//converts duration string to *int64 in seconds
+func DurationStrToSecondsInt(s string) (*int64, error) {
+if s == "" {
+return nil, nil
+}
+duration, err := time.ParseDuration(s)
+if err != nil {
+return nil, err
+}
+r := (int64)(duration.Seconds())
+return &r, nil
+}
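To make the new `stop_grace_period` mapping concrete, here is a minimal sketch of how a compose-style duration string ends up as whole seconds for `TerminationGracePeriodSeconds`; `durationToSeconds` is an illustrative restatement of the `DurationStrToSecondsInt` helper above, not part of the change itself:

```go
package main

import (
	"fmt"
	"time"
)

// durationToSeconds mirrors DurationStrToSecondsInt: an empty string yields
// nil, otherwise the duration is converted to whole seconds, which is the
// shape TerminationGracePeriodSeconds (*int64) expects.
func durationToSeconds(s string) (*int64, error) {
	if s == "" {
		return nil, nil
	}
	d, err := time.ParseDuration(s)
	if err != nil {
		return nil, err
	}
	secs := int64(d.Seconds())
	return &secs, nil
}

func main() {
	// e.g. stop_grace_period: "1m30s" in docker-compose.yml
	secs, err := durationToSeconds("1m30s")
	if err != nil {
		panic(err)
	}
	fmt.Println(*secs) // 90 -> pod.Spec.TerminationGracePeriodSeconds
}
```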


@@ -26,10 +26,11 @@ import (
 "github.com/kubernetes-incubator/kompose/pkg/kobject"
 "github.com/kubernetes-incubator/kompose/pkg/testutils"
+"reflect"
 "github.com/pkg/errors"
 "k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/apis/extensions"
-"reflect"
 )
 /*
@@ -296,3 +297,30 @@ func TestSortedKeys(t *testing.T) {
 t.Logf("Test Fail output should be %s", c)
 }
 }
+//test conversion from duration string to seconds *int64
+func TestDurationStrToSecondsInt(t *testing.T) {
+testCases := map[string]struct {
+in string
+out *int64
+}{
+"5s": {in: "5s", out: &[]int64{5}[0]},
+"1m30s": {in: "1m30s", out: &[]int64{90}[0]},
+"empty": {in: "", out: nil},
+"onlynumber": {in: "2", out: nil},
+"illegal": {in: "abc", out: nil},
+}
+for name, test := range testCases {
+result, _ := DurationStrToSecondsInt(test.in)
+if test.out == nil && result != nil {
+t.Errorf("Case '%v' for TestDurationStrToSecondsInt fail, Expected 'nil' , got '%v'", name, *result)
+}
+if test.out != nil && result == nil {
+t.Errorf("Case '%v' for TestDurationStrToSecondsInt fail, Expected '%v' , got 'nil'", name, *test.out)
+}
+if test.out != nil && result != nil && *test.out != *result {
+t.Errorf("Case '%v' for TestDurationStrToSecondsInt fail, Expected '%v' , got '%v'", name, *test.out, *result)
+}
+}
+}


@@ -131,6 +131,7 @@ func (k *Kubernetes) InitRC(name string, service kobject.ServiceConfig, replicas
 },
 ObjectMeta: api.ObjectMeta{
 Name: name,
+Labels: transformer.ConfigLabels(name),
 },
 Spec: api.ReplicationControllerSpec{
 Replicas: int32(replicas),
@@ -172,6 +173,7 @@ func (k *Kubernetes) InitD(name string, service kobject.ServiceConfig, replicas
 },
 ObjectMeta: api.ObjectMeta{
 Name: name,
+Labels: transformer.ConfigLabels(name),
 },
 Spec: extensions.DeploymentSpec{
 Replicas: int32(replicas),
@@ -192,6 +194,7 @@ func (k *Kubernetes) InitDS(name string, service kobject.ServiceConfig) *extensi
 },
 ObjectMeta: api.ObjectMeta{
 Name: name,
+Labels: transformer.ConfigLabels(name),
 },
 Spec: extensions.DaemonSetSpec{
 Template: api.PodTemplateSpec{
@@ -514,6 +517,7 @@ func (k *Kubernetes) InitPod(name string, service kobject.ServiceConfig) *api.Po
 },
 ObjectMeta: api.ObjectMeta{
 Name: name,
+Labels: transformer.ConfigLabels(name),
 },
 Spec: k.InitPodSpec(name, service.Image),
 }


@@ -311,7 +311,7 @@ func TestKomposeConvert(t *testing.T) {
 if err := checkPodTemplate(config, d.Spec.Template, labels); err != nil {
 t.Errorf("%v", err)
 }
-if err := checkMeta(config, d.ObjectMeta, name, false); err != nil {
+if err := checkMeta(config, d.ObjectMeta, name, true); err != nil {
 t.Errorf("%v", err)
 }
 if (int)(d.Spec.Replicas) != replicas {
@@ -328,7 +328,7 @@ func TestKomposeConvert(t *testing.T) {
 if err := checkPodTemplate(config, ds.Spec.Template, labels); err != nil {
 t.Errorf("%v", err)
 }
-if err := checkMeta(config, ds.ObjectMeta, name, false); err != nil {
+if err := checkMeta(config, ds.ObjectMeta, name, true); err != nil {
 t.Errorf("%v", err)
 }
 if ds.Spec.Selector != nil && len(ds.Spec.Selector.MatchLabels) > 0 {
@@ -342,7 +342,7 @@ func TestKomposeConvert(t *testing.T) {
 if err := checkPodTemplate(config, *rc.Spec.Template, labels); err != nil {
 t.Errorf("%v", err)
 }
-if err := checkMeta(config, rc.ObjectMeta, name, false); err != nil {
+if err := checkMeta(config, rc.ObjectMeta, name, true); err != nil {
 t.Errorf("%v", err)
 }
 if (int)(rc.Spec.Replicas) != replicas {
@@ -360,7 +360,7 @@ func TestKomposeConvert(t *testing.T) {
 if err := checkPodTemplate(config, *dc.Spec.Template, labels); err != nil {
 t.Errorf("%v", err)
 }
-if err := checkMeta(config, dc.ObjectMeta, name, false); err != nil {
+if err := checkMeta(config, dc.ObjectMeta, name, true); err != nil {
 t.Errorf("%v", err)
 }
 if (int)(dc.Spec.Replicas) != replicas {


@@ -28,7 +28,6 @@ import (
 log "github.com/Sirupsen/logrus"
-"k8s.io/kubernetes/pkg/api"
 kapi "k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/unversioned"
 "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
@@ -39,8 +38,11 @@ import (
 "time"
+"reflect"
 "github.com/kubernetes-incubator/kompose/pkg/transformer"
 buildapi "github.com/openshift/origin/pkg/build/api"
+buildconfigreaper "github.com/openshift/origin/pkg/build/cmd"
 deployapi "github.com/openshift/origin/pkg/deploy/api"
 deploymentconfigreaper "github.com/openshift/origin/pkg/deploy/cmd"
 imageapi "github.com/openshift/origin/pkg/image/api"
@@ -50,7 +52,6 @@ import (
 "k8s.io/kubernetes/pkg/kubectl"
 "k8s.io/kubernetes/pkg/labels"
 "k8s.io/kubernetes/pkg/util/intstr"
-"reflect"
 )
 // OpenShift implements Transformer interface and represents OpenShift transformer
@@ -172,7 +173,7 @@ func (o *OpenShift) initImageStream(name string, service kobject.ServiceConfig,
 if service.Build == "" {
 tags = map[string]imageapi.TagReference{
 tag: imageapi.TagReference{
-From: &api.ObjectReference{
+From: &kapi.ObjectReference{
 Kind: "DockerImage",
 Name: service.Image,
 },
@@ -186,7 +187,7 @@
 Kind: "ImageStream",
 APIVersion: "v1",
 },
-ObjectMeta: api.ObjectMeta{
+ObjectMeta: kapi.ObjectMeta{
 Name: name,
 Labels: transformer.ConfigLabels(name),
 },
@@ -200,6 +201,13 @@
 // initBuildConfig initialize Openshifts BuildConfig Object
 func initBuildConfig(name string, service kobject.ServiceConfig, repo string, branch string) (*buildapi.BuildConfig, error) {
 contextDir, err := getAbsBuildContext(service.Build)
+envList := []kapi.EnvVar{}
+for envName, envValue := range service.BuildArgs {
+if *envValue == "\x00" {
+*envValue = os.Getenv(envName)
+}
+envList = append(envList, kapi.EnvVar{Name: envName, Value: *envValue})
+}
 if err != nil {
 return nil, errors.Wrap(err, name+"buildconfig cannot be created due to error in creating build context, getAbsBuildContext failed")
 }
@@ -209,8 +217,10 @@
 Kind: "BuildConfig",
 APIVersion: "v1",
 },
-ObjectMeta: api.ObjectMeta{
+ObjectMeta: kapi.ObjectMeta{
 Name: name,
+Labels: transformer.ConfigLabels(name),
 },
 Spec: buildapi.BuildConfigSpec{
 Triggers: []buildapi.BuildTriggerPolicy{
@@ -229,12 +239,13 @@
 Strategy: buildapi.BuildStrategy{
 DockerStrategy: &buildapi.DockerBuildStrategy{
 DockerfilePath: service.Dockerfile,
+Env: envList,
 },
 },
 Output: buildapi.BuildOutput{
 To: &kapi.ObjectReference{
 Kind: "ImageStreamTag",
-Name: name + ":latest",
+Name: name + ":" + getImageTag(service.Image),
 },
 },
 },
@@ -258,7 +269,7 @@ func (o *OpenShift) initDeploymentConfig(name string, service kobject.ServiceCon
 Kind: "DeploymentConfig",
 APIVersion: "v1",
 },
-ObjectMeta: api.ObjectMeta{
+ObjectMeta: kapi.ObjectMeta{
 Name: name,
 Labels: transformer.ConfigLabels(name),
 },
@@ -266,8 +277,8 @@ func (o *OpenShift) initDeploymentConfig(name string, service kobject.ServiceCon
 Replicas: int32(replicas),
 Selector: transformer.ConfigLabels(name),
 //UniqueLabelKey: p.Name,
-Template: &api.PodTemplateSpec{
-ObjectMeta: api.ObjectMeta{
+Template: &kapi.PodTemplateSpec{
+ObjectMeta: kapi.ObjectMeta{
 Labels: transformer.ConfigLabels(name),
 },
 Spec: o.InitPodSpec(name, " "),
@@ -283,7 +294,7 @@
 //Automatic - if new tag is detected - update image update inside the pod template
 Automatic: true,
 ContainerNames: containerName,
-From: api.ObjectReference{
+From: kapi.ObjectReference{
 Name: name + ":" + tag,
 Kind: "ImageStreamTag",
 },
@@ -301,7 +312,7 @@ func (o *OpenShift) initRoute(name string, service kobject.ServiceConfig, port i
 Kind: "Route",
 APIVersion: "v1",
 },
-ObjectMeta: api.ObjectMeta{
+ObjectMeta: kapi.ObjectMeta{
 Name: name,
 Labels: transformer.ConfigLabels(name),
 },
@@ -488,13 +499,13 @@ func (o *OpenShift) Deploy(komposeObject kobject.KomposeObject, opt kobject.Conv
 return err
 }
 log.Infof("Successfully created DeploymentConfig: %s", t.Name)
-case *api.Service:
+case *kapi.Service:
 _, err := kclient.Services(namespace).Create(t)
 if err != nil {
 return err
 }
 log.Infof("Successfully created Service: %s", t.Name)
-case *api.PersistentVolumeClaim:
+case *kapi.PersistentVolumeClaim:
 _, err := kclient.PersistentVolumeClaims(namespace).Create(t)
 if err != nil {
 return err
@@ -506,7 +517,7 @@
 return err
 }
 log.Infof("Successfully created Route: %s", t.Name)
-case *api.Pod:
+case *kapi.Pod:
 _, err := kclient.Pods(namespace).Create(t)
 if err != nil {
 return err
@@ -554,7 +565,7 @@ func (o *OpenShift) Undeploy(komposeObject kobject.KomposeObject, opt kobject.Co
 for _, v := range objects {
 label := labels.SelectorFromSet(labels.Set(map[string]string{transformer.Selector: v.(meta.Object).GetName()}))
-options := api.ListOptions{LabelSelector: label}
+options := kapi.ListOptions{LabelSelector: label}
 komposeLabel := map[string]string{transformer.Selector: v.(meta.Object).GetName()}
 switch t := v.(type) {
 case *imageapi.ImageStream:
@@ -576,7 +587,6 @@
 }
 case *buildapi.BuildConfig:
-//options := api.ListOptions{LabelSelector: label}
 buildConfig, err := oclient.BuildConfigs(namespace).List(options)
 if err != nil {
 errorList = append(errorList, err)
@@ -584,7 +594,8 @@
 }
 for _, l := range buildConfig.Items {
 if reflect.DeepEqual(l.Labels, komposeLabel) {
-err := oclient.BuildConfigs(namespace).Delete(t.Name)
+bcreaper := buildconfigreaper.NewBuildConfigReaper(oclient)
+err := bcreaper.Stop(namespace, t.Name, TIMEOUT*time.Second, nil)
 if err != nil {
 errorList = append(errorList, err)
 break
@@ -612,7 +623,7 @@
 }
 }
-case *api.Service:
+case *kapi.Service:
 //delete svc
 svc, err := kclient.Services(namespace).List(options)
 if err != nil {
@@ -621,7 +632,7 @@
 }
 for _, l := range svc.Items {
 if reflect.DeepEqual(l.Labels, komposeLabel) {
-rpService, err := kubectl.ReaperFor(api.Kind("Service"), kclient)
+rpService, err := kubectl.ReaperFor(kapi.Kind("Service"), kclient)
 if err != nil {
 errorList = append(errorList, err)
 break
@@ -636,7 +647,7 @@
 }
 }
-case *api.PersistentVolumeClaim:
+case *kapi.PersistentVolumeClaim:
 // delete pvc
 pvc, err := kclient.PersistentVolumeClaims(namespace).List(options)
 if err != nil {
@@ -672,7 +683,7 @@
 }
 }
-case *api.Pod:
+case *kapi.Pod:
 //delete pods
 pod, err := kclient.Pods(namespace).List(options)
 if err != nil {
@@ -681,7 +692,7 @@
 }
 for _, l := range pod.Items {
 if reflect.DeepEqual(l.Labels, komposeLabel) {
-rpPod, err := kubectl.ReaperFor(api.Kind("Pod"), kclient)
+rpPod, err := kubectl.ReaperFor(kapi.Kind("Pod"), kclient)
 if err != nil {
 errorList = append(errorList, err)
 break
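As a standalone illustration of the build-args handling added to `initBuildConfig` above: the commit treats an argument value of `"\x00"` (which is what an argument listed without a value, such as `- foo` in the buildargs fixture, arrives as) as "take it from the environment". A minimal sketch, with a plain `envVar` struct standing in for `kapi.EnvVar`:

```go
package main

import (
	"fmt"
	"os"
)

// envVar is a stand-in for kapi.EnvVar, which the real code puts into the
// BuildConfig's Docker strategy Env list.
type envVar struct {
	Name  string
	Value string
}

// buildArgsToEnv mirrors the loop above: args whose value is the "\x00"
// sentinel are resolved from the process environment instead.
func buildArgsToEnv(args map[string]*string) []envVar {
	envList := []envVar{}
	for name, value := range args {
		v := *value
		if v == "\x00" {
			v = os.Getenv(name)
		}
		envList = append(envList, envVar{Name: name, Value: v})
	}
	return envList
}

func main() {
	os.Setenv("foo", "bar")
	web := "web"
	sentinel := "\x00"
	// corresponds to `args: [NAME=web, foo]` in the buildargs fixture
	args := map[string]*string{"NAME": &web, "foo": &sentinel}
	fmt.Println(buildArgsToEnv(args))
}
```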


@@ -17,13 +17,13 @@ limitations under the License.
 package openshift
 import (
+kapi "k8s.io/kubernetes/pkg/api"
+"k8s.io/kubernetes/pkg/runtime"
 "os"
 "path/filepath"
+"reflect"
 "testing"
-"k8s.io/kubernetes/pkg/api"
-"k8s.io/kubernetes/pkg/runtime"
 deployapi "github.com/openshift/origin/pkg/deploy/api"
 "github.com/kubernetes-incubator/kompose/pkg/kobject"
@@ -37,7 +37,7 @@ func newServiceConfig() kobject.ServiceConfig {
 ContainerName: "myfoobarname",
 Image: "image",
 Environment: []kobject.EnvVar{kobject.EnvVar{Name: "env", Value: "value"}},
-Port: []kobject.Ports{kobject.Ports{HostPort: 123, ContainerPort: 456, Protocol: api.ProtocolTCP}},
+Port: []kobject.Ports{kobject.Ports{HostPort: 123, ContainerPort: 456, Protocol: kapi.ProtocolTCP}},
 Command: []string{"cmd"},
 WorkingDir: "dir",
 Args: []string{"arg1", "arg2"},
@@ -281,37 +281,66 @@ func TestGetAbsBuildContext(t *testing.T) {
 // Test initializing buildconfig for a service
 func TestInitBuildConfig(t *testing.T) {
+serviceName := "serviceA"
+repo := "https://git.test.com/org/repo1"
+branch := "somebranch"
+buildArgs := []kapi.EnvVar{{Name: "name", Value: "value"}}
+value := "value"
+testDir := "a/build"
 dir := testutils.CreateLocalGitDirectory(t)
-testutils.CreateSubdir(t, dir, "a/build")
+testutils.CreateSubdir(t, dir, testDir)
 defer os.RemoveAll(dir)
-serviceName := "serviceA"
-repo := "https://git.test.com/org/repo"
-branch := "somebranch"
-sc := kobject.ServiceConfig{
-Build: filepath.Join(dir, "a/build"),
+testCases := []struct {
+Name string
+ServiceConfig kobject.ServiceConfig
+}{
+{
+Name: "Service config without image key",
+ServiceConfig: kobject.ServiceConfig{
+Build: filepath.Join(dir, testDir),
 Dockerfile: "Dockerfile-alternate",
+BuildArgs: map[string]*string{"name": &value},
+},
+},
+{
+Name: "Service config with image key",
+ServiceConfig: kobject.ServiceConfig{
+Build: filepath.Join(dir, testDir),
+Dockerfile: "Dockerfile-alternate",
+BuildArgs: map[string]*string{"name": &value},
+Image: "foo:bar",
+},
+},
 }
-bc, err := initBuildConfig(serviceName, sc, repo, branch)
+for _, test := range testCases {
+bc, err := initBuildConfig(serviceName, test.ServiceConfig, repo, branch)
 if err != nil {
 t.Error(errors.Wrap(err, "initBuildConfig failed"))
 }
-testCases := map[string]struct {
+assertions := map[string]struct {
 field string
 value string
 }{
 "Assert buildconfig source git URI": {bc.Spec.CommonSpec.Source.Git.URI, repo},
 "Assert buildconfig source git Ref": {bc.Spec.CommonSpec.Source.Git.Ref, branch},
-"Assert buildconfig source context dir": {bc.Spec.CommonSpec.Source.ContextDir, "a/build/"},
-"Assert buildconfig output name": {bc.Spec.CommonSpec.Output.To.Name, serviceName + ":latest"},
-"Assert buildconfig dockerfilepath": {bc.Spec.CommonSpec.Strategy.DockerStrategy.DockerfilePath, "Dockerfile-alternate"},
+"Assert buildconfig source context dir": {bc.Spec.CommonSpec.Source.ContextDir, testDir + "/"},
+// BuildConfig output image is named after service name. If image key is set than tag from that is used.
+"Assert buildconfig output name": {bc.Spec.CommonSpec.Output.To.Name, serviceName + ":" + getImageTag(test.ServiceConfig.Image)},
+"Assert buildconfig dockerfilepath": {bc.Spec.CommonSpec.Strategy.DockerStrategy.DockerfilePath, test.ServiceConfig.Dockerfile},
 }
-for name, test := range testCases {
-t.Log("Test case: ", name)
-if test.field != test.value {
-t.Errorf("Expected: %#v, got: %#v", test.value, test.field)
+for name, assertionTest := range assertions {
+if assertionTest.field != assertionTest.value {
+t.Errorf("%s Expected: %#v, got: %#v", name, assertionTest.value, assertionTest.field)
+}
+}
+if !reflect.DeepEqual(bc.Spec.CommonSpec.Strategy.DockerStrategy.Env, buildArgs) {
+t.Errorf("Expected: %#v, got: %#v", bc.Spec.CommonSpec.Strategy.DockerStrategy.Env, buildArgs)
+}
 }
 }


@@ -20,15 +20,13 @@ source $KOMPOSE_ROOT/script/test/cmd/lib.sh
 # Get current branch and remote url of git repository
 branch=$(git branch | grep \* | cut -d ' ' -f2-)
-uri=$(git remote get-url origin)
+uri=$(git config --get remote.origin.url)
 if [[ $uri != *".git"* ]]; then
 uri="${uri}.git"
 fi
 # Warning Template
 warning="Buildconfig using $uri::$branch as source."
-# Replacing variables with current branch and uri
-sed -e "s;%URI%;$uri;g" -e "s;%REF%;$branch;g" $KOMPOSE_ROOT/script/test/fixtures/nginx-node-redis/output-os-template.json > /tmp/output-os.json
 #######
 # Tests related to docker-compose file in /script/test/fixtures/etherpad
@@ -58,7 +56,11 @@ unset $(cat $KOMPOSE_ROOT/script/test/fixtures/gitlab/envs | cut -d'=' -f1)
 # kubernetes test
 convert::expect_success_and_warning "kompose -f $KOMPOSE_ROOT/script/test/fixtures/nginx-node-redis/docker-compose.yml convert --stdout -j" "$KOMPOSE_ROOT/script/test/fixtures/nginx-node-redis/output-k8s.json" "Kubernetes provider doesn't support build key - ignoring"
 # openshift test
+# Replacing variables with current branch and uri
+sed -e "s;%URI%;$uri;g" -e "s;%REF%;$branch;g" $KOMPOSE_ROOT/script/test/fixtures/nginx-node-redis/output-os-template.json > /tmp/output-os.json
 convert::expect_success_and_warning "kompose --provider=openshift -f $KOMPOSE_ROOT/script/test/fixtures/nginx-node-redis/docker-compose.yml convert --stdout -j" "/tmp/output-os.json" "$warning"
+rm /tmp/output-os.json
 ######
 # Tests related to docker-compose file in /script/test/fixtures/entrypoint-command
 # kubernetes test
@@ -209,6 +211,8 @@ convert::check_artifacts_generated "kompose -f $KOMPOSE_ROOT/script/test/fixture
 ####
 # Test regarding build context (running kompose from various directories)
+# Replacing variables with current branch and uri
+sed -e "s;%URI%;$uri;g" -e "s;%REF%;$branch;g" $KOMPOSE_ROOT/script/test/fixtures/nginx-node-redis/output-os-template.json > /tmp/output-os.json
 CURRENT_DIR=$(pwd)
 cd "$KOMPOSE_ROOT/script/test/fixtures/nginx-node-redis/"
 convert::expect_success_and_warning "kompose convert --provider openshift --stdout -j" "/tmp/output-os.json" "$warning"
@@ -217,6 +221,15 @@ convert::expect_success_and_warning "kompose convert --provider openshift --stdo
 cd "$KOMPOSE_ROOT/script/test/fixtures/nginx-node-redis/node"
 convert::expect_success_and_warning "kompose convert --provider openshift --stdout -j -f ../docker-compose.yml" "/tmp/output-os.json" "$warning"
 cd $CURRENT_DIR
+rm /tmp/output-os.json
+# Test the presence of build args in buildconfig
+# Replacing variables with current branch and uri
+sed -e "s;%URI%;$uri;g" -e "s;%REF%;$branch;g" $KOMPOSE_ROOT/script/test/fixtures/buildargs/output-os-template.json > /tmp/output-buildarg-os.json
+export $(cat $KOMPOSE_ROOT/script/test/fixtures/buildargs/envs)
+convert::expect_success_and_warning "kompose --provider openshift -f $KOMPOSE_ROOT/script/test/fixtures/buildargs/docker-compose.yml convert --stdout -j" "/tmp/output-buildarg-os.json" "$warning"
+rm /tmp/output-buildarg-os.json
 # Test related to support docker-compose.yaml beside docker-compose.yml
 # Store the original path
@@ -234,7 +247,4 @@ convert::expect_success "kompose --provider=openshift convert --stdout -j" "$KOM
 # Return back to the original path
 cd $CURRENT_DIR
-# Removes generated output
-rm -rf /tmp/output-os.json
 exit $EXIT_STATUS


@@ -0,0 +1,11 @@
## Docker Compose Buildargs
### Usage
The simplest thing to do:
```bash
export $(cat envs)
```
To customize the values edit `envs` file.


@@ -0,0 +1,2 @@
FROM busybox
RUN touch /test


@@ -0,0 +1,16 @@
version: "2"
services:
foo:
build:
context: "./build"
args:
NAME: web
command: "sleep 3600"
foo1:
build:
context: "./build"
args:
- NAME=web
- foo
command: "sleep 3600"

script/test/fixtures/buildargs/envs (new file)

@@ -0,0 +1 @@
foo=bar


@@ -0,0 +1,325 @@
{
"kind": "List",
"apiVersion": "v1",
"metadata": {},
"items": [
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "foo1",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foo1"
}
},
"spec": {
"ports": [
{
"name": "headless",
"port": 55555,
"targetPort": 0
}
],
"selector": {
"io.kompose.service": "foo1"
},
"clusterIP": "None"
},
"status": {
"loadBalancer": {}
}
},
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "foo",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foo"
}
},
"spec": {
"ports": [
{
"name": "headless",
"port": 55555,
"targetPort": 0
}
],
"selector": {
"io.kompose.service": "foo"
},
"clusterIP": "None"
},
"status": {
"loadBalancer": {}
}
},
{
"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
"name": "foo1",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foo1"
}
},
"spec": {
"strategy": {
"resources": {}
},
"triggers": [
{
"type": "ConfigChange"
},
{
"type": "ImageChange",
"imageChangeParams": {
"automatic": true,
"containerNames": [
"foo1"
],
"from": {
"kind": "ImageStreamTag",
"name": "foo1:latest"
}
}
}
],
"replicas": 1,
"test": false,
"selector": {
"io.kompose.service": "foo1"
},
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foo1"
}
},
"spec": {
"containers": [
{
"name": "foo1",
"image": " ",
"args": [
"sleep",
"3600"
],
"resources": {}
}
],
"restartPolicy": "Always"
}
}
},
"status": {}
},
{
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
"name": "foo1",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foo1"
}
},
"spec": {},
"status": {
"dockerImageRepository": ""
}
},
{
"kind": "BuildConfig",
"apiVersion": "v1",
"metadata": {
"name": "foo1",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foo1"
}
},
"spec": {
"triggers": [
{
"type": "ConfigChange"
},
{
"type": "ImageChange"
}
],
"runPolicy": "Serial",
"source": {
"type": "Git",
"git": {
"uri": "%URI%",
"ref": "%REF%"
},
"contextDir": "script/test/fixtures/buildargs/build/"
},
"strategy": {
"type": "Docker",
"dockerStrategy": {
"env": [
{
"name": "NAME",
"value": "web"
},
{
"name": "foo",
"value": "bar"
}
]
}
},
"output": {
"to": {
"kind": "ImageStreamTag",
"name": "foo1:latest"
}
},
"resources": {},
"postCommit": {},
"nodeSelector": null
},
"status": {
"lastVersion": 0
}
},
{
"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
"name": "foo",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foo"
}
},
"spec": {
"strategy": {
"resources": {}
},
"triggers": [
{
"type": "ConfigChange"
},
{
"type": "ImageChange",
"imageChangeParams": {
"automatic": true,
"containerNames": [
"foo"
],
"from": {
"kind": "ImageStreamTag",
"name": "foo:latest"
}
}
}
],
"replicas": 1,
"test": false,
"selector": {
"io.kompose.service": "foo"
},
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foo"
}
},
"spec": {
"containers": [
{
"name": "foo",
"image": " ",
"args": [
"sleep",
"3600"
],
"resources": {}
}
],
"restartPolicy": "Always"
}
}
},
"status": {}
},
{
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
"name": "foo",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foo"
}
},
"spec": {},
"status": {
"dockerImageRepository": ""
}
},
{
"kind": "BuildConfig",
"apiVersion": "v1",
"metadata": {
"name": "foo",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "foo"
}
},
"spec": {
"triggers": [
{
"type": "ConfigChange"
},
{
"type": "ImageChange"
}
],
"runPolicy": "Serial",
"source": {
"type": "Git",
"git": {
"uri": "%URI%",
"ref": "%REF%"
},
"contextDir": "script/test/fixtures/buildargs/build/"
},
"strategy": {
"type": "Docker",
"dockerStrategy": {
"env": [
{
"name": "NAME",
"value": "web"
}
]
}
},
"output": {
"to": {
"kind": "ImageStreamTag",
"name": "foo:latest"
}
},
"resources": {},
"postCommit": {},
"nodeSelector": null
},
"status": {
"lastVersion": 0
}
}
]
}


@@ -60,7 +60,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "redis",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "redis"
+}
 },
 "spec": {
 "replicas": 1,
@@ -96,7 +99,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "web",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "web"
+}
 },
 "spec": {
 "replicas": 1,


@@ -146,6 +146,9 @@
 "metadata": {
 "name": "db",
 "creationTimestamp": null,
+"labels": {
+"io.kompose.service": "db"
+},
 "annotations": {
 "com.example.description": "Postgres Database"
 }
@@ -184,7 +187,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "redis",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "redis"
+}
 },
 "spec": {
 "replicas": 1,
@@ -220,7 +226,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "result",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "result"
+}
 },
 "spec": {
 "replicas": 1,
@@ -257,6 +266,9 @@
 "metadata": {
 "name": "vote",
 "creationTimestamp": null,
+"labels": {
+"io.kompose.service": "vote"
+},
 "annotations": {
 "com.example.description": "Vote"
 }
@@ -295,7 +307,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "worker",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "worker"
+}
 },
 "spec": {
 "replicas": 1,


@@ -35,7 +35,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "base",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "base"
+}
 },
 "spec": {
 "replicas": 1,


@@ -356,7 +356,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "hygieia-api",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "hygieia-api"
+}
 },
 "spec": {
 "replicas": 1,
@@ -430,7 +433,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "hygieia-bitbucket-scm-collector",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "hygieia-bitbucket-scm-collector"
+}
 },
 "spec": {
 "replicas": 1,
@@ -499,7 +505,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "hygieia-chat-ops-collector",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "hygieia-chat-ops-collector"
+}
 },
 "spec": {
 "replicas": 1,
@@ -568,7 +577,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "hygieia-github-scm-collector",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "hygieia-github-scm-collector"
+}
 },
 "spec": {
 "replicas": 1,
@@ -637,7 +649,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "hygieia-jenkins-build-collector",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "hygieia-jenkins-build-collector"
+}
 },
 "spec": {
 "replicas": 1,
@@ -706,7 +721,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "hygieia-jenkins-cucumber-test-collector",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "hygieia-jenkins-cucumber-test-collector"
+}
 },
 "spec": {
 "replicas": 1,
@@ -775,7 +793,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "hygieia-jira-feature-collector",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "hygieia-jira-feature-collector"
+}
 },
 "spec": {
 "replicas": 1,
@@ -854,7 +875,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "hygieia-sonar-codequality-collector",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "hygieia-sonar-codequality-collector"
+}
 },
 "spec": {
 "replicas": 1,
@@ -923,7 +947,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "hygieia-subversion-scm-collector",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "hygieia-subversion-scm-collector"
+}
 },
 "spec": {
 "replicas": 1,
@@ -992,7 +1019,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "hygieia-udeploy-collector",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "hygieia-udeploy-collector"
+}
 },
 "spec": {
 "replicas": 1,
@@ -1075,7 +1105,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "hygieia-ui",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "hygieia-ui"
+}
 },
 "spec": {
 "replicas": 1,
@@ -1111,7 +1144,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "hygieia-versionone-collector",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "hygieia-versionone-collector"
+}
 },
 "spec": {
 "replicas": 1,
@@ -1180,7 +1216,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "mongodb",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "mongodb"
+}
 },
 "spec": {
 "replicas": 1,


@@ -60,7 +60,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "etherpad",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "etherpad"
+}
 },
 "spec": {
 "replicas": 1,
@@ -118,7 +121,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "mariadb",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "mariadb"
+}
 },
 "spec": {
 "replicas": 1,


@@ -68,7 +68,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "redis",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "redis"
+}
 },
 "spec": {
 "replicas": 1,
@@ -105,6 +108,9 @@
 "metadata": {
 "name": "web",
 "creationTimestamp": null,
+"labels": {
+"io.kompose.service": "web"
+},
 "annotations": {
 "kompose.service.expose": "batman.example.com"
 }


@@ -63,7 +63,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "redis",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "redis"
+}
 },
 "spec": {
 "replicas": 1,
@@ -100,6 +103,9 @@
 "metadata": {
 "name": "web",
 "creationTimestamp": null,
+"labels": {
+"io.kompose.service": "web"
+},
 "annotations": {
 "kompose.service.expose": "batman.example.com"
 }


@@ -68,7 +68,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "redis",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "redis"
+}
 },
 "spec": {
 "replicas": 1,
@@ -105,6 +108,9 @@
 "metadata": {
 "name": "web",
 "creationTimestamp": null,
+"labels": {
+"io.kompose.service": "web"
+},
 "annotations": {
 "kompose.service.expose": "True"
 }


@@ -63,7 +63,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "redis",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "redis"
+}
 },
 "spec": {
 "replicas": 1,
@@ -100,6 +103,9 @@
 "metadata": {
 "name": "web",
 "creationTimestamp": null,
+"labels": {
+"io.kompose.service": "web"
+},
 "annotations": {
 "kompose.service.expose": "True"
 }


@@ -96,7 +96,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "gitlab",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "gitlab"
+}
 },
 "spec": {
 "replicas": 1,
@@ -172,7 +175,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "postgresql",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "postgresql"
+}
 },
 "spec": {
 "replicas": 1,
@@ -222,7 +228,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "redis",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "redis"
+}
 },
 "spec": {
 "replicas": 1,


@@ -86,7 +86,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "frontend",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "frontend"
+}
 },
 "spec": {
 "replicas": 1,
@@ -140,7 +143,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "redis-master",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "redis-master"
+}
 },
 "spec": {
 "replicas": 1,
@@ -176,7 +182,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "redis-slave",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "redis-slave"
+}
 },
 "spec": {
 "replicas": 1,


@@ -40,7 +40,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "redis",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "redis"
+}
 },
 "spec": {
 "replicas": 1,


@@ -40,7 +40,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "redis",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "redis"
+}
 },
 "spec": {
 "replicas": 1,


@@ -70,7 +70,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "etherpad",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "etherpad"
+}
 },
 "spec": {
 "replicas": 1,
@@ -131,7 +134,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "mariadb",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "mariadb"
+}
 },
 "spec": {
 "replicas": 1,


@@ -138,7 +138,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "nginx",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "nginx"
+}
 },
 "spec": {
 "replicas": 1,
@@ -173,7 +176,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "node1",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "node1"
+}
 },
 "spec": {
 "replicas": 1,
@@ -208,7 +214,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "node2",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "node2"
+}
 },
 "spec": {
 "replicas": 1,
@@ -243,7 +252,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "node3",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "node3"
+}
 },
 "spec": {
 "replicas": 1,
@@ -278,7 +290,10 @@
 "apiVersion": "extensions/v1beta1",
 "metadata": {
 "name": "redis",
-"creationTimestamp": null
+"creationTimestamp": null,
+"labels": {
+"io.kompose.service": "redis"
+}
 },
 "spec": {
 "replicas": 1,

View File

@ -216,7 +216,10 @@
"apiVersion": "v1", "apiVersion": "v1",
"metadata": { "metadata": {
"name": "nginx", "name": "nginx",
"creationTimestamp": null "creationTimestamp": null,
"labels": {
"io.kompose.service": "nginx"
}
}, },
"spec": { "spec": {
"triggers": [ "triggers": [
@ -337,7 +340,10 @@
"apiVersion": "v1", "apiVersion": "v1",
"metadata": { "metadata": {
"name": "node1", "name": "node1",
"creationTimestamp": null "creationTimestamp": null,
"labels": {
"io.kompose.service": "node1"
}
}, },
"spec": { "spec": {
"triggers": [ "triggers": [
@ -458,7 +464,10 @@
"apiVersion": "v1", "apiVersion": "v1",
"metadata": { "metadata": {
"name": "node2", "name": "node2",
"creationTimestamp": null "creationTimestamp": null,
"labels": {
"io.kompose.service": "node2"
}
}, },
"spec": { "spec": {
"triggers": [ "triggers": [
@ -579,7 +588,10 @@
"apiVersion": "v1", "apiVersion": "v1",
"metadata": { "metadata": {
"name": "node3", "name": "node3",
"creationTimestamp": null "creationTimestamp": null,
"labels": {
"io.kompose.service": "node3"
}
}, },
"spec": { "spec": {
"triggers": [ "triggers": [

View File

@ -66,7 +66,10 @@
"apiVersion": "extensions/v1beta1", "apiVersion": "extensions/v1beta1",
"metadata": { "metadata": {
"name": "redis", "name": "redis",
"creationTimestamp": null "creationTimestamp": null,
"labels": {
"io.kompose.service": "redis"
}
}, },
"spec": { "spec": {
"replicas": 1, "replicas": 1,
@ -106,7 +109,10 @@
"apiVersion": "extensions/v1beta1", "apiVersion": "extensions/v1beta1",
"metadata": { "metadata": {
"name": "web", "name": "web",
"creationTimestamp": null "creationTimestamp": null,
"labels": {
"io.kompose.service": "web"
}
}, },
"spec": { "spec": {
"replicas": 1, "replicas": 1,

View File

@ -66,7 +66,10 @@
"apiVersion": "extensions/v1beta1", "apiVersion": "extensions/v1beta1",
"metadata": { "metadata": {
"name": "redis", "name": "redis",
"creationTimestamp": null "creationTimestamp": null,
"labels": {
"io.kompose.service": "redis"
}
}, },
"spec": { "spec": {
"replicas": 1, "replicas": 1,
@ -106,7 +109,10 @@
"apiVersion": "extensions/v1beta1", "apiVersion": "extensions/v1beta1",
"metadata": { "metadata": {
"name": "web", "name": "web",
"creationTimestamp": null "creationTimestamp": null,
"labels": {
"io.kompose.service": "web"
}
}, },
"spec": { "spec": {
"replicas": 1, "replicas": 1,

View File

@ -66,7 +66,10 @@
"apiVersion": "extensions/v1beta1", "apiVersion": "extensions/v1beta1",
"metadata": { "metadata": {
"name": "mariadb", "name": "mariadb",
"creationTimestamp": null "creationTimestamp": null,
"labels": {
"io.kompose.service": "mariadb"
}
}, },
"spec": { "spec": {
"replicas": 1, "replicas": 1,
@ -149,7 +152,10 @@
"apiVersion": "extensions/v1beta1", "apiVersion": "extensions/v1beta1",
"metadata": { "metadata": {
"name": "wordpress", "name": "wordpress",
"creationTimestamp": null "creationTimestamp": null,
"labels": {
"io.kompose.service": "wordpress"
}
}, },
"spec": { "spec": {
"replicas": 1, "replicas": 1,

View File

@ -34,7 +34,10 @@
"apiVersion": "extensions/v1beta1", "apiVersion": "extensions/v1beta1",
"metadata": { "metadata": {
"name": "client", "name": "client",
"creationTimestamp": null "creationTimestamp": null,
"labels": {
"io.kompose.service": "client"
}
}, },
"spec": { "spec": {
"replicas": 1, "replicas": 1,

View File

@ -34,7 +34,10 @@
"apiVersion": "extensions/v1beta1", "apiVersion": "extensions/v1beta1",
"metadata": { "metadata": {
"name": "client", "name": "client",
"creationTimestamp": null "creationTimestamp": null,
"labels": {
"io.kompose.service": "client"
}
}, },
"spec": { "spec": {
"replicas": 1, "replicas": 1,

View File

@ -34,7 +34,10 @@
"apiVersion": "extensions/v1beta1", "apiVersion": "extensions/v1beta1",
"metadata": { "metadata": {
"name": "httpd", "name": "httpd",
"creationTimestamp": null "creationTimestamp": null,
"labels": {
"io.kompose.service": "httpd"
}
}, },
"spec": { "spec": {
"replicas": 1, "replicas": 1,

View File

@ -60,7 +60,10 @@
"apiVersion": "extensions/v1beta1", "apiVersion": "extensions/v1beta1",
"metadata": { "metadata": {
"name": "nginx", "name": "nginx",
"creationTimestamp": null "creationTimestamp": null,
"labels": {
"io.kompose.service": "nginx"
}
}, },
"spec": { "spec": {
"replicas": 1, "replicas": 1,
@ -144,7 +147,10 @@
"apiVersion": "extensions/v1beta1", "apiVersion": "extensions/v1beta1",
"metadata": { "metadata": {
"name": "web", "name": "web",
"creationTimestamp": null "creationTimestamp": null,
"labels": {
"io.kompose.service": "web"
}
}, },
"spec": { "spec": {
"replicas": 1, "replicas": 1,

View File

@ -60,7 +60,10 @@
"apiVersion": "extensions/v1beta1", "apiVersion": "extensions/v1beta1",
"metadata": { "metadata": {
"name": "redis", "name": "redis",
"creationTimestamp": null "creationTimestamp": null,
"labels": {
"io.kompose.service": "redis"
}
}, },
"spec": { "spec": {
"replicas": 1, "replicas": 1,
@ -96,7 +99,10 @@
"apiVersion": "extensions/v1beta1", "apiVersion": "extensions/v1beta1",
"metadata": { "metadata": {
"name": "web", "name": "web",
"creationTimestamp": null "creationTimestamp": null,
"labels": {
"io.kompose.service": "web"
}
}, },
"spec": { "spec": {
"replicas": 1, "replicas": 1,

View File

@ -60,7 +60,10 @@
"apiVersion": "extensions/v1beta1", "apiVersion": "extensions/v1beta1",
"metadata": { "metadata": {
"name": "redis", "name": "redis",
"creationTimestamp": null "creationTimestamp": null,
"labels": {
"io.kompose.service": "redis"
}
}, },
"spec": { "spec": {
"replicas": 1, "replicas": 1,
@ -96,7 +99,10 @@
"apiVersion": "extensions/v1beta1", "apiVersion": "extensions/v1beta1",
"metadata": { "metadata": {
"name": "web", "name": "web",
"creationTimestamp": null "creationTimestamp": null,
"labels": {
"io.kompose.service": "web"
}
}, },
"spec": { "spec": {
"replicas": 1, "replicas": 1,

View File

@ -281,6 +281,7 @@ var servicesSchemaDataV2 = `{
"external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"extra_hosts": {"$ref": "#/definitions/list_or_dict"}, "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
"group_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"hostname": {"type": "string"}, "hostname": {"type": "string"},
"image": {"type": "string"}, "image": {"type": "string"},
"ipc": {"type": "string"}, "ipc": {"type": "string"},
@ -347,6 +348,7 @@ var servicesSchemaDataV2 = `{
"security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"shm_size": {"type": ["number", "string"]}, "shm_size": {"type": ["number", "string"]},
"stdin_open": {"type": "boolean"}, "stdin_open": {"type": "boolean"},
"stop_grace_period": {"type": "string"},
"stop_signal": {"type": "string"}, "stop_signal": {"type": "string"},
"tmpfs": {"$ref": "#/definitions/string_or_list"}, "tmpfs": {"$ref": "#/definitions/string_or_list"},
"tty": {"type": "boolean"}, "tty": {"type": "boolean"},

View File

@ -129,6 +129,7 @@ type ServiceConfig struct {
Privileged bool `yaml:"privileged,omitempty"` Privileged bool `yaml:"privileged,omitempty"`
SecurityOpt []string `yaml:"security_opt,omitempty"` SecurityOpt []string `yaml:"security_opt,omitempty"`
ShmSize yaml.MemStringorInt `yaml:"shm_size,omitempty"` ShmSize yaml.MemStringorInt `yaml:"shm_size,omitempty"`
StopGracePeriod string `yaml:"stop_grace_period,omitempty"`
StopSignal string `yaml:"stop_signal,omitempty"` StopSignal string `yaml:"stop_signal,omitempty"`
Tmpfs yaml.Stringorslice `yaml:"tmpfs,omitempty"` Tmpfs yaml.Stringorslice `yaml:"tmpfs,omitempty"`
VolumeDriver string `yaml:"volume_driver,omitempty"` VolumeDriver string `yaml:"volume_driver,omitempty"`

View File

@ -288,7 +288,8 @@ func (p *Project) handleNetworkConfig() {
// Consolidate the name of the network // Consolidate the name of the network
// FIXME(vdemeester) probably shouldn't be there, maybe move that to interface/factory // FIXME(vdemeester) probably shouldn't be there, maybe move that to interface/factory
for _, network := range serviceConfig.Networks.Networks { for _, network := range serviceConfig.Networks.Networks {
if net, ok := p.NetworkConfigs[network.Name]; ok { net, ok := p.NetworkConfigs[network.Name]
if ok && net != nil {
if net.External.External { if net.External.External {
network.RealName = network.Name network.RealName = network.Name
if net.External.Name != "" { if net.External.Name != "" {
@ -297,6 +298,12 @@ func (p *Project) handleNetworkConfig() {
} else { } else {
network.RealName = p.Name + "_" + network.Name network.RealName = p.Name + "_" + network.Name
} }
} else {
network.RealName = p.Name + "_" + network.Name
p.NetworkConfigs[network.Name] = &config.NetworkConfig{
External: yaml.External{External: false},
}
} }
// Ignoring if we don't find the network, it will be caught later // Ignoring if we don't find the network, it will be caught later
} }
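The new `else` branch gives services that reference an undeclared network a usable default: the network is registered as non-external and its real name is prefixed with the project name. A simplified, self-contained sketch of that fallback (not the libcompose code itself):

```go
package main

import "fmt"

type networkConfig struct{ External bool }

// resolveNetworkName mirrors the fallback above: external networks keep their
// own name, everything else is prefixed with the project name, and a network
// that was never declared gets an implicit non-external config entry.
func resolveNetworkName(project, name string, configs map[string]*networkConfig) string {
	if cfg, ok := configs[name]; ok && cfg != nil {
		if cfg.External {
			return name
		}
		return project + "_" + name
	}
	configs[name] = &networkConfig{External: false}
	return project + "_" + name
}

func main() {
	configs := map[string]*networkConfig{}
	fmt.Println(resolveNetworkName("myapp", "backend", configs)) // myapp_backend
	fmt.Println(configs["backend"].External)                     // false
}
```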

View File

@ -3,6 +3,7 @@ package utils
import ( import (
"encoding/json" "encoding/json"
"sync" "sync"
"time"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
@ -160,3 +161,18 @@ func ConvertKeysToStrings(item interface{}) interface{} {
return item return item
} }
} }
// DurationStrToSecondsInt converts duration string to *int in seconds
func DurationStrToSecondsInt(s string) *int {
if s == "" {
return nil
}
duration, err := time.ParseDuration(s)
if err != nil {
logrus.Errorf("Failed to parse duration:%v", s)
return nil
}
r := (int)(duration.Seconds())
return &r
}
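This helper backs the new `stop_grace_period` handling: the Compose duration string is parsed and converted to whole seconds, the unit Kubernetes expects for a pod's termination grace period. A stand-alone illustration of the same conversion:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// A Compose value such as `stop_grace_period: "1m30s"` ...
	d, err := time.ParseDuration("1m30s")
	if err != nil {
		panic(err)
	}
	// ... becomes 90, suitable for Pod.Spec.TerminationGracePeriodSeconds.
	fmt.Println(int(d.Seconds()))
}
```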

View File

@ -89,7 +89,7 @@ func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error
switch k.Kind() { switch k.Kind() {
case reflect.Bool: case reflect.Bool:
return d.decodeBool(name, node, result) return d.decodeBool(name, node, result)
case reflect.Float64: case reflect.Float32, reflect.Float64:
return d.decodeFloat(name, node, result) return d.decodeFloat(name, node, result)
case reflect.Int, reflect.Int32, reflect.Int64: case reflect.Int, reflect.Int32, reflect.Int64:
return d.decodeInt(name, node, result) return d.decodeInt(name, node, result)
@ -143,7 +143,7 @@ func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value)
return err return err
} }
result.Set(reflect.ValueOf(v)) result.Set(reflect.ValueOf(v).Convert(result.Type()))
return nil return nil
} }
} }

View File

@ -3,6 +3,7 @@
package parser package parser
import ( import (
"bytes"
"errors" "errors"
"fmt" "fmt"
"strings" "strings"
@ -36,6 +37,11 @@ func newParser(src []byte) *Parser {
// Parse returns the fully parsed source and returns the abstract syntax tree. // Parse returns the fully parsed source and returns the abstract syntax tree.
func Parse(src []byte) (*ast.File, error) { func Parse(src []byte) (*ast.File, error) {
// normalize all line endings
// since the scanner and output only work with "\n" line endings, we may
// end up with dangling "\r" characters in the parsed data.
src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1)
p := newParser(src) p := newParser(src)
return p.Parse() return p.Parse()
} }
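With the normalization in place, HCL sources written with Windows line endings parse without stray `\r` characters leaking into the tree. A small usage sketch, assuming this is the `github.com/hashicorp/hcl/hcl/parser` package:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	// CRLF line endings are rewritten to LF before scanning.
	src := []byte("name = \"kompose\"\r\nreplicas = 1\r\n")
	file, err := parser.Parse(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(file.Node != nil) // true
}
```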

View File

@ -121,6 +121,11 @@ func StringToTimeDurationHookFunc() DecodeHookFunc {
} }
} }
// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to
// the decoder.
//
// Note that this is significantly different from the WeaklyTypedInput option
// of the DecoderConfig.
func WeaklyTypedHook( func WeaklyTypedHook(
f reflect.Kind, f reflect.Kind,
t reflect.Kind, t reflect.Kind,
@ -132,9 +137,8 @@ func WeaklyTypedHook(
case reflect.Bool: case reflect.Bool:
if dataVal.Bool() { if dataVal.Bool() {
return "1", nil return "1", nil
} else {
return "0", nil
} }
return "0", nil
case reflect.Float32: case reflect.Float32:
return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
case reflect.Int: case reflect.Int:
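`WeaklyTypedHook` is registered through `DecoderConfig.DecodeHook` and, as the bool case above shows, only converts when the target kind is a string. A usage sketch (not part of this diff):

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Options struct {
	Privileged string // string field fed from a bool input
}

func main() {
	input := map[string]interface{}{"Privileged": true}

	var out Options
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		DecodeHook: mapstructure.WeaklyTypedHook,
		Result:     &out,
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(input); err != nil {
		panic(err)
	}
	fmt.Println(out.Privileged) // "1", per the bool case above
}
```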

View File

@ -1,5 +1,5 @@
// The mapstructure package exposes functionality to convert an // Package mapstructure exposes functionality to convert an arbitrary
// arbitrary map[string]interface{} into a native Go structure. // map[string]interface{} into a native Go structure.
// //
// The Go structure can be arbitrarily complex, containing slices, // The Go structure can be arbitrarily complex, containing slices,
// other structs, etc. and the decoder will properly decode nested // other structs, etc. and the decoder will properly decode nested
@ -32,7 +32,12 @@ import (
// both. // both.
type DecodeHookFunc interface{} type DecodeHookFunc interface{}
// DecodeHookFuncType is a DecodeHookFunc which has complete information about
// the source and target types.
type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the
// source and target types.
type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
// DecoderConfig is the configuration that is used to create a new decoder // DecoderConfig is the configuration that is used to create a new decoder
@ -436,7 +441,7 @@ func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value)
case dataKind == reflect.Uint: case dataKind == reflect.Uint:
val.SetFloat(float64(dataVal.Uint())) val.SetFloat(float64(dataVal.Uint()))
case dataKind == reflect.Float32: case dataKind == reflect.Float32:
val.SetFloat(float64(dataVal.Float())) val.SetFloat(dataVal.Float())
case dataKind == reflect.Bool && d.config.WeaklyTypedInput: case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
if dataVal.Bool() { if dataVal.Bool() {
val.SetFloat(1) val.SetFloat(1)

View File

@ -0,0 +1,2 @@
// Package cmd provides command helpers for builds
package cmd

View File

@ -0,0 +1,150 @@
package cmd
import (
"sort"
"strings"
"time"
"github.com/golang/glog"
kapi "k8s.io/kubernetes/pkg/api"
kerrors "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/kubectl"
ktypes "k8s.io/kubernetes/pkg/types"
kutilerrors "k8s.io/kubernetes/pkg/util/errors"
buildapi "github.com/openshift/origin/pkg/build/api"
buildutil "github.com/openshift/origin/pkg/build/util"
"github.com/openshift/origin/pkg/client"
"github.com/openshift/origin/pkg/util"
)
// NewBuildConfigReaper returns a new reaper for buildConfigs
func NewBuildConfigReaper(oc *client.Client) kubectl.Reaper {
return &BuildConfigReaper{oc: oc, pollInterval: kubectl.Interval, timeout: kubectl.Timeout}
}
// BuildConfigReaper implements the Reaper interface for buildConfigs
type BuildConfigReaper struct {
oc client.Interface
pollInterval, timeout time.Duration
}
// Stop deletes the build configuration and all of the associated builds.
func (reaper *BuildConfigReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *kapi.DeleteOptions) error {
_, err := reaper.oc.BuildConfigs(namespace).Get(name)
if err != nil {
return err
}
var bcPotentialBuilds []buildapi.Build
// Collect builds related to the config.
builds, err := reaper.oc.Builds(namespace).List(kapi.ListOptions{LabelSelector: buildutil.BuildConfigSelector(name)})
if err != nil {
return err
}
bcPotentialBuilds = append(bcPotentialBuilds, builds.Items...)
// Collect deprecated builds related to the config.
// TODO: Delete this block after BuildConfigLabelDeprecated is removed.
builds, err = reaper.oc.Builds(namespace).List(kapi.ListOptions{LabelSelector: buildutil.BuildConfigSelectorDeprecated(name)})
if err != nil {
return err
}
bcPotentialBuilds = append(bcPotentialBuilds, builds.Items...)
// A map of builds associated with this build configuration
bcBuilds := make(map[ktypes.UID]buildapi.Build)
// Because of name length limits in the BuildConfigSelector, annotations are used to ensure
// reliable selection of associated builds.
for _, build := range bcPotentialBuilds {
if build.Annotations != nil {
if bcName, ok := build.Annotations[buildapi.BuildConfigAnnotation]; ok {
// The annotation, if present, has the full build config name.
if bcName != name {
// If the name does not match exactly, the build is not truly associated with the build configuration
continue
}
}
}
// Note that if there is no annotation, this is a deprecated build spec
// and we choose to include it in the deletion having matched only the BuildConfigSelectorDeprecated
// Use a map to union the lists returned by the contemporary & deprecated build queries
// (there will be overlap between the lists, and we only want to try to delete each build once)
bcBuilds[build.UID] = build
}
// If there are builds associated with this build configuration, pause it before attempting the deletion
if len(bcBuilds) > 0 {
// Add paused annotation to the build config pending the deletion
err = unversioned.RetryOnConflict(unversioned.DefaultRetry, func() error {
bc, err := reaper.oc.BuildConfigs(namespace).Get(name)
if err != nil {
return err
}
// Ignore if the annotation already exists
if strings.ToLower(bc.Annotations[buildapi.BuildConfigPausedAnnotation]) == "true" {
return nil
}
// Set the annotation and update
if err := util.AddObjectAnnotations(bc, map[string]string{buildapi.BuildConfigPausedAnnotation: "true"}); err != nil {
return err
}
_, err = reaper.oc.BuildConfigs(namespace).Update(bc)
return err
})
if err != nil {
return err
}
}
// Warn the user if the BuildConfig won't get deleted after this point.
bcDeleted := false
defer func() {
if !bcDeleted {
glog.Warningf("BuildConfig %s/%s will not be deleted because not all associated builds could be deleted. You can try re-running the command or removing them manually", namespace, name)
}
}()
// For the benefit of test cases, sort the UIDs so that the deletion order is deterministic
buildUIDs := make([]string, 0, len(bcBuilds))
for buildUID := range bcBuilds {
buildUIDs = append(buildUIDs, string(buildUID))
}
sort.Strings(buildUIDs)
errList := []error{}
for _, buildUID := range buildUIDs {
build := bcBuilds[ktypes.UID(buildUID)]
if err := reaper.oc.Builds(namespace).Delete(build.Name); err != nil {
glog.Warningf("Cannot delete Build %s/%s: %v", build.Namespace, build.Name, err)
if !kerrors.IsNotFound(err) {
errList = append(errList, err)
}
}
}
// Aggregate all errors
if len(errList) > 0 {
return kutilerrors.NewAggregate(errList)
}
if err := reaper.oc.BuildConfigs(namespace).Delete(name); err != nil {
return err
}
bcDeleted = true
return nil
}
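A sketch of how this reaper might be driven; the vendored import paths below are assumptions based on this file's location:

```go
package example

import (
	"time"

	buildcmd "github.com/openshift/origin/pkg/build/cmd" // assumed path
	osclient "github.com/openshift/origin/pkg/client"
)

// deleteBuildConfig removes a BuildConfig together with its associated builds
// using the reaper above, with a simple fixed timeout and no grace period.
func deleteBuildConfig(oc *osclient.Client, namespace, name string) error {
	reaper := buildcmd.NewBuildConfigReaper(oc)
	return reaper.Stop(namespace, name, 1*time.Minute, nil)
}
```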

4
vendor/github.com/openshift/origin/pkg/util/doc.go generated vendored Normal file
View File

@ -0,0 +1,4 @@
// Package util implements various utility functions used in both testing and
// implementation of OpenShift. Package util may not depend on any other
// package in the OpenShift package tree.
package util

21
vendor/github.com/openshift/origin/pkg/util/etcd.go generated vendored Normal file
View File

@ -0,0 +1,21 @@
package util
import (
"path"
kapi "k8s.io/kubernetes/pkg/api"
kerrors "k8s.io/kubernetes/pkg/api/errors"
)
// NoNamespaceKeyFunc is the default function for constructing etcd paths to a resource relative to prefix, enforcing that no namespace is set.
// If a namespace is on the context, it errors.
func NoNamespaceKeyFunc(ctx kapi.Context, prefix string, name string) (string, error) {
ns, ok := kapi.NamespaceFrom(ctx)
if ok && len(ns) > 0 {
return "", kerrors.NewBadRequest("Namespace parameter is not allowed.")
}
if len(name) == 0 {
return "", kerrors.NewBadRequest("Name parameter required.")
}
return path.Join(prefix, name), nil
}
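A quick illustration of the key construction, assuming the context helpers from `k8s.io/kubernetes/pkg/api` of this vintage and the vendored import path for this package:

```go
package example

import (
	kapi "k8s.io/kubernetes/pkg/api"

	"github.com/openshift/origin/pkg/util" // assumed vendored path
)

// exampleKeys shows both behaviours documented above.
func exampleKeys() {
	ctx := kapi.NewContext() // no namespace: key is prefix/name
	key, _ := util.NoNamespaceKeyFunc(ctx, "/registry/examples", "sample")
	_ = key // "/registry/examples/sample"

	nsCtx := kapi.WithNamespace(ctx, "demo") // namespace set: the call errors
	_, err := util.NoNamespaceKeyFunc(nsCtx, "/registry/examples", "sample")
	_ = err // BadRequest: "Namespace parameter is not allowed."
}
```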

286
vendor/github.com/openshift/origin/pkg/util/labels.go generated vendored Normal file
View File

@ -0,0 +1,286 @@
package util
import (
"fmt"
"reflect"
kmeta "k8s.io/kubernetes/pkg/api/meta"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
deployapi "github.com/openshift/origin/pkg/deploy/api"
)
// MergeInto flags
const (
OverwriteExistingDstKey = 1 << iota
ErrorOnExistingDstKey
ErrorOnDifferentDstKeyValue
)
// AddObjectLabelsWithFlags will set labels on the target object. Label overwrite behavior
// is controlled by the flags argument.
func AddObjectLabelsWithFlags(obj runtime.Object, labels labels.Set, flags int) error {
if labels == nil {
return nil
}
accessor, err := kmeta.Accessor(obj)
if err != nil {
if _, ok := obj.(*runtime.Unstructured); !ok {
// error out if it's not possible to get an accessor and it's also not an unstructured object
return err
}
} else {
metaLabels := accessor.GetLabels()
if metaLabels == nil {
metaLabels = make(map[string]string)
}
switch objType := obj.(type) {
case *deployapi.DeploymentConfig:
if err := addDeploymentConfigNestedLabels(objType, labels, flags); err != nil {
return fmt.Errorf("unable to add nested labels to %s/%s: %v", obj.GetObjectKind().GroupVersionKind(), accessor.GetName(), err)
}
}
if err := MergeInto(metaLabels, labels, flags); err != nil {
return fmt.Errorf("unable to add labels to %s/%s: %v", obj.GetObjectKind().GroupVersionKind(), accessor.GetName(), err)
}
accessor.SetLabels(metaLabels)
return nil
}
// handle unstructured object
// TODO: allow meta.Accessor to handle runtime.Unstructured
if unstruct, ok := obj.(*runtime.Unstructured); ok && unstruct.Object != nil {
// the presence of "metadata" is sufficient for us to apply the rules for Kube-like
// objects.
// TODO: add swagger detection to allow this to happen more effectively
if obj, ok := unstruct.Object["metadata"]; ok {
if m, ok := obj.(map[string]interface{}); ok {
existing := make(map[string]string)
if l, ok := m["labels"]; ok {
if found, ok := interfaceToStringMap(l); ok {
existing = found
}
}
if err := MergeInto(existing, labels, flags); err != nil {
return err
}
m["labels"] = mapToGeneric(existing)
}
return nil
}
// only attempt to set root labels if a root object called labels exists
// TODO: add swagger detection to allow this to happen more effectively
if obj, ok := unstruct.Object["labels"]; ok {
existing := make(map[string]string)
if found, ok := interfaceToStringMap(obj); ok {
existing = found
}
if err := MergeInto(existing, labels, flags); err != nil {
return err
}
unstruct.Object["labels"] = mapToGeneric(existing)
return nil
}
}
return nil
}
// AddObjectLabels adds new label(s) to a single runtime.Object, overwriting
// existing labels that have the same key.
func AddObjectLabels(obj runtime.Object, labels labels.Set) error {
return AddObjectLabelsWithFlags(obj, labels, OverwriteExistingDstKey)
}
// AddObjectAnnotations adds new annotation(s) to a single runtime.Object
func AddObjectAnnotations(obj runtime.Object, annotations map[string]string) error {
if len(annotations) == 0 {
return nil
}
accessor, err := kmeta.Accessor(obj)
if err != nil {
if _, ok := obj.(*runtime.Unstructured); !ok {
// error out if it's not possible to get an accessor and it's also not an unstructured object
return err
}
} else {
metaAnnotations := accessor.GetAnnotations()
if metaAnnotations == nil {
metaAnnotations = make(map[string]string)
}
switch objType := obj.(type) {
case *deployapi.DeploymentConfig:
if err := addDeploymentConfigNestedAnnotations(objType, annotations); err != nil {
return fmt.Errorf("unable to add nested annotations to %s/%s: %v", obj.GetObjectKind().GroupVersionKind(), accessor.GetName(), err)
}
}
MergeInto(metaAnnotations, annotations, OverwriteExistingDstKey)
accessor.SetAnnotations(metaAnnotations)
return nil
}
// handle unstructured object
// TODO: allow meta.Accessor to handle runtime.Unstructured
if unstruct, ok := obj.(*runtime.Unstructured); ok && unstruct.Object != nil {
// the presence of "metadata" is sufficient for us to apply the rules for Kube-like
// objects.
// TODO: add swagger detection to allow this to happen more effectively
if obj, ok := unstruct.Object["metadata"]; ok {
if m, ok := obj.(map[string]interface{}); ok {
existing := make(map[string]string)
if l, ok := m["annotations"]; ok {
if found, ok := interfaceToStringMap(l); ok {
existing = found
}
}
if err := MergeInto(existing, annotations, OverwriteExistingDstKey); err != nil {
return err
}
m["annotations"] = mapToGeneric(existing)
}
return nil
}
// only attempt to set root annotations if a root object called annotations exists
// TODO: add swagger detection to allow this to happen more effectively
if obj, ok := unstruct.Object["annotations"]; ok {
existing := make(map[string]string)
if found, ok := interfaceToStringMap(obj); ok {
existing = found
}
if err := MergeInto(existing, annotations, OverwriteExistingDstKey); err != nil {
return err
}
unstruct.Object["annotations"] = mapToGeneric(existing)
return nil
}
}
return nil
}
// addDeploymentConfigNestedLabels adds new label(s) to a nested labels of a single DeploymentConfig object
func addDeploymentConfigNestedLabels(obj *deployapi.DeploymentConfig, labels labels.Set, flags int) error {
if obj.Spec.Template.Labels == nil {
obj.Spec.Template.Labels = make(map[string]string)
}
if err := MergeInto(obj.Spec.Template.Labels, labels, flags); err != nil {
return fmt.Errorf("unable to add labels to Template.DeploymentConfig.Template.ControllerTemplate.Template: %v", err)
}
return nil
}
func addDeploymentConfigNestedAnnotations(obj *deployapi.DeploymentConfig, annotations map[string]string) error {
if obj.Spec.Template == nil {
return nil
}
if obj.Spec.Template.Annotations == nil {
obj.Spec.Template.Annotations = make(map[string]string)
}
if err := MergeInto(obj.Spec.Template.Annotations, annotations, OverwriteExistingDstKey); err != nil {
return fmt.Errorf("unable to add annotations to Template.DeploymentConfig.Template.ControllerTemplate.Template: %v", err)
}
return nil
}
// interfaceToStringMap extracts a map[string]string from a map[string]interface{}
func interfaceToStringMap(obj interface{}) (map[string]string, bool) {
if obj == nil {
return nil, false
}
lm, ok := obj.(map[string]interface{})
if !ok {
return nil, false
}
existing := make(map[string]string)
for k, v := range lm {
switch t := v.(type) {
case string:
existing[k] = t
}
}
return existing, true
}
// mapToGeneric converts a map[string]string into a map[string]interface{}
func mapToGeneric(obj map[string]string) map[string]interface{} {
if obj == nil {
return nil
}
res := make(map[string]interface{})
for k, v := range obj {
res[k] = v
}
return res
}
// MergeInto merges items from a src map into a dst map.
// Returns an error when the maps are not of the same type.
// Flags:
// - ErrorOnExistingDstKey
// When set: Return an error if any of the dst keys is already set.
// - ErrorOnDifferentDstKeyValue
// When set: Return an error if any of the dst keys is already set
// to a different value than src key.
// - OverwriteExistingDstKey
// When set: Overwrite existing dst key value with src key value.
func MergeInto(dst, src interface{}, flags int) error {
dstVal := reflect.ValueOf(dst)
srcVal := reflect.ValueOf(src)
if dstVal.Kind() != reflect.Map {
return fmt.Errorf("dst is not a valid map: %v", dstVal.Kind())
}
if srcVal.Kind() != reflect.Map {
return fmt.Errorf("src is not a valid map: %v", srcVal.Kind())
}
if dstTyp, srcTyp := dstVal.Type(), srcVal.Type(); !dstTyp.AssignableTo(srcTyp) {
return fmt.Errorf("type mismatch, can't assign '%v' to '%v'", srcTyp, dstTyp)
}
if dstVal.IsNil() {
return fmt.Errorf("dst value is nil")
}
if srcVal.IsNil() {
// Nothing to merge
return nil
}
for _, k := range srcVal.MapKeys() {
if dstVal.MapIndex(k).IsValid() {
if flags&ErrorOnExistingDstKey != 0 {
return fmt.Errorf("dst key already set (ErrorOnExistingDstKey=1), '%v'='%v'", k, dstVal.MapIndex(k))
}
if dstVal.MapIndex(k).String() != srcVal.MapIndex(k).String() {
if flags&ErrorOnDifferentDstKeyValue != 0 {
return fmt.Errorf("dst key already set to a different value (ErrorOnDifferentDstKeyValue=1), '%v'='%v'", k, dstVal.MapIndex(k))
}
if flags&OverwriteExistingDstKey != 0 {
dstVal.SetMapIndex(k, srcVal.MapIndex(k))
}
}
} else {
dstVal.SetMapIndex(k, srcVal.MapIndex(k))
}
}
return nil
}
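A usage sketch for `MergeInto` with the flag constants defined at the top of this file (import path assumed from the vendored location):

```go
package main

import (
	"fmt"

	"github.com/openshift/origin/pkg/util"
)

func main() {
	dst := map[string]string{"app": "web"}
	src := map[string]string{"app": "api", "tier": "frontend"}

	// OverwriteExistingDstKey: existing keys are replaced by the src values.
	if err := util.MergeInto(dst, src, util.OverwriteExistingDstKey); err != nil {
		panic(err)
	}
	fmt.Println(dst) // map[app:api tier:frontend]

	// ErrorOnExistingDstKey would have failed instead, because "app" was set.
}
```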

21
vendor/github.com/openshift/origin/pkg/util/strings.go generated vendored Normal file
View File

@ -0,0 +1,21 @@
package util
import "sort"
// UniqueStrings returns a sorted, uniquified slice of the specified strings
func UniqueStrings(strings []string) []string {
m := make(map[string]bool, len(strings))
for _, s := range strings {
m[s] = true
}
i := 0
strings = make([]string, len(m), len(m))
for s := range m {
strings[i] = s
i++
}
sort.Strings(strings)
return strings
}
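For completeness, the helper above sorts as well as de-duplicates:

```go
package main

import (
	"fmt"

	"github.com/openshift/origin/pkg/util" // assumed vendored path
)

func main() {
	fmt.Println(util.UniqueStrings([]string{"web", "db", "web", "api"}))
	// prints: [api db web]
}
```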

View File

@ -13,14 +13,14 @@
// // load TOML data stored in a string // // load TOML data stored in a string
// tree, err := toml.Load(stringContainingTomlData) // tree, err := toml.Load(stringContainingTomlData)
// //
// Either way, the result is a TomlTree object that can be used to navigate the // Either way, the result is a Tree object that can be used to navigate the
// structure and data within the original document. // structure and data within the original document.
// //
// //
// Getting data from the TomlTree // Getting data from the Tree
// //
// After parsing TOML data with Load() or LoadFile(), use the Has() and Get() // After parsing TOML data with Load() or LoadFile(), use the Has() and Get()
// methods on the returned TomlTree, to find your way through the document data. // methods on the returned Tree, to find your way through the document data.
// //
// if tree.Has("foo") { // if tree.Has("foo") {
// fmt.Println("foo is:", tree.Get("foo")) // fmt.Println("foo is:", tree.Get("foo"))
@ -50,11 +50,11 @@
// tree.GetPath([]string{"foo","bar","baz"}) // tree.GetPath([]string{"foo","bar","baz"})
// //
// Note that this is distinct from the heavyweight query syntax supported by // Note that this is distinct from the heavyweight query syntax supported by
// TomlTree.Query() and the Query() struct (see below). // Tree.Query() and the Query() struct (see below).
// //
// Position Support // Position Support
// //
// Each element within the TomlTree is stored with position metadata, which is // Each element within the Tree is stored with position metadata, which is
// invaluable for providing semantic feedback to a user. This helps in // invaluable for providing semantic feedback to a user. This helps in
// situations where the TOML file parses correctly, but contains data that is // situations where the TOML file parses correctly, but contains data that is
// not correct for the application. In such cases, an error message can be // not correct for the application. In such cases, an error message can be
@ -75,176 +75,10 @@
// return fmt.Errorf("%v: Expected 'bar' element", tree.GetPosition("")) // return fmt.Errorf("%v: Expected 'bar' element", tree.GetPosition(""))
// } // }
// //
// Query Support // JSONPath-like queries
// //
// The TOML query path implementation is based loosely on the JSONPath specification: // The package github.com/pelletier/go-toml/query implements a system
// http://goessner.net/articles/JsonPath/ // similar to JSONPath to quickly retrieve elements of a TOML document using a
// // single expression. See the package documentation for more information.
// The idea behind a query path is to allow quick access to any element, or set
// of elements within TOML document, with a single expression.
//
// result, err := tree.Query("$.foo.bar.baz")
//
// This is roughly equivalent to:
//
// next := tree.Get("foo")
// if next != nil {
// next = next.Get("bar")
// if next != nil {
// next = next.Get("baz")
// }
// }
// result := next
//
// err is nil if any parsing exception occurs.
//
// If no node in the tree matches the query, result will simply contain an empty list of
// items.
//
// As illustrated above, the query path is much more efficient, especially since
// the structure of the TOML file can vary. Rather than making assumptions about
// a document's structure, a query allows the programmer to make structured
// requests into the document, and get zero or more values as a result.
//
// The syntax of a query begins with a root token, followed by any number
// sub-expressions:
//
// $
// Root of the TOML tree. This must always come first.
// .name
// Selects child of this node, where 'name' is a TOML key
// name.
// ['name']
// Selects child of this node, where 'name' is a string
// containing a TOML key name.
// [index]
// Selects child array element at 'index'.
// ..expr
// Recursively selects all children, filtered by a union,
// index, or slice expression.
// ..*
// Recursive selection of all nodes at this point in the
// tree.
// .*
// Selects all children of the current node.
// [expr,expr]
// Union operator - a logical 'or' grouping of two or more
// sub-expressions: index, key name, or filter.
// [start:end:step]
// Slice operator - selects array elements from start to
// end-1, at the given step. All three arguments are
// optional.
// [?(filter)]
// Named filter expression - the function 'filter' is
// used to filter children at this node.
//
// Query Indexes And Slices
//
// Index expressions perform no bounds checking, and will contribute no
// values to the result set if the provided index or index range is invalid.
// Negative indexes represent values from the end of the array, counting backwards.
//
// // select the last index of the array named 'foo'
// tree.Query("$.foo[-1]")
//
// Slice expressions are supported, by using ':' to separate a start/end index pair.
//
// // select up to the first five elements in the array
// tree.Query("$.foo[0:5]")
//
// Slice expressions also allow negative indexes for the start and stop
// arguments.
//
// // select all array elements.
// tree.Query("$.foo[0:-1]")
//
// Slice expressions may have an optional stride/step parameter:
//
// // select every other element
// tree.Query("$.foo[0:-1:2]")
//
// Slice start and end parameters are also optional:
//
// // these are all equivalent and select all the values in the array
// tree.Query("$.foo[:]")
// tree.Query("$.foo[0:]")
// tree.Query("$.foo[:-1]")
// tree.Query("$.foo[0:-1:]")
// tree.Query("$.foo[::1]")
// tree.Query("$.foo[0::1]")
// tree.Query("$.foo[:-1:1]")
// tree.Query("$.foo[0:-1:1]")
//
// Query Filters
//
// Query filters are used within a Union [,] or single Filter [] expression.
// A filter only allows nodes that qualify through to the next expression,
// and/or into the result set.
//
// // returns children of foo that are permitted by the 'bar' filter.
// tree.Query("$.foo[?(bar)]")
//
// There are several filters provided with the library:
//
// tree
// Allows nodes of type TomlTree.
// int
// Allows nodes of type int64.
// float
// Allows nodes of type float64.
// string
// Allows nodes of type string.
// time
// Allows nodes of type time.Time.
// bool
// Allows nodes of type bool.
//
// Query Results
//
// An executed query returns a QueryResult object. This contains the nodes
// in the TOML tree that qualify the query expression. Position information
// is also available for each value in the set.
//
// // display the results of a query
// results := tree.Query("$.foo.bar.baz")
// for idx, value := results.Values() {
// fmt.Println("%v: %v", results.Positions()[idx], value)
// }
//
// Compiled Queries
//
// Queries may be executed directly on a TomlTree object, or compiled ahead
// of time and executed discretely. The former is more convenient, but has the
// penalty of having to recompile the query expression each time.
//
// // basic query
// results := tree.Query("$.foo.bar.baz")
//
// // compiled query
// query := toml.CompileQuery("$.foo.bar.baz")
// results := query.Execute(tree)
//
// // run the compiled query again on a different tree
// moreResults := query.Execute(anotherTree)
//
// User Defined Query Filters
//
// Filter expressions may also be user defined by using the SetFilter()
// function on the Query object. The function must return true/false, which
// signifies if the passed node is kept or discarded, respectively.
//
// // create a query that references a user-defined filter
// query, _ := CompileQuery("$[?(bazOnly)]")
//
// // define the filter, and assign it to the query
// query.SetFilter("bazOnly", func(node interface{}) bool{
// if tree, ok := node.(*TomlTree); ok {
// return tree.Has("baz")
// }
// return false // reject all other node types
// })
//
// // run the query
// query.Execute(tree)
// //
package toml package toml
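The renamed `Tree` type keeps the same entry points; a minimal example of loading a document and reading a value (queries now live in the separate `query` package, as noted above):

```go
package main

import (
	"fmt"

	"github.com/pelletier/go-toml"
)

func main() {
	tree, err := toml.Load(`
[postgres]
user = "pelletier"
password = "mypassword"
`)
	if err != nil {
		panic(err)
	}
	if tree.Has("postgres.user") {
		fmt.Println(tree.Get("postgres.user")) // pelletier
	}
}
```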

View File

@ -1,6 +1,7 @@
package toml package toml
import ( import (
"bytes"
"errors" "errors"
"fmt" "fmt"
"reflect" "reflect"
@ -9,14 +10,14 @@ import (
) )
/* /*
TomlTree structural types and corresponding marshal types Tree structural types and corresponding marshal types
------------------------------------------------------------------------------- -------------------------------------------------------------------------------
*TomlTree (*)struct, (*)map[string]interface{} *Tree (*)struct, (*)map[string]interface{}
[]*TomlTree (*)[](*)struct, (*)[](*)map[string]interface{} []*Tree (*)[](*)struct, (*)[](*)map[string]interface{}
[]interface{} (as interface{}) (*)[]primitive, (*)[]([]interface{}) []interface{} (as interface{}) (*)[]primitive, (*)[]([]interface{})
interface{} (*)primitive interface{} (*)primitive
TomlTree primitive types and corresponding marshal types Tree primitive types and corresponding marshal types
----------------------------------------------------------- -----------------------------------------------------------
uint64 uint, uint8-uint64, pointers to same uint64 uint, uint8-uint64, pointers to same
int64 int, int8-uint64, pointers to same int64 int, int8-uint64, pointers to same
@ -35,7 +36,7 @@ type tomlOpts struct {
var timeType = reflect.TypeOf(time.Time{}) var timeType = reflect.TypeOf(time.Time{})
var marshalerType = reflect.TypeOf(new(Marshaler)).Elem() var marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
// Check if the given marshall type maps to a TomlTree primitive // Check if the given marshall type maps to a Tree primitive
func isPrimitive(mtype reflect.Type) bool { func isPrimitive(mtype reflect.Type) bool {
switch mtype.Kind() { switch mtype.Kind() {
case reflect.Ptr: case reflect.Ptr:
@ -57,7 +58,7 @@ func isPrimitive(mtype reflect.Type) bool {
} }
} }
// Check if the given marshall type maps to a TomlTree slice // Check if the given marshall type maps to a Tree slice
func isTreeSlice(mtype reflect.Type) bool { func isTreeSlice(mtype reflect.Type) bool {
switch mtype.Kind() { switch mtype.Kind() {
case reflect.Slice: case reflect.Slice:
@ -67,7 +68,7 @@ func isTreeSlice(mtype reflect.Type) bool {
} }
} }
// Check if the given marshall type maps to a non-TomlTree slice // Check if the given marshall type maps to a non-Tree slice
func isOtherSlice(mtype reflect.Type) bool { func isOtherSlice(mtype reflect.Type) bool {
switch mtype.Kind() { switch mtype.Kind() {
case reflect.Ptr: case reflect.Ptr:
@ -79,7 +80,7 @@ func isOtherSlice(mtype reflect.Type) bool {
} }
} }
// Check if the given marshall type maps to a TomlTree // Check if the given marshall type maps to a Tree
func isTree(mtype reflect.Type) bool { func isTree(mtype reflect.Type) bool {
switch mtype.Kind() { switch mtype.Kind() {
case reflect.Map: case reflect.Map:
@ -133,11 +134,11 @@ func Marshal(v interface{}) ([]byte, error) {
} }
// Convert given marshal struct or map value to toml tree // Convert given marshal struct or map value to toml tree
func valueToTree(mtype reflect.Type, mval reflect.Value) (*TomlTree, error) { func valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) {
if mtype.Kind() == reflect.Ptr { if mtype.Kind() == reflect.Ptr {
return valueToTree(mtype.Elem(), mval.Elem()) return valueToTree(mtype.Elem(), mval.Elem())
} }
tval := newTomlTree() tval := newTree()
switch mtype.Kind() { switch mtype.Kind() {
case reflect.Struct: case reflect.Struct:
for i := 0; i < mtype.NumField(); i++ { for i := 0; i < mtype.NumField(); i++ {
@ -165,8 +166,8 @@ func valueToTree(mtype reflect.Type, mval reflect.Value) (*TomlTree, error) {
} }
// Convert given marshal slice to slice of Toml trees // Convert given marshal slice to slice of Toml trees
func valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*TomlTree, error) { func valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*Tree, error) {
tval := make([]*TomlTree, mval.Len(), mval.Len()) tval := make([]*Tree, mval.Len(), mval.Len())
for i := 0; i < mval.Len(); i++ { for i := 0; i < mval.Len(); i++ {
val, err := valueToTree(mtype.Elem(), mval.Index(i)) val, err := valueToTree(mtype.Elem(), mval.Index(i))
if err != nil { if err != nil {
@ -224,24 +225,15 @@ func valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) {
} }
} }
/* // Unmarshal attempts to unmarshal the Tree into a Go struct pointed by v.
Unmarshal parses the TOML-encoded data and stores the result in the value // Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for
pointed to by v. Behavior is similar to the Go json encoder, except that there // sub-structs, and only definite types can be unmarshaled.
is no concept of an Unmarshaler interface or UnmarshalTOML function for func (t *Tree) Unmarshal(v interface{}) error {
sub-structs, and currently only definite types can be unmarshaled to (i.e. no
`interface{}`).
*/
func Unmarshal(data []byte, v interface{}) error {
mtype := reflect.TypeOf(v) mtype := reflect.TypeOf(v)
if mtype.Kind() != reflect.Ptr || mtype.Elem().Kind() != reflect.Struct { if mtype.Kind() != reflect.Ptr || mtype.Elem().Kind() != reflect.Struct {
return errors.New("Only a pointer to struct can be unmarshaled from TOML") return errors.New("Only a pointer to struct can be unmarshaled from TOML")
} }
t, err := Load(string(data))
if err != nil {
return err
}
sval, err := valueFromTree(mtype.Elem(), t) sval, err := valueFromTree(mtype.Elem(), t)
if err != nil { if err != nil {
return err return err
@ -250,8 +242,21 @@ func Unmarshal(data []byte, v interface{}) error {
return nil return nil
} }
// Unmarshal parses the TOML-encoded data and stores the result in the value
// pointed to by v. Behavior is similar to the Go json encoder, except that there
// is no concept of an Unmarshaler interface or UnmarshalTOML function for
// sub-structs, and currently only definite types can be unmarshaled to (i.e. no
// `interface{}`).
func Unmarshal(data []byte, v interface{}) error {
t, err := LoadReader(bytes.NewReader(data))
if err != nil {
return err
}
return t.Unmarshal(v)
}
// Convert toml tree to marshal struct or map, using marshal type // Convert toml tree to marshal struct or map, using marshal type
func valueFromTree(mtype reflect.Type, tval *TomlTree) (reflect.Value, error) { func valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, error) {
if mtype.Kind() == reflect.Ptr { if mtype.Kind() == reflect.Ptr {
return unwrapPointer(mtype, tval) return unwrapPointer(mtype, tval)
} }
@ -290,7 +295,7 @@ func valueFromTree(mtype reflect.Type, tval *TomlTree) (reflect.Value, error) {
} }
// Convert toml value to marshal struct/map slice, using marshal type // Convert toml value to marshal struct/map slice, using marshal type
func valueFromTreeSlice(mtype reflect.Type, tval []*TomlTree) (reflect.Value, error) { func valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) {
mval := reflect.MakeSlice(mtype, len(tval), len(tval)) mval := reflect.MakeSlice(mtype, len(tval), len(tval))
for i := 0; i < len(tval); i++ { for i := 0; i < len(tval); i++ {
val, err := valueFromTree(mtype.Elem(), tval[i]) val, err := valueFromTree(mtype.Elem(), tval[i])
@ -322,9 +327,9 @@ func valueFromToml(mtype reflect.Type, tval interface{}) (reflect.Value, error)
} }
switch { switch {
case isTree(mtype): case isTree(mtype):
return valueFromTree(mtype, tval.(*TomlTree)) return valueFromTree(mtype, tval.(*Tree))
case isTreeSlice(mtype): case isTreeSlice(mtype):
return valueFromTreeSlice(mtype, tval.([]*TomlTree)) return valueFromTreeSlice(mtype, tval.([]*Tree))
case isOtherSlice(mtype): case isOtherSlice(mtype):
return valueFromOtherSlice(mtype, tval.([]interface{})) return valueFromOtherSlice(mtype, tval.([]interface{}))
default: default:
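`Unmarshal` now simply loads the document and delegates to the new `Tree.Unmarshal`; decoding into a struct looks the same as before:

```go
package main

import (
	"fmt"

	"github.com/pelletier/go-toml"
)

type Config struct {
	Host string `toml:"host"`
	Port int64  `toml:"port"`
}

func main() {
	data := []byte("host = \"127.0.0.1\"\nport = 5432\n")

	var cfg Config
	if err := toml.Unmarshal(data, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {Host:127.0.0.1 Port:5432}
}
```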

View File

@ -1,234 +0,0 @@
package toml
import (
"fmt"
)
// support function to set positions for tomlValues
// NOTE: this is done to allow ctx.lastPosition to indicate the start of any
// values returned by the query engines
func tomlValueCheck(node interface{}, ctx *queryContext) interface{} {
switch castNode := node.(type) {
case *tomlValue:
ctx.lastPosition = castNode.position
return castNode.value
case []*TomlTree:
if len(castNode) > 0 {
ctx.lastPosition = castNode[0].position
}
return node
default:
return node
}
}
// base match
type matchBase struct {
next pathFn
}
func (f *matchBase) setNext(next pathFn) {
f.next = next
}
// terminating functor - gathers results
type terminatingFn struct {
// empty
}
func newTerminatingFn() *terminatingFn {
return &terminatingFn{}
}
func (f *terminatingFn) setNext(next pathFn) {
// do nothing
}
func (f *terminatingFn) call(node interface{}, ctx *queryContext) {
switch castNode := node.(type) {
case *TomlTree:
ctx.result.appendResult(node, castNode.position)
case *tomlValue:
ctx.result.appendResult(node, castNode.position)
default:
// use last position for scalars
ctx.result.appendResult(node, ctx.lastPosition)
}
}
// match single key
type matchKeyFn struct {
matchBase
Name string
}
func newMatchKeyFn(name string) *matchKeyFn {
return &matchKeyFn{Name: name}
}
func (f *matchKeyFn) call(node interface{}, ctx *queryContext) {
if array, ok := node.([]*TomlTree); ok {
for _, tree := range array {
item := tree.values[f.Name]
if item != nil {
f.next.call(item, ctx)
}
}
} else if tree, ok := node.(*TomlTree); ok {
item := tree.values[f.Name]
if item != nil {
f.next.call(item, ctx)
}
}
}
// match single index
type matchIndexFn struct {
matchBase
Idx int
}
func newMatchIndexFn(idx int) *matchIndexFn {
return &matchIndexFn{Idx: idx}
}
func (f *matchIndexFn) call(node interface{}, ctx *queryContext) {
if arr, ok := tomlValueCheck(node, ctx).([]interface{}); ok {
if f.Idx < len(arr) && f.Idx >= 0 {
f.next.call(arr[f.Idx], ctx)
}
}
}
// filter by slicing
type matchSliceFn struct {
matchBase
Start, End, Step int
}
func newMatchSliceFn(start, end, step int) *matchSliceFn {
return &matchSliceFn{Start: start, End: end, Step: step}
}
func (f *matchSliceFn) call(node interface{}, ctx *queryContext) {
if arr, ok := tomlValueCheck(node, ctx).([]interface{}); ok {
// adjust indexes for negative values, reverse ordering
realStart, realEnd := f.Start, f.End
if realStart < 0 {
realStart = len(arr) + realStart
}
if realEnd < 0 {
realEnd = len(arr) + realEnd
}
if realEnd < realStart {
realEnd, realStart = realStart, realEnd // swap
}
// loop and gather
for idx := realStart; idx < realEnd; idx += f.Step {
f.next.call(arr[idx], ctx)
}
}
}
// match anything
type matchAnyFn struct {
matchBase
}
func newMatchAnyFn() *matchAnyFn {
return &matchAnyFn{}
}
func (f *matchAnyFn) call(node interface{}, ctx *queryContext) {
if tree, ok := node.(*TomlTree); ok {
for _, v := range tree.values {
f.next.call(v, ctx)
}
}
}
// filter through union
type matchUnionFn struct {
Union []pathFn
}
func (f *matchUnionFn) setNext(next pathFn) {
for _, fn := range f.Union {
fn.setNext(next)
}
}
func (f *matchUnionFn) call(node interface{}, ctx *queryContext) {
for _, fn := range f.Union {
fn.call(node, ctx)
}
}
// match every single last node in the tree
type matchRecursiveFn struct {
matchBase
}
func newMatchRecursiveFn() *matchRecursiveFn {
return &matchRecursiveFn{}
}
func (f *matchRecursiveFn) call(node interface{}, ctx *queryContext) {
if tree, ok := node.(*TomlTree); ok {
var visit func(tree *TomlTree)
visit = func(tree *TomlTree) {
for _, v := range tree.values {
f.next.call(v, ctx)
switch node := v.(type) {
case *TomlTree:
visit(node)
case []*TomlTree:
for _, subtree := range node {
visit(subtree)
}
}
}
}
f.next.call(tree, ctx)
visit(tree)
}
}
// match based on an externally provided functional filter
type matchFilterFn struct {
matchBase
Pos Position
Name string
}
func newMatchFilterFn(name string, pos Position) *matchFilterFn {
return &matchFilterFn{Name: name, Pos: pos}
}
func (f *matchFilterFn) call(node interface{}, ctx *queryContext) {
fn, ok := (*ctx.filters)[f.Name]
if !ok {
panic(fmt.Sprintf("%s: query context does not have filter '%s'",
f.Pos.String(), f.Name))
}
switch castNode := tomlValueCheck(node, ctx).(type) {
case *TomlTree:
for _, v := range castNode.values {
if tv, ok := v.(*tomlValue); ok {
if fn(tv.value) {
f.next.call(v, ctx)
}
} else {
if fn(v) {
f.next.call(v, ctx)
}
}
}
case []interface{}:
for _, v := range castNode {
if fn(v) {
f.next.call(v, ctx)
}
}
}
}

View File

@ -14,7 +14,7 @@ import (
type tomlParser struct { type tomlParser struct {
flow chan token flow chan token
tree *TomlTree tree *Tree
tokensBuffer []token tokensBuffer []token
currentTable []string currentTable []string
seenTableKeys []string seenTableKeys []string
@ -106,18 +106,18 @@ func (p *tomlParser) parseGroupArray() tomlParserStateFn {
} }
p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries
destTree := p.tree.GetPath(keys) destTree := p.tree.GetPath(keys)
var array []*TomlTree var array []*Tree
if destTree == nil { if destTree == nil {
array = make([]*TomlTree, 0) array = make([]*Tree, 0)
} else if target, ok := destTree.([]*TomlTree); ok && target != nil { } else if target, ok := destTree.([]*Tree); ok && target != nil {
array = destTree.([]*TomlTree) array = destTree.([]*Tree)
} else { } else {
p.raiseError(key, "key %s is already assigned and not of type table array", key) p.raiseError(key, "key %s is already assigned and not of type table array", key)
} }
p.currentTable = keys p.currentTable = keys
// add a new tree to the end of the table array // add a new tree to the end of the table array
newTree := newTomlTree() newTree := newTree()
newTree.position = startToken.Position newTree.position = startToken.Position
array = append(array, newTree) array = append(array, newTree)
p.tree.SetPath(p.currentTable, array) p.tree.SetPath(p.currentTable, array)
@ -183,11 +183,11 @@ func (p *tomlParser) parseAssign() tomlParserStateFn {
} }
// find the table to assign, looking out for arrays of tables // find the table to assign, looking out for arrays of tables
var targetNode *TomlTree var targetNode *Tree
switch node := p.tree.GetPath(tableKey).(type) { switch node := p.tree.GetPath(tableKey).(type) {
case []*TomlTree: case []*Tree:
targetNode = node[len(node)-1] targetNode = node[len(node)-1]
case *TomlTree: case *Tree:
targetNode = node targetNode = node
default: default:
p.raiseError(key, "Unknown table type for path: %s", p.raiseError(key, "Unknown table type for path: %s",
@ -212,7 +212,7 @@ func (p *tomlParser) parseAssign() tomlParserStateFn {
var toInsert interface{} var toInsert interface{}
switch value.(type) { switch value.(type) {
case *TomlTree, []*TomlTree: case *Tree, []*Tree:
toInsert = value toInsert = value
default: default:
toInsert = &tomlValue{value, key.Position} toInsert = &tomlValue{value, key.Position}
@ -289,8 +289,8 @@ func tokenIsComma(t *token) bool {
return t != nil && t.typ == tokenComma return t != nil && t.typ == tokenComma
} }
func (p *tomlParser) parseInlineTable() *TomlTree { func (p *tomlParser) parseInlineTable() *Tree {
tree := newTomlTree() tree := newTree()
var previous *token var previous *token
Loop: Loop:
for { for {
@ -360,22 +360,22 @@ func (p *tomlParser) parseArray() interface{} {
p.getToken() p.getToken()
} }
} }
// An array of TomlTrees is actually an array of inline // An array of Trees is actually an array of inline
// tables, which is a shorthand for a table array. If the // tables, which is a shorthand for a table array. If the
// array was not converted from []interface{} to []*TomlTree, // array was not converted from []interface{} to []*Tree,
// the two notations would not be equivalent. // the two notations would not be equivalent.
if arrayType == reflect.TypeOf(newTomlTree()) { if arrayType == reflect.TypeOf(newTree()) {
tomlArray := make([]*TomlTree, len(array)) tomlArray := make([]*Tree, len(array))
for i, v := range array { for i, v := range array {
tomlArray[i] = v.(*TomlTree) tomlArray[i] = v.(*Tree)
} }
return tomlArray return tomlArray
} }
return array return array
} }
func parseToml(flow chan token) *TomlTree { func parseToml(flow chan token) *Tree {
result := newTomlTree() result := newTree()
result.position = Position{1, 1} result.position = Position{1, 1}
parser := &tomlParser{ parser := &tomlParser{
flow: flow, flow: flow,

View File

@ -1,153 +0,0 @@
package toml
import (
"time"
)
// NodeFilterFn represents a user-defined filter function, for use with
// Query.SetFilter().
//
// The return value of the function must indicate if 'node' is to be included
// at this stage of the TOML path. Returning true will include the node, and
// returning false will exclude it.
//
// NOTE: Care should be taken to write script callbacks such that they are safe
// to use from multiple goroutines.
type NodeFilterFn func(node interface{}) bool
// QueryResult is the result of Executing a Query.
type QueryResult struct {
items []interface{}
positions []Position
}
// appends a value/position pair to the result set.
func (r *QueryResult) appendResult(node interface{}, pos Position) {
r.items = append(r.items, node)
r.positions = append(r.positions, pos)
}
// Values is a set of values within a QueryResult. The order of values is not
// guaranteed to be in document order, and may be different each time a query is
// executed.
func (r QueryResult) Values() []interface{} {
values := make([]interface{}, len(r.items))
for i, v := range r.items {
o, ok := v.(*tomlValue)
if ok {
values[i] = o.value
} else {
values[i] = v
}
}
return values
}
// Positions is a set of positions for values within a QueryResult. Each index
// in Positions() corresponds to the entry in Value() of the same index.
func (r QueryResult) Positions() []Position {
return r.positions
}
// runtime context for executing query paths
type queryContext struct {
result *QueryResult
filters *map[string]NodeFilterFn
lastPosition Position
}
// generic path functor interface
type pathFn interface {
setNext(next pathFn)
call(node interface{}, ctx *queryContext)
}
// A Query is the representation of a compiled TOML path. A Query is safe
// for concurrent use by multiple goroutines.
type Query struct {
root pathFn
tail pathFn
filters *map[string]NodeFilterFn
}
func newQuery() *Query {
return &Query{
root: nil,
tail: nil,
filters: &defaultFilterFunctions,
}
}
func (q *Query) appendPath(next pathFn) {
if q.root == nil {
q.root = next
} else {
q.tail.setNext(next)
}
q.tail = next
next.setNext(newTerminatingFn()) // init the next functor
}
// CompileQuery compiles a TOML path expression. The returned Query can be used
// to match elements within a TomlTree and its descendants.
func CompileQuery(path string) (*Query, error) {
return parseQuery(lexQuery(path))
}
// Execute executes a query against a TomlTree, and returns the result of the query.
func (q *Query) Execute(tree *TomlTree) *QueryResult {
result := &QueryResult{
items: []interface{}{},
positions: []Position{},
}
if q.root == nil {
result.appendResult(tree, tree.GetPosition(""))
} else {
ctx := &queryContext{
result: result,
filters: q.filters,
}
q.root.call(tree, ctx)
}
return result
}
// SetFilter sets a user-defined filter function. These may be used inside
// "?(..)" query expressions to filter TOML document elements within a query.
func (q *Query) SetFilter(name string, fn NodeFilterFn) {
if q.filters == &defaultFilterFunctions {
// clone the static table
q.filters = &map[string]NodeFilterFn{}
for k, v := range defaultFilterFunctions {
(*q.filters)[k] = v
}
}
(*q.filters)[name] = fn
}
var defaultFilterFunctions = map[string]NodeFilterFn{
"tree": func(node interface{}) bool {
_, ok := node.(*TomlTree)
return ok
},
"int": func(node interface{}) bool {
_, ok := node.(int64)
return ok
},
"float": func(node interface{}) bool {
_, ok := node.(float64)
return ok
},
"string": func(node interface{}) bool {
_, ok := node.(string)
return ok
},
"time": func(node interface{}) bool {
_, ok := node.(time.Time)
return ok
},
"bool": func(node interface{}) bool {
_, ok := node.(bool)
return ok
},
}
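
The file removed above is the vendored TOML query engine's public entry point (NodeFilterFn, QueryResult, Query, CompileQuery and the built-in filters). For context, a minimal sketch of how that removed API fit together, assuming the vendored library is pelletier/go-toml in its pre-rename form; the TOML content and query path are illustrative:

```go
package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml"
)

func main() {
	// Load builds a TomlTree from a string (its diff appears further down).
	tree, err := toml.Load(`
[servers]
  [servers.alpha]
  ip = "10.0.0.1"
`)
	if err != nil {
		panic(err)
	}

	// CompileQuery turns a jsonpath-like expression into a *Query,
	// which Execute then runs against the tree.
	q, err := toml.CompileQuery("$.servers.alpha.ip")
	if err != nil {
		panic(err)
	}

	// Values returns the matched elements; order is not guaranteed.
	for _, v := range q.Execute(tree).Values() {
		fmt.Println(v)
	}
}
```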


@ -1,356 +0,0 @@
// TOML JSONPath lexer.
//
// Written using the principles developed by Rob Pike in
// http://www.youtube.com/watch?v=HxaD_trXwRE
package toml
import (
"fmt"
"strconv"
"strings"
"unicode/utf8"
)
// Lexer state function
type queryLexStateFn func() queryLexStateFn
// Lexer definition
type queryLexer struct {
input string
start int
pos int
width int
tokens chan token
depth int
line int
col int
stringTerm string
}
func (l *queryLexer) run() {
for state := l.lexVoid; state != nil; {
state = state()
}
close(l.tokens)
}
func (l *queryLexer) nextStart() {
// iterate by runes (utf8 characters)
// search for newlines and advance line/col counts
for i := l.start; i < l.pos; {
r, width := utf8.DecodeRuneInString(l.input[i:])
if r == '\n' {
l.line++
l.col = 1
} else {
l.col++
}
i += width
}
// advance start position to next token
l.start = l.pos
}
func (l *queryLexer) emit(t tokenType) {
l.tokens <- token{
Position: Position{l.line, l.col},
typ: t,
val: l.input[l.start:l.pos],
}
l.nextStart()
}
func (l *queryLexer) emitWithValue(t tokenType, value string) {
l.tokens <- token{
Position: Position{l.line, l.col},
typ: t,
val: value,
}
l.nextStart()
}
func (l *queryLexer) next() rune {
if l.pos >= len(l.input) {
l.width = 0
return eof
}
var r rune
r, l.width = utf8.DecodeRuneInString(l.input[l.pos:])
l.pos += l.width
return r
}
func (l *queryLexer) ignore() {
l.nextStart()
}
func (l *queryLexer) backup() {
l.pos -= l.width
}
func (l *queryLexer) errorf(format string, args ...interface{}) queryLexStateFn {
l.tokens <- token{
Position: Position{l.line, l.col},
typ: tokenError,
val: fmt.Sprintf(format, args...),
}
return nil
}
func (l *queryLexer) peek() rune {
r := l.next()
l.backup()
return r
}
func (l *queryLexer) accept(valid string) bool {
if strings.ContainsRune(valid, l.next()) {
return true
}
l.backup()
return false
}
func (l *queryLexer) follow(next string) bool {
return strings.HasPrefix(l.input[l.pos:], next)
}
func (l *queryLexer) lexVoid() queryLexStateFn {
for {
next := l.peek()
switch next {
case '$':
l.pos++
l.emit(tokenDollar)
continue
case '.':
if l.follow("..") {
l.pos += 2
l.emit(tokenDotDot)
} else {
l.pos++
l.emit(tokenDot)
}
continue
case '[':
l.pos++
l.emit(tokenLeftBracket)
continue
case ']':
l.pos++
l.emit(tokenRightBracket)
continue
case ',':
l.pos++
l.emit(tokenComma)
continue
case '*':
l.pos++
l.emit(tokenStar)
continue
case '(':
l.pos++
l.emit(tokenLeftParen)
continue
case ')':
l.pos++
l.emit(tokenRightParen)
continue
case '?':
l.pos++
l.emit(tokenQuestion)
continue
case ':':
l.pos++
l.emit(tokenColon)
continue
case '\'':
l.ignore()
l.stringTerm = string(next)
return l.lexString
case '"':
l.ignore()
l.stringTerm = string(next)
return l.lexString
}
if isSpace(next) {
l.next()
l.ignore()
continue
}
if isAlphanumeric(next) {
return l.lexKey
}
if next == '+' || next == '-' || isDigit(next) {
return l.lexNumber
}
if l.next() == eof {
break
}
return l.errorf("unexpected char: '%v'", next)
}
l.emit(tokenEOF)
return nil
}
func (l *queryLexer) lexKey() queryLexStateFn {
for {
next := l.peek()
if !isAlphanumeric(next) {
l.emit(tokenKey)
return l.lexVoid
}
if l.next() == eof {
break
}
}
l.emit(tokenEOF)
return nil
}
func (l *queryLexer) lexString() queryLexStateFn {
l.pos++
l.ignore()
growingString := ""
for {
if l.follow(l.stringTerm) {
l.emitWithValue(tokenString, growingString)
l.pos++
l.ignore()
return l.lexVoid
}
if l.follow("\\\"") {
l.pos++
growingString += "\""
} else if l.follow("\\'") {
l.pos++
growingString += "'"
} else if l.follow("\\n") {
l.pos++
growingString += "\n"
} else if l.follow("\\b") {
l.pos++
growingString += "\b"
} else if l.follow("\\f") {
l.pos++
growingString += "\f"
} else if l.follow("\\/") {
l.pos++
growingString += "/"
} else if l.follow("\\t") {
l.pos++
growingString += "\t"
} else if l.follow("\\r") {
l.pos++
growingString += "\r"
} else if l.follow("\\\\") {
l.pos++
growingString += "\\"
} else if l.follow("\\u") {
l.pos += 2
code := ""
for i := 0; i < 4; i++ {
c := l.peek()
l.pos++
if !isHexDigit(c) {
return l.errorf("unfinished unicode escape")
}
code = code + string(c)
}
l.pos--
intcode, err := strconv.ParseInt(code, 16, 32)
if err != nil {
return l.errorf("invalid unicode escape: \\u" + code)
}
growingString += string(rune(intcode))
} else if l.follow("\\U") {
l.pos += 2
code := ""
for i := 0; i < 8; i++ {
c := l.peek()
l.pos++
if !isHexDigit(c) {
return l.errorf("unfinished unicode escape")
}
code = code + string(c)
}
l.pos--
intcode, err := strconv.ParseInt(code, 16, 32)
if err != nil {
return l.errorf("invalid unicode escape: \\u" + code)
}
growingString += string(rune(intcode))
} else if l.follow("\\") {
l.pos++
return l.errorf("invalid escape sequence: \\" + string(l.peek()))
} else {
growingString += string(l.peek())
}
if l.next() == eof {
break
}
}
return l.errorf("unclosed string")
}
func (l *queryLexer) lexNumber() queryLexStateFn {
l.ignore()
if !l.accept("+") {
l.accept("-")
}
pointSeen := false
digitSeen := false
for {
next := l.next()
if next == '.' {
if pointSeen {
return l.errorf("cannot have two dots in one float")
}
if !isDigit(l.peek()) {
return l.errorf("float cannot end with a dot")
}
pointSeen = true
} else if isDigit(next) {
digitSeen = true
} else {
l.backup()
break
}
if pointSeen && !digitSeen {
return l.errorf("cannot start float with a dot")
}
}
if !digitSeen {
return l.errorf("no digit in that number")
}
if pointSeen {
l.emit(tokenFloat)
} else {
l.emit(tokenInteger)
}
return l.lexVoid
}
// Entry point
func lexQuery(input string) chan token {
l := &queryLexer{
input: input,
tokens: make(chan token),
line: 1,
col: 1,
}
go l.run()
return l.tokens
}
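
The deleted lexer above follows the state-function design from Rob Pike's talk linked in its header comment: each state emits tokens on a channel and returns the next state, and run() loops until a state returns nil. A generic, self-contained sketch of that pattern (the names and token shapes here are illustrative, not the vendored ones):

```go
package main

import "fmt"

// stateFn mirrors the queryLexStateFn idea: a state that returns the next state.
type stateFn func(*lexer) stateFn

type lexer struct {
	input  string
	pos    int
	tokens chan string
}

// lexStart walks the input and emits crude tokens; returning nil stops the machine.
func lexStart(l *lexer) stateFn {
	for l.pos < len(l.input) {
		c := l.input[l.pos]
		l.pos++
		if c == '.' {
			l.tokens <- "DOT"
			continue
		}
		l.tokens <- "KEY:" + string(c)
	}
	return nil
}

// run drives the state machine the same way queryLexer.run does above.
func (l *lexer) run() {
	for state := lexStart; state != nil; {
		state = state(l)
	}
	close(l.tokens)
}

func main() {
	l := &lexer{input: "a.b", tokens: make(chan string)}
	go l.run()
	for tok := range l.tokens {
		fmt.Println(tok)
	}
}
```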


@ -1,275 +0,0 @@
/*
Based on the "jsonpath" spec/concept.
http://goessner.net/articles/JsonPath/
https://code.google.com/p/json-path/
*/
package toml
import (
"fmt"
)
const maxInt = int(^uint(0) >> 1)
type queryParser struct {
flow chan token
tokensBuffer []token
query *Query
union []pathFn
err error
}
type queryParserStateFn func() queryParserStateFn
// Formats and panics an error message based on a token
func (p *queryParser) parseError(tok *token, msg string, args ...interface{}) queryParserStateFn {
p.err = fmt.Errorf(tok.Position.String()+": "+msg, args...)
return nil // trigger parse to end
}
func (p *queryParser) run() {
for state := p.parseStart; state != nil; {
state = state()
}
}
func (p *queryParser) backup(tok *token) {
p.tokensBuffer = append(p.tokensBuffer, *tok)
}
func (p *queryParser) peek() *token {
if len(p.tokensBuffer) != 0 {
return &(p.tokensBuffer[0])
}
tok, ok := <-p.flow
if !ok {
return nil
}
p.backup(&tok)
return &tok
}
func (p *queryParser) lookahead(types ...tokenType) bool {
result := true
buffer := []token{}
for _, typ := range types {
tok := p.getToken()
if tok == nil {
result = false
break
}
buffer = append(buffer, *tok)
if tok.typ != typ {
result = false
break
}
}
// add the tokens back to the buffer, and return
p.tokensBuffer = append(p.tokensBuffer, buffer...)
return result
}
func (p *queryParser) getToken() *token {
if len(p.tokensBuffer) != 0 {
tok := p.tokensBuffer[0]
p.tokensBuffer = p.tokensBuffer[1:]
return &tok
}
tok, ok := <-p.flow
if !ok {
return nil
}
return &tok
}
func (p *queryParser) parseStart() queryParserStateFn {
tok := p.getToken()
if tok == nil || tok.typ == tokenEOF {
return nil
}
if tok.typ != tokenDollar {
return p.parseError(tok, "Expected '$' at start of expression")
}
return p.parseMatchExpr
}
// handle '.' prefix, '[]', and '..'
func (p *queryParser) parseMatchExpr() queryParserStateFn {
tok := p.getToken()
switch tok.typ {
case tokenDotDot:
p.query.appendPath(&matchRecursiveFn{})
// nested parse for '..'
tok := p.getToken()
switch tok.typ {
case tokenKey:
p.query.appendPath(newMatchKeyFn(tok.val))
return p.parseMatchExpr
case tokenLeftBracket:
return p.parseBracketExpr
case tokenStar:
// do nothing - the recursive predicate is enough
return p.parseMatchExpr
}
case tokenDot:
// nested parse for '.'
tok := p.getToken()
switch tok.typ {
case tokenKey:
p.query.appendPath(newMatchKeyFn(tok.val))
return p.parseMatchExpr
case tokenStar:
p.query.appendPath(&matchAnyFn{})
return p.parseMatchExpr
}
case tokenLeftBracket:
return p.parseBracketExpr
case tokenEOF:
return nil // allow EOF at this stage
}
return p.parseError(tok, "expected match expression")
}
func (p *queryParser) parseBracketExpr() queryParserStateFn {
if p.lookahead(tokenInteger, tokenColon) {
return p.parseSliceExpr
}
if p.peek().typ == tokenColon {
return p.parseSliceExpr
}
return p.parseUnionExpr
}
func (p *queryParser) parseUnionExpr() queryParserStateFn {
var tok *token
// this state can be traversed after some sub-expressions
// so be careful when setting up state in the parser
if p.union == nil {
p.union = []pathFn{}
}
loop: // labeled loop for easy breaking
for {
if len(p.union) > 0 {
// parse delimiter or terminator
tok = p.getToken()
switch tok.typ {
case tokenComma:
// do nothing
case tokenRightBracket:
break loop
default:
return p.parseError(tok, "expected ',' or ']', not '%s'", tok.val)
}
}
// parse sub expression
tok = p.getToken()
switch tok.typ {
case tokenInteger:
p.union = append(p.union, newMatchIndexFn(tok.Int()))
case tokenKey:
p.union = append(p.union, newMatchKeyFn(tok.val))
case tokenString:
p.union = append(p.union, newMatchKeyFn(tok.val))
case tokenQuestion:
return p.parseFilterExpr
default:
return p.parseError(tok, "expected union sub expression, not '%s', %d", tok.val, len(p.union))
}
}
// if there is only one sub-expression, use that instead
if len(p.union) == 1 {
p.query.appendPath(p.union[0])
} else {
p.query.appendPath(&matchUnionFn{p.union})
}
p.union = nil // clear out state
return p.parseMatchExpr
}
func (p *queryParser) parseSliceExpr() queryParserStateFn {
// init slice to grab all elements
start, end, step := 0, maxInt, 1
// parse optional start
tok := p.getToken()
if tok.typ == tokenInteger {
start = tok.Int()
tok = p.getToken()
}
if tok.typ != tokenColon {
return p.parseError(tok, "expected ':'")
}
// parse optional end
tok = p.getToken()
if tok.typ == tokenInteger {
end = tok.Int()
tok = p.getToken()
}
if tok.typ == tokenRightBracket {
p.query.appendPath(newMatchSliceFn(start, end, step))
return p.parseMatchExpr
}
if tok.typ != tokenColon {
return p.parseError(tok, "expected ']' or ':'")
}
// parse optional step
tok = p.getToken()
if tok.typ == tokenInteger {
step = tok.Int()
if step < 0 {
return p.parseError(tok, "step must be a positive value")
}
tok = p.getToken()
}
if tok.typ != tokenRightBracket {
return p.parseError(tok, "expected ']'")
}
p.query.appendPath(newMatchSliceFn(start, end, step))
return p.parseMatchExpr
}
func (p *queryParser) parseFilterExpr() queryParserStateFn {
tok := p.getToken()
if tok.typ != tokenLeftParen {
return p.parseError(tok, "expected left-parenthesis for filter expression")
}
tok = p.getToken()
if tok.typ != tokenKey && tok.typ != tokenString {
return p.parseError(tok, "expected key or string for filter funciton name")
}
name := tok.val
tok = p.getToken()
if tok.typ != tokenRightParen {
return p.parseError(tok, "expected right-parenthesis for filter expression")
}
p.union = append(p.union, newMatchFilterFn(name, tok.Position))
return p.parseUnionExpr
}
func parseQuery(flow chan token) (*Query, error) {
parser := &queryParser{
flow: flow,
tokensBuffer: []token{},
query: newQuery(),
}
parser.run()
return parser.query, parser.err
}
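
The deleted parser above accepts a jsonpath-style grammar: '$' at the start, '.key' and '..' descent, bracketed unions and slices, and '?(name)' filter expressions that are resolved through Query.SetFilter at execution time. A hedged sketch of registering and using such a filter against the removed API (the path and data are illustrative; again assuming pre-rename pelletier/go-toml):

```go
package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml"
)

func main() {
	tree, err := toml.Load(`
[[servers]]
name = "alpha"
ip = "10.0.0.1"

[[servers]]
name = "beta"
`)
	if err != nil {
		panic(err)
	}

	// "?(hasIP)" refers to a user-defined filter registered with SetFilter.
	q, err := toml.CompileQuery("$.servers[?(hasIP)]")
	if err != nil {
		panic(err)
	}
	q.SetFilter("hasIP", func(node interface{}) bool {
		t, ok := node.(*toml.TomlTree)
		return ok && t.Has("ip")
	})

	// Prints the matching sub-trees, if any.
	fmt.Println(q.Execute(tree).Values())
}
```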


@ -14,30 +14,35 @@ type tomlValue struct {
position Position position Position
} }
// TomlTree is the result of the parsing of a TOML file. // Tree is the result of the parsing of a TOML file.
type TomlTree struct { type Tree struct {
values map[string]interface{} // string -> *tomlValue, *TomlTree, []*TomlTree values map[string]interface{} // string -> *tomlValue, *Tree, []*Tree
position Position position Position
} }
func newTomlTree() *TomlTree { func newTree() *Tree {
return &TomlTree{ return &Tree{
values: make(map[string]interface{}), values: make(map[string]interface{}),
position: Position{}, position: Position{},
} }
} }
// TreeFromMap initializes a new TomlTree object using the given map. // TreeFromMap initializes a new Tree object using the given map.
func TreeFromMap(m map[string]interface{}) (*TomlTree, error) { func TreeFromMap(m map[string]interface{}) (*Tree, error) {
result, err := toTree(m) result, err := toTree(m)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return result.(*TomlTree), nil return result.(*Tree), nil
}
// Position returns the position of the tree.
func (t *Tree) Position() Position {
return t.position
} }
// Has returns a boolean indicating if the given key exists. // Has returns a boolean indicating if the given key exists.
func (t *TomlTree) Has(key string) bool { func (t *Tree) Has(key string) bool {
if key == "" { if key == "" {
return false return false
} }
@ -45,25 +50,27 @@ func (t *TomlTree) Has(key string) bool {
} }
// HasPath returns true if the given path of keys exists, false otherwise. // HasPath returns true if the given path of keys exists, false otherwise.
func (t *TomlTree) HasPath(keys []string) bool { func (t *Tree) HasPath(keys []string) bool {
return t.GetPath(keys) != nil return t.GetPath(keys) != nil
} }
// Keys returns the keys of the toplevel tree. // Keys returns the keys of the toplevel tree.
// Warning: this is a costly operation. // Warning: this is a costly operation.
func (t *TomlTree) Keys() []string { func (t *Tree) Keys() []string {
var keys []string keys := make([]string, len(t.values))
i := 0
for k := range t.values { for k := range t.values {
keys = append(keys, k) keys[i] = k
i++
} }
return keys return keys
} }
// Get the value at key in the TomlTree. // Get the value at key in the Tree.
// Key is a dot-separated path (e.g. a.b.c). // Key is a dot-separated path (e.g. a.b.c).
// Returns nil if the path does not exist in the tree. // Returns nil if the path does not exist in the tree.
// If keys is of length zero, the current tree is returned. // If keys is of length zero, the current tree is returned.
func (t *TomlTree) Get(key string) interface{} { func (t *Tree) Get(key string) interface{} {
if key == "" { if key == "" {
return t return t
} }
@ -76,7 +83,7 @@ func (t *TomlTree) Get(key string) interface{} {
// GetPath returns the element in the tree indicated by 'keys'. // GetPath returns the element in the tree indicated by 'keys'.
// If keys is of length zero, the current tree is returned. // If keys is of length zero, the current tree is returned.
func (t *TomlTree) GetPath(keys []string) interface{} { func (t *Tree) GetPath(keys []string) interface{} {
if len(keys) == 0 { if len(keys) == 0 {
return t return t
} }
@ -87,9 +94,9 @@ func (t *TomlTree) GetPath(keys []string) interface{} {
return nil return nil
} }
switch node := value.(type) { switch node := value.(type) {
case *TomlTree: case *Tree:
subtree = node subtree = node
case []*TomlTree: case []*Tree:
// go to most recent element // go to most recent element
if len(node) == 0 { if len(node) == 0 {
return nil return nil
@ -109,7 +116,7 @@ func (t *TomlTree) GetPath(keys []string) interface{} {
} }
// GetPosition returns the position of the given key. // GetPosition returns the position of the given key.
func (t *TomlTree) GetPosition(key string) Position { func (t *Tree) GetPosition(key string) Position {
if key == "" { if key == "" {
return t.position return t.position
} }
@ -118,7 +125,7 @@ func (t *TomlTree) GetPosition(key string) Position {
// GetPositionPath returns the element in the tree indicated by 'keys'. // GetPositionPath returns the element in the tree indicated by 'keys'.
// If keys is of length zero, the current tree is returned. // If keys is of length zero, the current tree is returned.
func (t *TomlTree) GetPositionPath(keys []string) Position { func (t *Tree) GetPositionPath(keys []string) Position {
if len(keys) == 0 { if len(keys) == 0 {
return t.position return t.position
} }
@ -129,9 +136,9 @@ func (t *TomlTree) GetPositionPath(keys []string) Position {
return Position{0, 0} return Position{0, 0}
} }
switch node := value.(type) { switch node := value.(type) {
case *TomlTree: case *Tree:
subtree = node subtree = node
case []*TomlTree: case []*Tree:
// go to most recent element // go to most recent element
if len(node) == 0 { if len(node) == 0 {
return Position{0, 0} return Position{0, 0}
@ -145,9 +152,9 @@ func (t *TomlTree) GetPositionPath(keys []string) Position {
switch node := subtree.values[keys[len(keys)-1]].(type) { switch node := subtree.values[keys[len(keys)-1]].(type) {
case *tomlValue: case *tomlValue:
return node.position return node.position
case *TomlTree: case *Tree:
return node.position return node.position
case []*TomlTree: case []*Tree:
// go to most recent element // go to most recent element
if len(node) == 0 { if len(node) == 0 {
return Position{0, 0} return Position{0, 0}
@ -159,7 +166,7 @@ func (t *TomlTree) GetPositionPath(keys []string) Position {
} }
// GetDefault works like Get but with a default value // GetDefault works like Get but with a default value
func (t *TomlTree) GetDefault(key string, def interface{}) interface{} { func (t *Tree) GetDefault(key string, def interface{}) interface{} {
val := t.Get(key) val := t.Get(key)
if val == nil { if val == nil {
return def return def
@ -169,30 +176,30 @@ func (t *TomlTree) GetDefault(key string, def interface{}) interface{} {
// Set an element in the tree. // Set an element in the tree.
// Key is a dot-separated path (e.g. a.b.c). // Key is a dot-separated path (e.g. a.b.c).
// Creates all necessary intermediates trees, if needed. // Creates all necessary intermediate trees, if needed.
func (t *TomlTree) Set(key string, value interface{}) { func (t *Tree) Set(key string, value interface{}) {
t.SetPath(strings.Split(key, "."), value) t.SetPath(strings.Split(key, "."), value)
} }
// SetPath sets an element in the tree. // SetPath sets an element in the tree.
// Keys is an array of path elements (e.g. {"a","b","c"}). // Keys is an array of path elements (e.g. {"a","b","c"}).
// Creates all necessary intermediates trees, if needed. // Creates all necessary intermediate trees, if needed.
func (t *TomlTree) SetPath(keys []string, value interface{}) { func (t *Tree) SetPath(keys []string, value interface{}) {
subtree := t subtree := t
for _, intermediateKey := range keys[:len(keys)-1] { for _, intermediateKey := range keys[:len(keys)-1] {
nextTree, exists := subtree.values[intermediateKey] nextTree, exists := subtree.values[intermediateKey]
if !exists { if !exists {
nextTree = newTomlTree() nextTree = newTree()
subtree.values[intermediateKey] = nextTree // add new element here subtree.values[intermediateKey] = nextTree // add new element here
} }
switch node := nextTree.(type) { switch node := nextTree.(type) {
case *TomlTree: case *Tree:
subtree = node subtree = node
case []*TomlTree: case []*Tree:
// go to most recent element // go to most recent element
if len(node) == 0 { if len(node) == 0 {
// create element if it does not exist // create element if it does not exist
subtree.values[intermediateKey] = append(node, newTomlTree()) subtree.values[intermediateKey] = append(node, newTree())
} }
subtree = node[len(node)-1] subtree = node[len(node)-1]
} }
@ -201,9 +208,9 @@ func (t *TomlTree) SetPath(keys []string, value interface{}) {
var toInsert interface{} var toInsert interface{}
switch value.(type) { switch value.(type) {
case *TomlTree: case *Tree:
toInsert = value toInsert = value
case []*TomlTree: case []*Tree:
toInsert = value toInsert = value
case *tomlValue: case *tomlValue:
toInsert = value toInsert = value
@ -221,21 +228,21 @@ func (t *TomlTree) SetPath(keys []string, value interface{}) {
// and tree[a][b][c] // and tree[a][b][c]
// //
// Returns nil on success, error object on failure // Returns nil on success, error object on failure
func (t *TomlTree) createSubTree(keys []string, pos Position) error { func (t *Tree) createSubTree(keys []string, pos Position) error {
subtree := t subtree := t
for _, intermediateKey := range keys { for _, intermediateKey := range keys {
nextTree, exists := subtree.values[intermediateKey] nextTree, exists := subtree.values[intermediateKey]
if !exists { if !exists {
tree := newTomlTree() tree := newTree()
tree.position = pos tree.position = pos
subtree.values[intermediateKey] = tree subtree.values[intermediateKey] = tree
nextTree = tree nextTree = tree
} }
switch node := nextTree.(type) { switch node := nextTree.(type) {
case []*TomlTree: case []*Tree:
subtree = node[len(node)-1] subtree = node[len(node)-1]
case *TomlTree: case *Tree:
subtree = node subtree = node
default: default:
return fmt.Errorf("unknown type for path %s (%s): %T (%#v)", return fmt.Errorf("unknown type for path %s (%s): %T (%#v)",
@ -245,17 +252,8 @@ func (t *TomlTree) createSubTree(keys []string, pos Position) error {
return nil return nil
} }
// Query compiles and executes a query on a tree and returns the query result. // LoadReader creates a Tree from any io.Reader.
func (t *TomlTree) Query(query string) (*QueryResult, error) { func LoadReader(reader io.Reader) (tree *Tree, err error) {
q, err := CompileQuery(query)
if err != nil {
return nil, err
}
return q.Execute(t), nil
}
// LoadReader creates a TomlTree from any io.Reader.
func LoadReader(reader io.Reader) (tree *TomlTree, err error) {
defer func() { defer func() {
if r := recover(); r != nil { if r := recover(); r != nil {
if _, ok := r.(runtime.Error); ok { if _, ok := r.(runtime.Error); ok {
@ -268,13 +266,13 @@ func LoadReader(reader io.Reader) (tree *TomlTree, err error) {
return return
} }
// Load creates a TomlTree from a string. // Load creates a Tree from a string.
func Load(content string) (tree *TomlTree, err error) { func Load(content string) (tree *Tree, err error) {
return LoadReader(strings.NewReader(content)) return LoadReader(strings.NewReader(content))
} }
// LoadFile creates a TomlTree from a file. // LoadFile creates a Tree from a file.
func LoadFile(path string) (tree *TomlTree, err error) { func LoadFile(path string) (tree *Tree, err error) {
file, err := os.Open(path) file, err := os.Open(path)
if err != nil { if err != nil {
return nil, err return nil, err
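
The hunk above is the TomlTree to Tree rename plus the removal of the tree-level Query method. A short sketch of the surviving accessors it documents (Load, Has, Get, GetDefault, Set), assuming the renamed vendored pelletier/go-toml:

```go
package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml"
)

func main() {
	tree, err := toml.Load(`
[database]
host = "localhost"
port = 5432
`)
	if err != nil {
		panic(err)
	}

	// Keys are dot-separated paths, per the Get/Set comments above.
	fmt.Println(tree.Has("database.host"))              // true
	fmt.Println(tree.Get("database.port"))              // 5432
	fmt.Println(tree.GetDefault("database.user", "pg")) // pg (key absent)

	// Set creates intermediate trees as needed.
	tree.Set("database.replica.host", "10.0.0.2")
	fmt.Println(tree.Get("database.replica.host"))
}
```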


@ -51,7 +51,7 @@ func simpleValueCoercion(object interface{}) (interface{}, error) {
case fmt.Stringer: case fmt.Stringer:
return original.String(), nil return original.String(), nil
default: default:
return nil, fmt.Errorf("cannot convert type %T to TomlTree", object) return nil, fmt.Errorf("cannot convert type %T to Tree", object)
} }
} }
@ -59,7 +59,7 @@ func sliceToTree(object interface{}) (interface{}, error) {
// arrays are a bit tricky, since they can represent either a // arrays are a bit tricky, since they can represent either a
// collection of simple values, which is represented by one // collection of simple values, which is represented by one
// *tomlValue, or an array of tables, which is represented by an // *tomlValue, or an array of tables, which is represented by an
// array of *TomlTree. // array of *Tree.
// holding the assumption that this function is called from toTree only when value.Kind() is Array or Slice // holding the assumption that this function is called from toTree only when value.Kind() is Array or Slice
value := reflect.ValueOf(object) value := reflect.ValueOf(object)
@ -70,14 +70,14 @@ func sliceToTree(object interface{}) (interface{}, error) {
} }
if insideType.Kind() == reflect.Map { if insideType.Kind() == reflect.Map {
// this is considered as an array of tables // this is considered as an array of tables
tablesArray := make([]*TomlTree, 0, length) tablesArray := make([]*Tree, 0, length)
for i := 0; i < length; i++ { for i := 0; i < length; i++ {
table := value.Index(i) table := value.Index(i)
tree, err := toTree(table.Interface()) tree, err := toTree(table.Interface())
if err != nil { if err != nil {
return nil, err return nil, err
} }
tablesArray = append(tablesArray, tree.(*TomlTree)) tablesArray = append(tablesArray, tree.(*Tree))
} }
return tablesArray, nil return tablesArray, nil
} }
@ -120,7 +120,7 @@ func toTree(object interface{}) (interface{}, error) {
} }
values[key.String()] = newValue values[key.String()] = newValue
} }
return &TomlTree{values, Position{}}, nil return &Tree{values, Position{}}, nil
} }
if value.Kind() == reflect.Array || value.Kind() == reflect.Slice { if value.Kind() == reflect.Array || value.Kind() == reflect.Slice {
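
toTree and sliceToTree above back the exported TreeFromMap constructor from the previous hunk, turning nested Go maps into a Tree (and slices of tables into TOML table arrays). A hedged sketch with a simple nested map:

```go
package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml"
)

func main() {
	m := map[string]interface{}{
		"title": "kompose",
		"owner": map[string]interface{}{
			"name": "kubernetes-incubator",
		},
	}

	// TreeFromMap converts the nested maps into a *Tree.
	tree, err := toml.TreeFromMap(m)
	if err != nil {
		panic(err)
	}
	fmt.Println(tree.Get("owner.name"))
}
```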


@ -4,11 +4,11 @@ import (
"bytes" "bytes"
"fmt" "fmt"
"io" "io"
"reflect"
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"reflect"
) )
// encodes a string to a TOML-compliant string value // encodes a string to a TOML-compliant string value
@ -83,14 +83,14 @@ func tomlValueStringRepresentation(v interface{}) (string, error) {
return "", fmt.Errorf("unsupported value type %T: %v", v, v) return "", fmt.Errorf("unsupported value type %T: %v", v, v)
} }
func (t *TomlTree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64) (int64, error) { func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64) (int64, error) {
simpleValuesKeys := make([]string, 0) simpleValuesKeys := make([]string, 0)
complexValuesKeys := make([]string, 0) complexValuesKeys := make([]string, 0)
for k := range t.values { for k := range t.values {
v := t.values[k] v := t.values[k]
switch v.(type) { switch v.(type) {
case *TomlTree, []*TomlTree: case *Tree, []*Tree:
complexValuesKeys = append(complexValuesKeys, k) complexValuesKeys = append(complexValuesKeys, k)
default: default:
simpleValuesKeys = append(simpleValuesKeys, k) simpleValuesKeys = append(simpleValuesKeys, k)
@ -129,7 +129,7 @@ func (t *TomlTree) writeTo(w io.Writer, indent, keyspace string, bytesCount int6
switch node := v.(type) { switch node := v.(type) {
// node has to be of those two types given how keys are sorted above // node has to be of those two types given how keys are sorted above
case *TomlTree: case *Tree:
tableName := fmt.Sprintf("\n%s[%s]\n", indent, combinedKey) tableName := fmt.Sprintf("\n%s[%s]\n", indent, combinedKey)
writtenBytesCount, err := w.Write([]byte(tableName)) writtenBytesCount, err := w.Write([]byte(tableName))
bytesCount += int64(writtenBytesCount) bytesCount += int64(writtenBytesCount)
@ -140,7 +140,7 @@ func (t *TomlTree) writeTo(w io.Writer, indent, keyspace string, bytesCount int6
if err != nil { if err != nil {
return bytesCount, err return bytesCount, err
} }
case []*TomlTree: case []*Tree:
for _, subTree := range node { for _, subTree := range node {
if len(subTree.values) > 0 { if len(subTree.values) > 0 {
tableArrayName := fmt.Sprintf("\n%s[[%s]]\n", indent, combinedKey) tableArrayName := fmt.Sprintf("\n%s[[%s]]\n", indent, combinedKey)
@ -162,16 +162,16 @@ func (t *TomlTree) writeTo(w io.Writer, indent, keyspace string, bytesCount int6
return bytesCount, nil return bytesCount, nil
} }
// WriteTo encode the TomlTree as Toml and writes it to the writer w. // WriteTo encode the Tree as Toml and writes it to the writer w.
// Returns the number of bytes written in case of success, or an error if anything happened. // Returns the number of bytes written in case of success, or an error if anything happened.
func (t *TomlTree) WriteTo(w io.Writer) (int64, error) { func (t *Tree) WriteTo(w io.Writer) (int64, error) {
return t.writeTo(w, "", "", 0) return t.writeTo(w, "", "", 0)
} }
// ToTomlString generates a human-readable representation of the current tree. // ToTomlString generates a human-readable representation of the current tree.
// Output spans multiple lines, and is suitable for ingest by a TOML parser. // Output spans multiple lines, and is suitable for ingest by a TOML parser.
// If the conversion cannot be performed, ToString returns a non-nil error. // If the conversion cannot be performed, ToString returns a non-nil error.
func (t *TomlTree) ToTomlString() (string, error) { func (t *Tree) ToTomlString() (string, error) {
var buf bytes.Buffer var buf bytes.Buffer
_, err := t.WriteTo(&buf) _, err := t.WriteTo(&buf)
if err != nil { if err != nil {
@ -182,7 +182,7 @@ func (t *TomlTree) ToTomlString() (string, error) {
// String generates a human-readable representation of the current tree. // String generates a human-readable representation of the current tree.
// Alias of ToString. Present to implement the fmt.Stringer interface. // Alias of ToString. Present to implement the fmt.Stringer interface.
func (t *TomlTree) String() string { func (t *Tree) String() string {
result, _ := t.ToTomlString() result, _ := t.ToTomlString()
return result return result
} }
@ -196,18 +196,18 @@ func (t *TomlTree) String() string {
// * time.Time // * time.Time
// * map[string]interface{} (where interface{} is any of this list) // * map[string]interface{} (where interface{} is any of this list)
// * []interface{} (where interface{} is any of this list) // * []interface{} (where interface{} is any of this list)
func (t *TomlTree) ToMap() map[string]interface{} { func (t *Tree) ToMap() map[string]interface{} {
result := map[string]interface{}{} result := map[string]interface{}{}
for k, v := range t.values { for k, v := range t.values {
switch node := v.(type) { switch node := v.(type) {
case []*TomlTree: case []*Tree:
var array []interface{} var array []interface{}
for _, item := range node { for _, item := range node {
array = append(array, item.ToMap()) array = append(array, item.ToMap())
} }
result[k] = array result[k] = array
case *TomlTree: case *Tree:
result[k] = node.ToMap() result[k] = node.ToMap()
case *tomlValue: case *tomlValue:
result[k] = node.value result[k] = node.value
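
writeTo above drives both WriteTo and ToTomlString, and ToMap goes the other way into plain Go values, so a tree can round-trip between TOML text and maps. A short sketch (renamed API assumed):

```go
package main

import (
	"fmt"
	"os"

	toml "github.com/pelletier/go-toml"
)

func main() {
	tree, err := toml.Load("[section]\nkey = \"value\"\n")
	if err != nil {
		panic(err)
	}

	// ToTomlString re-serializes the tree; String() is an alias.
	s, err := tree.ToTomlString()
	if err != nil {
		panic(err)
	}
	fmt.Print(s)

	// WriteTo streams the same representation to any io.Writer.
	if _, err := tree.WriteTo(os.Stdout); err != nil {
		panic(err)
	}

	// ToMap converts the tree into plain Go maps and slices.
	fmt.Println(tree.ToMap())
}
```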


@ -79,6 +79,14 @@ func (f Frame) Format(s fmt.State, verb rune) {
// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). // StackTrace is stack of Frames from innermost (newest) to outermost (oldest).
type StackTrace []Frame type StackTrace []Frame
// Format formats the stack of Frames according to the fmt.Formatter interface.
//
// %s lists source files for each Frame in the stack
// %v lists the source file and line number for each Frame in the stack
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
// %+v Prints filename, function, and line number for each Frame in the stack.
func (st StackTrace) Format(s fmt.State, verb rune) { func (st StackTrace) Format(s fmt.State, verb rune) {
switch verb { switch verb {
case 'v': case 'v':
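
The new doc comment spells out the verbs StackTrace honours when formatted. A quick illustration with the vendored errors package (assumed to be github.com/pkg/errors; the message is made up):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	err := errors.New("boom") // errors.New records the stack at the call site
	fmt.Printf("%s\n", err)   // message only
	fmt.Printf("%v\n", err)   // message only
	fmt.Printf("%+v\n", err)  // message plus file, function and line per frame
}
```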


@ -29,6 +29,7 @@ import (
var templateFuncs = template.FuncMap{ var templateFuncs = template.FuncMap{
"trim": strings.TrimSpace, "trim": strings.TrimSpace,
"trimRightSpace": trimRightSpace, "trimRightSpace": trimRightSpace,
"trimTrailingWhitespaces": trimRightSpace,
"appendIfNotPresent": appendIfNotPresent, "appendIfNotPresent": appendIfNotPresent,
"rpad": rpad, "rpad": rpad,
"gt": Gt, "gt": Gt,
@ -65,6 +66,8 @@ func OnInitialize(y ...func()) {
initializers = append(initializers, y...) initializers = append(initializers, y...)
} }
// FIXME Gt is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
// Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans, // Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans,
// Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as // Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as
// ints and then compared. // ints and then compared.
@ -95,6 +98,8 @@ func Gt(a interface{}, b interface{}) bool {
return left > right return left > right
} }
// FIXME Eq is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
// Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic. // Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic.
func Eq(a interface{}, b interface{}) bool { func Eq(a interface{}, b interface{}) bool {
av := reflect.ValueOf(a) av := reflect.ValueOf(a)
@ -115,6 +120,8 @@ func trimRightSpace(s string) string {
return strings.TrimRightFunc(s, unicode.IsSpace) return strings.TrimRightFunc(s, unicode.IsSpace)
} }
// FIXME appendIfNotPresent is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s. // appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s.
func appendIfNotPresent(s, stringToAppend string) string { func appendIfNotPresent(s, stringToAppend string) string {
if strings.Contains(s, stringToAppend) { if strings.Contains(s, stringToAppend) {
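
The context lines above include OnInitialize, which queues functions that cobra runs before a command's Run once Execute is called; CLIs commonly use it for configuration setup. A hedged sketch:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cobra.OnInitialize(func() {
		// Runs before the command's Run function once Execute is called.
		fmt.Println("loading configuration (illustrative)")
	})

	cmd := &cobra.Command{
		Use: "demo",
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("running demo")
		},
	}
	_ = cmd.Execute()
}
```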


@ -28,110 +28,147 @@ import (
) )
// Command is just that, a command for your application. // Command is just that, a command for your application.
// eg. 'go run' ... 'run' is the command. Cobra requires // E.g. 'go run ...' - 'run' is the command. Cobra requires
// you to define the usage and description as part of your command // you to define the usage and description as part of your command
// definition to ensure usability. // definition to ensure usability.
type Command struct { type Command struct {
// Name is the command name, usually the executable's name. // Use is the one-line usage message.
name string
// The one-line usage message.
Use string Use string
// An array of aliases that can be used instead of the first word in Use.
// Aliases is an array of aliases that can be used instead of the first word in Use.
Aliases []string Aliases []string
// An array of command names for which this command will be suggested - similar to aliases but only suggests.
// SuggestFor is an array of command names for which this command will be suggested -
// similar to aliases but only suggests.
SuggestFor []string SuggestFor []string
// The short description shown in the 'help' output.
// Short is the short description shown in the 'help' output.
Short string Short string
// The long message shown in the 'help <this-command>' output.
// Long is the long message shown in the 'help <this-command>' output.
Long string Long string
// Examples of how to use the command
// Example is examples of how to use the command.
Example string Example string
// List of all valid non-flag arguments that are accepted in bash completions
// ValidArgs is list of all valid non-flag arguments that are accepted in bash completions
ValidArgs []string ValidArgs []string
// List of aliases for ValidArgs. These are not suggested to the user in the bash
// completion, but accepted if entered manually. // ArgAliases is List of aliases for ValidArgs.
// These are not suggested to the user in the bash completion,
// but accepted if entered manually.
ArgAliases []string ArgAliases []string
// Custom functions used by the bash autocompletion generator
// BashCompletionFunction is custom functions used by the bash autocompletion generator.
BashCompletionFunction string BashCompletionFunction string
// Is this command deprecated and should print this string when used?
// Deprecated defines, if this command is deprecated and should print this string when used.
Deprecated string Deprecated string
// Is this command hidden and should NOT show up in the list of available commands?
// Hidden defines, if this command is hidden and should NOT show up in the list of available commands.
Hidden bool Hidden bool
// Annotations are key/value pairs that can be used by applications to identify or // Annotations are key/value pairs that can be used by applications to identify or
// group commands // group commands.
Annotations map[string]string Annotations map[string]string
// Full set of flags
flags *flag.FlagSet
// Set of flags childrens of this command will inherit
pflags *flag.FlagSet
// Flags that are declared specifically by this command (not inherited).
lflags *flag.FlagSet
// Inherited flags.
iflags *flag.FlagSet
// All persistent flags of cmd's parents.
parentsPflags *flag.FlagSet
// SilenceErrors is an option to quiet errors down stream
SilenceErrors bool
// Silence Usage is an option to silence usage when an error occurs.
SilenceUsage bool
// The *Run functions are executed in the following order: // The *Run functions are executed in the following order:
// * PersistentPreRun() // * PersistentPreRun()
// * PreRun() // * PreRun()
// * Run() // * Run()
// * PostRun() // * PostRun()
// * PersistentPostRun() // * PersistentPostRun()
// All functions get the same args, the arguments after the command name // All functions get the same args, the arguments after the command name.
// PersistentPreRun: children of this command will inherit and execute //
// PersistentPreRun: children of this command will inherit and execute.
PersistentPreRun func(cmd *Command, args []string) PersistentPreRun func(cmd *Command, args []string)
// PersistentPreRunE: PersistentPreRun but returns an error // PersistentPreRunE: PersistentPreRun but returns an error.
PersistentPreRunE func(cmd *Command, args []string) error PersistentPreRunE func(cmd *Command, args []string) error
// PreRun: children of this command will not inherit. // PreRun: children of this command will not inherit.
PreRun func(cmd *Command, args []string) PreRun func(cmd *Command, args []string)
// PreRunE: PreRun but returns an error // PreRunE: PreRun but returns an error.
PreRunE func(cmd *Command, args []string) error PreRunE func(cmd *Command, args []string) error
// Run: Typically the actual work function. Most commands will only implement this // Run: Typically the actual work function. Most commands will only implement this.
Run func(cmd *Command, args []string) Run func(cmd *Command, args []string)
// RunE: Run but returns an error // RunE: Run but returns an error.
RunE func(cmd *Command, args []string) error RunE func(cmd *Command, args []string) error
// PostRun: run after the Run command. // PostRun: run after the Run command.
PostRun func(cmd *Command, args []string) PostRun func(cmd *Command, args []string)
// PostRunE: PostRun but returns an error // PostRunE: PostRun but returns an error.
PostRunE func(cmd *Command, args []string) error PostRunE func(cmd *Command, args []string) error
// PersistentPostRun: children of this command will inherit and execute after PostRun // PersistentPostRun: children of this command will inherit and execute after PostRun.
PersistentPostRun func(cmd *Command, args []string) PersistentPostRun func(cmd *Command, args []string)
// PersistentPostRunE: PersistentPostRun but returns an error // PersistentPostRunE: PersistentPostRun but returns an error.
PersistentPostRunE func(cmd *Command, args []string) error PersistentPostRunE func(cmd *Command, args []string) error
// DisableAutoGenTag remove
// SilenceErrors is an option to quiet errors down stream.
SilenceErrors bool
// SilenceUsage is an option to silence usage when an error occurs.
SilenceUsage bool
// DisableFlagParsing disables the flag parsing.
// If this is true all flags will be passed to the command as arguments.
DisableFlagParsing bool
// DisableAutoGenTag defines, if gen tag ("Auto generated by spf13/cobra...")
// will be printed by generating docs for this command.
DisableAutoGenTag bool DisableAutoGenTag bool
// Commands is the list of commands supported by this program.
// DisableSuggestions disables the suggestions based on Levenshtein distance
// that go along with 'unknown command' messages.
DisableSuggestions bool
// SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions.
// Must be > 0.
SuggestionsMinimumDistance int
// name is the command name, usually the executable's name.
name string
// commands is the list of commands supported by this program.
commands []*Command commands []*Command
// Parent Command for this command // parent is a parent command for this command.
parent *Command parent *Command
// max lengths of commands' string lengths for use in padding // Max lengths of commands' string lengths for use in padding.
commandsMaxUseLen int commandsMaxUseLen int
commandsMaxCommandPathLen int commandsMaxCommandPathLen int
commandsMaxNameLen int commandsMaxNameLen int
// is commands slice are sorted or not // commandsAreSorted defines, if command slice are sorted or not.
commandsAreSorted bool commandsAreSorted bool
args []string // actual args parsed from flags // args is actual args parsed from flags.
output io.Writer // out writer if set in SetOutput(w) args []string
usageFunc func(*Command) error // Usage can be defined by application // flagErrorBuf contains all error messages from pflag.
usageTemplate string // Can be defined by Application flagErrorBuf *bytes.Buffer
flagErrorFunc func(*Command, error) error // flags is full set of flags.
helpTemplate string // Can be defined by Application flags *flag.FlagSet
helpFunc func(*Command, []string) // Help can be defined by application // pflags contains persistent flags.
helpCommand *Command // The help command pflags *flag.FlagSet
// The global normalization function that we can use on every pFlag set and children commands // lflags contains local flags.
lflags *flag.FlagSet
// iflags contains inherited flags.
iflags *flag.FlagSet
// parentsPflags is all persistent flags of cmd's parents.
parentsPflags *flag.FlagSet
// globNormFunc is the global normalization function
// that we can use on every pflag set and children commands
globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName
// Disable the suggestions based on Levenshtein distance that go along with 'unknown command' messages // output is an output writer defined by user.
DisableSuggestions bool output io.Writer
// If displaying suggestions, allows to set the minimum levenshtein distance to display, must be > 0 // usageFunc is usage func defined by user.
SuggestionsMinimumDistance int usageFunc func(*Command) error
// usageTemplate is usage template defined by user.
// Disable the flag parsing. If this is true all flags will be passed to the command as arguments. usageTemplate string
DisableFlagParsing bool // flagErrorFunc is func defined by user and it's called when the parsing of
// flags returns an error.
flagErrorFunc func(*Command, error) error
// helpTemplate is help template defined by user.
helpTemplate string
// helpFunc is help func defined by user.
helpFunc func(*Command, []string)
// helpCommand is command with usage 'help'. If it's not defined by user,
// cobra uses default help command.
helpCommand *Command
} }
// SetArgs sets arguments for the command. It is set to os.Args[1:] by default, if desired, can be overridden // SetArgs sets arguments for the command. It is set to os.Args[1:] by default, if desired, can be overridden
@ -215,9 +252,8 @@ func (c *Command) UsageFunc() (f func(*Command) error) {
if c.usageFunc != nil { if c.usageFunc != nil {
return c.usageFunc return c.usageFunc
} }
if c.HasParent() { if c.HasParent() {
return c.parent.UsageFunc() return c.Parent().UsageFunc()
} }
return func(c *Command) error { return func(c *Command) error {
c.mergePersistentFlags() c.mergePersistentFlags()
@ -239,10 +275,13 @@ func (c *Command) Usage() error {
// HelpFunc returns either the function set by SetHelpFunc for this command // HelpFunc returns either the function set by SetHelpFunc for this command
// or a parent, or it returns a function with default help behavior. // or a parent, or it returns a function with default help behavior.
func (c *Command) HelpFunc() func(*Command, []string) { func (c *Command) HelpFunc() func(*Command, []string) {
if helpFunc := c.checkHelpFunc(); helpFunc != nil { if c.helpFunc != nil {
return helpFunc return c.helpFunc
} }
return func(*Command, []string) { if c.HasParent() {
return c.Parent().HelpFunc()
}
return func(c *Command, a []string) {
c.mergePersistentFlags() c.mergePersistentFlags()
err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c) err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c)
if err != nil { if err != nil {
@ -251,20 +290,6 @@ func (c *Command) HelpFunc() func(*Command, []string) {
} }
} }
// checkHelpFunc checks if there is helpFunc in ancestors of c.
func (c *Command) checkHelpFunc() func(*Command, []string) {
if c == nil {
return nil
}
if c.helpFunc != nil {
return c.helpFunc
}
if c.HasParent() {
return c.parent.checkHelpFunc()
}
return nil
}
// Help puts out the help for the command. // Help puts out the help for the command.
// Used when a user calls help [command]. // Used when a user calls help [command].
// Can be defined by user by overriding HelpFunc. // Can be defined by user by overriding HelpFunc.
@ -339,8 +364,8 @@ func (c *Command) UsageTemplate() string {
return c.parent.UsageTemplate() return c.parent.UsageTemplate()
} }
return `Usage:{{if .Runnable}} return `Usage:{{if .Runnable}}
{{if .HasAvailableFlags}}{{appendIfNotPresent .UseLine "[flags]"}}{{else}}{{.UseLine}}{{end}}{{end}}{{if .HasAvailableSubCommands}} {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
{{ .CommandPath}} [command]{{end}}{{if gt .Aliases 0}} {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}
Aliases: Aliases:
{{.NameAndAliases}}{{end}}{{if .HasExample}} {{.NameAndAliases}}{{end}}{{if .HasExample}}
@ -352,10 +377,10 @@ Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "he
{{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}} {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
Flags: Flags:
{{.LocalFlags.FlagUsages | trimRightSpace}}{{end}}{{if .HasAvailableInheritedFlags}} {{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}
Global Flags: Global Flags:
{{.InheritedFlags.FlagUsages | trimRightSpace}}{{end}}{{if .HasHelpSubCommands}} {{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}}
Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}} Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}}
{{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}} {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}}
@ -373,70 +398,60 @@ func (c *Command) HelpTemplate() string {
if c.HasParent() { if c.HasParent() {
return c.parent.HelpTemplate() return c.parent.HelpTemplate()
} }
return `{{with or .Long .Short }}{{. | trim}} return `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}}
{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` {{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
} }
// Really only used when casting a command to a commander. func hasNoOptDefVal(name string, fs *flag.FlagSet) bool {
func (c *Command) resetChildrensParents() { flag := fs.Lookup(name)
for _, x := range c.commands {
x.parent = c
}
}
func hasNoOptDefVal(name string, f *flag.FlagSet) bool {
flag := f.Lookup(name)
if flag == nil { if flag == nil {
return false return false
} }
return len(flag.NoOptDefVal) > 0 return flag.NoOptDefVal != ""
} }
func shortHasNoOptDefVal(name string, fs *flag.FlagSet) bool { func shortHasNoOptDefVal(name string, fs *flag.FlagSet) bool {
result := false if len(name) == 0 {
fs.VisitAll(func(flag *flag.Flag) { return false
if flag.Shorthand == name && len(flag.NoOptDefVal) > 0 {
result = true
} }
})
return result flag := fs.ShorthandLookup(name[:1])
if flag == nil {
return false
}
return flag.NoOptDefVal != ""
} }
func stripFlags(args []string, c *Command) []string { func stripFlags(args []string, c *Command) []string {
if len(args) < 1 { if len(args) == 0 {
return args return args
} }
c.mergePersistentFlags() c.mergePersistentFlags()
commands := []string{} commands := []string{}
flags := c.Flags()
inQuote := false Loop:
inFlag := false for len(args) > 0 {
for _, y := range args { s := args[0]
if !inQuote { args = args[1:]
switch { switch {
case strings.HasPrefix(y, "\""): case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags):
inQuote = true // If '--flag arg' then
case strings.Contains(y, "=\""): // delete arg from args.
inQuote = true fallthrough // (do the same as below)
case strings.HasPrefix(y, "--") && !strings.Contains(y, "="): case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags):
// TODO: this isn't quite right, we should really check ahead for 'true' or 'false' // If '-f arg' then
inFlag = !hasNoOptDefVal(y[2:], c.Flags()) // delete 'arg' from args or break the loop if len(args) <= 1.
case strings.HasPrefix(y, "-") && !strings.Contains(y, "=") && len(y) == 2 && !shortHasNoOptDefVal(y[1:], c.Flags()): if len(args) <= 1 {
inFlag = true break Loop
case inFlag: } else {
inFlag = false args = args[1:]
case y == "": continue
// strip empty commands, as the go tests expect this to be ok....
case !strings.HasPrefix(y, "-"):
commands = append(commands, y)
inFlag = false
} }
} case s != "" && !strings.HasPrefix(s, "-"):
commands = append(commands, s)
if strings.HasSuffix(y, "\"") && !strings.HasSuffix(y, "\\\"") {
inQuote = false
} }
} }
@ -581,18 +596,19 @@ func (c *Command) execute(a []string) (err error) {
// initialize help flag as the last point possible to allow for user // initialize help flag as the last point possible to allow for user
// overriding // overriding
c.initHelpFlag() c.InitDefaultHelpFlag()
err = c.ParseFlags(a) err = c.ParseFlags(a)
if err != nil { if err != nil {
return c.FlagErrorFunc()(c, err) return c.FlagErrorFunc()(c, err)
} }
// If help is called, regardless of other flags, return we want help
// If help is called, regardless of other flags, return we want help.
// Also say we need help if the command isn't runnable. // Also say we need help if the command isn't runnable.
helpVal, err := c.Flags().GetBool("help") helpVal, err := c.Flags().GetBool("help")
if err != nil { if err != nil {
// should be impossible to get here as we always declare a help // should be impossible to get here as we always declare a help
// flag in initHelpFlag() // flag in InitDefaultHelpFlag()
c.Println("\"help\" flag declared as non-bool. Please correct your code") c.Println("\"help\" flag declared as non-bool. Please correct your code")
return err return err
} }
@ -732,10 +748,19 @@ func (c *Command) ExecuteC() (cmd *Command, err error) {
return cmd, nil return cmd, nil
} }
func (c *Command) initHelpFlag() { // InitDefaultHelpFlag adds default help flag to c.
// It is called automatically by executing the c or by calling help and usage.
// If c already has help flag, it will do nothing.
func (c *Command) InitDefaultHelpFlag() {
c.mergePersistentFlags() c.mergePersistentFlags()
if c.Flags().Lookup("help") == nil { if c.Flags().Lookup("help") == nil {
c.Flags().BoolP("help", "h", false, "help for "+c.Name()) usage := "help for "
if c.Name() == "" {
usage += "this command"
} else {
usage += c.Name()
}
c.Flags().BoolP("help", "h", false, usage)
} }
} }
@ -759,6 +784,7 @@ func (c *Command) initHelpCmd() {
c.Printf("Unknown help topic %#q\n", args) c.Printf("Unknown help topic %#q\n", args)
c.Root().Usage() c.Root().Usage()
} else { } else {
cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown
cmd.Help() cmd.Help()
} }
}, },
@ -862,34 +888,34 @@ func (c *Command) Print(i ...interface{}) {
// Println is a convenience method to Println to the defined output, fallback to Stderr if not set. // Println is a convenience method to Println to the defined output, fallback to Stderr if not set.
func (c *Command) Println(i ...interface{}) { func (c *Command) Println(i ...interface{}) {
str := fmt.Sprintln(i...) c.Print(fmt.Sprintln(i...))
c.Print(str)
} }
// Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set. // Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set.
func (c *Command) Printf(format string, i ...interface{}) { func (c *Command) Printf(format string, i ...interface{}) {
str := fmt.Sprintf(format, i...) c.Print(fmt.Sprintf(format, i...))
c.Print(str)
} }
// CommandPath returns the full path to this command. // CommandPath returns the full path to this command.
func (c *Command) CommandPath() string { func (c *Command) CommandPath() string {
str := c.Name() if c.HasParent() {
x := c return c.Parent().CommandPath() + " " + c.Name()
for x.HasParent() {
str = x.parent.Name() + " " + str
x = x.parent
} }
return str return c.Name()
} }
// UseLine puts out the full usage for a given command (including parents). // UseLine puts out the full usage for a given command (including parents).
func (c *Command) UseLine() string { func (c *Command) UseLine() string {
str := "" var useline string
if c.HasParent() { if c.HasParent() {
str = c.parent.CommandPath() + " " useline = c.parent.CommandPath() + " " + c.Use
} else {
useline = c.Use
} }
return str + c.Use if c.HasAvailableFlags() && !strings.Contains(useline, "[flags]") {
useline += " [flags]"
}
return useline
} }
// DebugFlags used to determine which flags have been assigned to which commands // DebugFlags used to determine which flags have been assigned to which commands
@ -922,6 +948,7 @@ func (c *Command) DebugFlags() {
} }
}) })
} }
c.Println(x.flagErrorBuf)
if x.HasSubCommands() { if x.HasSubCommands() {
for _, y := range x.commands { for _, y := range x.commands {
debugflags(y) debugflags(y)
@ -934,15 +961,14 @@ func (c *Command) DebugFlags() {
// Name returns the command's name: the first word in the use line. // Name returns the command's name: the first word in the use line.
func (c *Command) Name() string { func (c *Command) Name() string {
if c.name != "" { if c.name == "" {
return c.name
}
name := c.Use name := c.Use
i := strings.Index(name, " ") i := strings.Index(name, " ")
if i >= 0 { if i >= 0 {
name = name[:i] name = name[:i]
} }
c.name = name c.name = name
}
return c.name return c.name
} }
@ -1062,7 +1088,10 @@ func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) f
func (c *Command) Flags() *flag.FlagSet { func (c *Command) Flags() *flag.FlagSet {
if c.flags == nil { if c.flags == nil {
c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
c.flags.SetOutput(c.OutOrStderr()) if c.flagErrorBuf == nil {
c.flagErrorBuf = new(bytes.Buffer)
}
c.flags.SetOutput(c.flagErrorBuf)
} }
return c.flags return c.flags
@ -1087,7 +1116,10 @@ func (c *Command) LocalFlags() *flag.FlagSet {
if c.lflags == nil { if c.lflags == nil {
c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
c.lflags.SetOutput(c.OutOrStderr()) if c.flagErrorBuf == nil {
c.flagErrorBuf = new(bytes.Buffer)
}
c.lflags.SetOutput(c.flagErrorBuf)
} }
c.lflags.SortFlags = c.Flags().SortFlags c.lflags.SortFlags = c.Flags().SortFlags
@ -1107,6 +1139,10 @@ func (c *Command) InheritedFlags() *flag.FlagSet {
if c.iflags == nil { if c.iflags == nil {
c.iflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) c.iflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
if c.flagErrorBuf == nil {
c.flagErrorBuf = new(bytes.Buffer)
}
c.iflags.SetOutput(c.flagErrorBuf)
} }
local := c.LocalFlags() local := c.LocalFlags()
@ -1127,17 +1163,22 @@ func (c *Command) NonInheritedFlags() *flag.FlagSet {
func (c *Command) PersistentFlags() *flag.FlagSet { func (c *Command) PersistentFlags() *flag.FlagSet {
if c.pflags == nil { if c.pflags == nil {
c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
c.pflags.SetOutput(c.OutOrStderr()) if c.flagErrorBuf == nil {
c.flagErrorBuf = new(bytes.Buffer)
}
c.pflags.SetOutput(c.flagErrorBuf)
} }
return c.pflags return c.pflags
} }
// ResetFlags is used in testing. // ResetFlags is used in testing.
func (c *Command) ResetFlags() { func (c *Command) ResetFlags() {
c.flagErrorBuf = new(bytes.Buffer)
c.flagErrorBuf.Reset()
c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
c.flags.SetOutput(c.OutOrStderr()) c.flags.SetOutput(c.flagErrorBuf)
c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
c.pflags.SetOutput(c.OutOrStderr()) c.pflags.SetOutput(c.flagErrorBuf)
} }
// HasFlags checks if the command contains any flags (local plus persistent from the entire structure). // HasFlags checks if the command contains any flags (local plus persistent from the entire structure).
@ -1225,8 +1266,8 @@ func (c *Command) Parent() *Command {
// mergePersistentFlags merges c.PersistentFlags() to c.Flags() // mergePersistentFlags merges c.PersistentFlags() to c.Flags()
// and adds missing persistent flags of all parents. // and adds missing persistent flags of all parents.
func (c *Command) mergePersistentFlags() { func (c *Command) mergePersistentFlags() {
c.Flags().AddFlagSet(c.PersistentFlags())
c.updateParentsPflags() c.updateParentsPflags()
c.Flags().AddFlagSet(c.PersistentFlags())
c.Flags().AddFlagSet(c.parentsPflags) c.Flags().AddFlagSet(c.parentsPflags)
} }
@ -1236,7 +1277,7 @@ func (c *Command) mergePersistentFlags() {
func (c *Command) updateParentsPflags() { func (c *Command) updateParentsPflags() {
if c.parentsPflags == nil { if c.parentsPflags == nil {
c.parentsPflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) c.parentsPflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
c.parentsPflags.SetOutput(c.OutOrStderr()) c.parentsPflags.SetOutput(c.flagErrorBuf)
c.parentsPflags.SortFlags = false c.parentsPflags.SortFlags = false
} }


@ -96,13 +96,6 @@ func NewNotepad(outThreshold Threshold, logThreshold Threshold, outHandle, logHa
return n return n
} }
// Feedback is special. It writes plainly to the output while
// logging with the standard extra information (date, file, etc)
// Only Println and Printf are currently provided for this
type Feedback struct {
*Notepad
}
// init create the loggers for each level depending on the notepad thresholds // init create the loggers for each level depending on the notepad thresholds
func (n *Notepad) init() { func (n *Notepad) init() {
bothHandle := io.MultiWriter(n.outHandle, n.logHandle) bothHandle := io.MultiWriter(n.outHandle, n.logHandle)
@ -177,19 +170,25 @@ func (n *Notepad) SetFlags(flags int) {
} }
// Feedback is special. It writes plainly to the output while // Feedback is special. It writes plainly to the output while
// logging with the standard extra information (date, file, etc) // logging with the standard extra information (date, file, etc).
// Only Println and Printf are currently provided for this type Feedback struct {
*Notepad
}
func (fb *Feedback) Println(v ...interface{}) { func (fb *Feedback) Println(v ...interface{}) {
s := fmt.Sprintln(v...) s := fmt.Sprintln(v...)
fmt.Print(s) fmt.Print(s)
fb.LOG.Output(2, s) fb.LOG.Output(2, s)
} }
// Feedback is special. It writes plainly to the output while
// logging with the standard extra information (date, file, etc)
// Only Println and Printf are currently provided for this
func (fb *Feedback) Printf(format string, v ...interface{}) { func (fb *Feedback) Printf(format string, v ...interface{}) {
s := fmt.Sprintf(format, v...) s := fmt.Sprintf(format, v...)
fmt.Print(s) fmt.Print(s)
fb.LOG.Output(2, s) fb.LOG.Output(2, s)
} }
func (fb *Feedback) Print(v ...interface{}) {
s := fmt.Sprint(v...)
fmt.Print(s)
fb.LOG.Output(2, s)
}
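As a rough usage sketch of the Feedback type shown above, assuming the package is imported as jww and that its default notepad exposes a FEEDBACK handle of this type:

package main

import (
	jww "github.com/spf13/jwalterweatherman"
)

func main() {
	// Feedback writes the message plainly to the output handle while also
	// recording it through the notepad's LOG logger (date, file, etc.).
	jww.FEEDBACK.Println("conversion finished")
	jww.FEEDBACK.Printf("wrote %d files\n", 3)
	jww.FEEDBACK.Print("done")
}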


@ -83,7 +83,9 @@ func (f *FlagSet) CountP(name, shorthand string, usage string) *int {
return p return p
} }
// Count like Count only the flag is placed on the CommandLine isntead of a given flag set // Count defines a count flag with specified name, default value, and usage string.
// The return value is the address of an int variable that stores the value of the flag.
// A count flag will add 1 to its value every time it is found on the command line
func Count(name string, usage string) *int { func Count(name string, usage string) *int {
return CommandLine.CountP(name, "", usage) return CommandLine.CountP(name, "", usage)
} }
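A minimal sketch of the Count/CountP behaviour documented above; the flag set name and arguments are illustrative:

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	verbosity := fs.CountP("verbose", "v", "increase verbosity")

	// Every occurrence of the flag adds 1 to the value.
	if err := fs.Parse([]string{"-v", "-v", "--verbose"}); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(*verbosity) // 3
}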

vendor/github.com/spf13/pflag/flag.go generated vendored

@ -319,6 +319,22 @@ func (f *FlagSet) Lookup(name string) *Flag {
return f.lookup(f.normalizeFlagName(name)) return f.lookup(f.normalizeFlagName(name))
} }
// ShorthandLookup returns the Flag structure of the short handed flag,
// returning nil if none exists.
// It panics, if len(name) > 1.
func (f *FlagSet) ShorthandLookup(name string) *Flag {
if name == "" {
return nil
}
if len(name) > 1 {
msg := fmt.Sprintf("can not look up shorthand which is more than one ASCII character: %q", name)
fmt.Fprintf(f.out(), msg)
panic(msg)
}
c := name[0]
return f.shorthands[c]
}
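A short sketch of the new ShorthandLookup helper; the flag name is illustrative:

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	fs.BoolP("force", "f", false, "force the operation")

	// Look the flag up by its one-character shorthand.
	if f := fs.ShorthandLookup("f"); f != nil {
		fmt.Println(f.Name) // "force"
	}

	// Note: as the code above shows, passing more than one character
	// panics, so only single-letter shorthands should be handed in.
}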
// lookup returns the Flag structure of the named flag, returning nil if none exists. // lookup returns the Flag structure of the named flag, returning nil if none exists.
func (f *FlagSet) lookup(name NormalizedName) *Flag { func (f *FlagSet) lookup(name NormalizedName) *Flag {
return f.formal[name] return f.formal[name]
@ -360,7 +376,7 @@ func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error {
if flag == nil { if flag == nil {
return fmt.Errorf("flag %q does not exist", name) return fmt.Errorf("flag %q does not exist", name)
} }
if len(usageMessage) == 0 { if usageMessage == "" {
return fmt.Errorf("deprecated message for flag %q must be set", name) return fmt.Errorf("deprecated message for flag %q must be set", name)
} }
flag.Deprecated = usageMessage flag.Deprecated = usageMessage
@ -375,7 +391,7 @@ func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) erro
if flag == nil { if flag == nil {
return fmt.Errorf("flag %q does not exist", name) return fmt.Errorf("flag %q does not exist", name)
} }
if len(usageMessage) == 0 { if usageMessage == "" {
return fmt.Errorf("deprecated message for flag %q must be set", name) return fmt.Errorf("deprecated message for flag %q must be set", name)
} }
flag.ShorthandDeprecated = usageMessage flag.ShorthandDeprecated = usageMessage
@ -399,6 +415,12 @@ func Lookup(name string) *Flag {
return CommandLine.Lookup(name) return CommandLine.Lookup(name)
} }
// ShorthandLookup returns the Flag structure of the short handed flag,
// returning nil if none exists.
func ShorthandLookup(name string) *Flag {
return CommandLine.ShorthandLookup(name)
}
// Set sets the value of the named flag. // Set sets the value of the named flag.
func (f *FlagSet) Set(name, value string) error { func (f *FlagSet) Set(name, value string) error {
normalName := f.normalizeFlagName(name) normalName := f.normalizeFlagName(name)
@ -406,18 +428,28 @@ func (f *FlagSet) Set(name, value string) error {
if !ok { if !ok {
return fmt.Errorf("no such flag -%v", name) return fmt.Errorf("no such flag -%v", name)
} }
err := flag.Value.Set(value) err := flag.Value.Set(value)
if err != nil { if err != nil {
return err var flagName string
if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name)
} else {
flagName = fmt.Sprintf("--%s", flag.Name)
} }
return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err)
}
if f.actual == nil { if f.actual == nil {
f.actual = make(map[NormalizedName]*Flag) f.actual = make(map[NormalizedName]*Flag)
} }
f.actual[normalName] = flag f.actual[normalName] = flag
f.orderedActual = append(f.orderedActual, flag) f.orderedActual = append(f.orderedActual, flag)
flag.Changed = true flag.Changed = true
if len(flag.Deprecated) > 0 {
fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) if flag.Deprecated != "" {
fmt.Fprintf(f.out(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
} }
return nil return nil
} }
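To illustrate the reworked error message in Set above, a small sketch; the flag and the bad value are illustrative:

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	fs.IntP("jobs", "j", 1, "number of parallel jobs")

	// The error now names the flag, e.g.:
	//   invalid argument "many" for "-j, --jobs" flag: ... invalid syntax
	if err := fs.Set("jobs", "many"); err != nil {
		fmt.Println(err)
	}
}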
@ -599,28 +631,28 @@ func wrap(i, w int, s string) string {
// for all flags in the FlagSet. Wrapped to `cols` columns (0 for no // for all flags in the FlagSet. Wrapped to `cols` columns (0 for no
// wrapping) // wrapping)
func (f *FlagSet) FlagUsagesWrapped(cols int) string { func (f *FlagSet) FlagUsagesWrapped(cols int) string {
x := new(bytes.Buffer) buf := new(bytes.Buffer)
lines := make([]string, 0, len(f.formal)) lines := make([]string, 0, len(f.formal))
maxlen := 0 maxlen := 0
f.VisitAll(func(flag *Flag) { f.VisitAll(func(flag *Flag) {
if len(flag.Deprecated) > 0 || flag.Hidden { if flag.Deprecated != "" || flag.Hidden {
return return
} }
line := "" line := ""
if len(flag.Shorthand) > 0 && len(flag.ShorthandDeprecated) == 0 { if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
line = fmt.Sprintf(" -%s, --%s", flag.Shorthand, flag.Name) line = fmt.Sprintf(" -%s, --%s", flag.Shorthand, flag.Name)
} else { } else {
line = fmt.Sprintf(" --%s", flag.Name) line = fmt.Sprintf(" --%s", flag.Name)
} }
varname, usage := UnquoteUsage(flag) varname, usage := UnquoteUsage(flag)
if len(varname) > 0 { if varname != "" {
line += " " + varname line += " " + varname
} }
if len(flag.NoOptDefVal) > 0 { if flag.NoOptDefVal != "" {
switch flag.Value.Type() { switch flag.Value.Type() {
case "string": case "string":
line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal) line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal)
@ -656,10 +688,10 @@ func (f *FlagSet) FlagUsagesWrapped(cols int) string {
sidx := strings.Index(line, "\x00") sidx := strings.Index(line, "\x00")
spacing := strings.Repeat(" ", maxlen-sidx) spacing := strings.Repeat(" ", maxlen-sidx)
// maxlen + 2 comes from + 1 for the \x00 and + 1 for the (deliberate) off-by-one in maxlen-sidx // maxlen + 2 comes from + 1 for the \x00 and + 1 for the (deliberate) off-by-one in maxlen-sidx
fmt.Fprintln(x, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:])) fmt.Fprintln(buf, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:]))
} }
return x.String() return buf.String()
} }
// FlagUsages returns a string containing the usage information for all flags in // FlagUsages returns a string containing the usage information for all flags in
@ -756,11 +788,10 @@ func (f *FlagSet) VarP(value Value, name, shorthand, usage string) {
// AddFlag will add the flag to the FlagSet // AddFlag will add the flag to the FlagSet
func (f *FlagSet) AddFlag(flag *Flag) { func (f *FlagSet) AddFlag(flag *Flag) {
// Call normalizeFlagName function only once
normalizedFlagName := f.normalizeFlagName(flag.Name) normalizedFlagName := f.normalizeFlagName(flag.Name)
_, alreadythere := f.formal[normalizedFlagName] _, alreadyThere := f.formal[normalizedFlagName]
if alreadythere { if alreadyThere {
msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name) msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name)
fmt.Fprintln(f.out(), msg) fmt.Fprintln(f.out(), msg)
panic(msg) // Happens only if flags are declared with identical names panic(msg) // Happens only if flags are declared with identical names
@ -773,27 +804,29 @@ func (f *FlagSet) AddFlag(flag *Flag) {
f.formal[normalizedFlagName] = flag f.formal[normalizedFlagName] = flag
f.orderedFormal = append(f.orderedFormal, flag) f.orderedFormal = append(f.orderedFormal, flag)
if len(flag.Shorthand) == 0 { if flag.Shorthand == "" {
return return
} }
if len(flag.Shorthand) > 1 { if len(flag.Shorthand) > 1 {
fmt.Fprintf(f.out(), "%s shorthand more than ASCII character: %s\n", f.name, flag.Shorthand) msg := fmt.Sprintf("%q shorthand is more than one ASCII character", flag.Shorthand)
panic("shorthand is more than one character") fmt.Fprintf(f.out(), msg)
panic(msg)
} }
if f.shorthands == nil { if f.shorthands == nil {
f.shorthands = make(map[byte]*Flag) f.shorthands = make(map[byte]*Flag)
} }
c := flag.Shorthand[0] c := flag.Shorthand[0]
old, alreadythere := f.shorthands[c] used, alreadyThere := f.shorthands[c]
if alreadythere { if alreadyThere {
fmt.Fprintf(f.out(), "%s shorthand reused: %q for %s already used for %s\n", f.name, c, flag.Name, old.Name) msg := fmt.Sprintf("unable to redefine %q shorthand in %q flagset: it's already used for %q flag", c, f.name, used.Name)
panic("shorthand redefinition") fmt.Fprintf(f.out(), msg)
panic(msg)
} }
f.shorthands[c] = flag f.shorthands[c] = flag
} }
// AddFlagSet adds one FlagSet to another. If a flag is already present in f // AddFlagSet adds one FlagSet to another. If a flag is already present in f
// the flag from newSet will be ignored // the flag from newSet will be ignored.
func (f *FlagSet) AddFlagSet(newSet *FlagSet) { func (f *FlagSet) AddFlagSet(newSet *FlagSet) {
if newSet == nil { if newSet == nil {
return return
@ -841,35 +874,6 @@ func (f *FlagSet) usage() {
} }
} }
func (f *FlagSet) setFlag(flag *Flag, value string, origArg string) error {
if err := flag.Value.Set(value); err != nil {
return f.failf("invalid argument %q for %s: %v", value, origArg, err)
}
// mark as visited for Visit()
if f.actual == nil {
f.actual = make(map[NormalizedName]*Flag)
}
f.actual[f.normalizeFlagName(flag.Name)] = flag
f.orderedActual = append(f.orderedActual, flag)
flag.Changed = true
if len(flag.Deprecated) > 0 {
fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
}
if len(flag.ShorthandDeprecated) > 0 && containsShorthand(origArg, flag.Shorthand) {
fmt.Fprintf(os.Stderr, "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated)
}
return nil
}
func containsShorthand(arg, shorthand string) bool {
// filter out flags --<flag_name>
if strings.HasPrefix(arg, "-") {
return false
}
arg = strings.SplitN(arg, "=", 2)[0]
return strings.Contains(arg, shorthand)
}
func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) { func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) {
a = args a = args
name := s[2:] name := s[2:]
@ -877,10 +881,11 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin
err = f.failf("bad flag syntax: %s", s) err = f.failf("bad flag syntax: %s", s)
return return
} }
split := strings.SplitN(name, "=", 2) split := strings.SplitN(name, "=", 2)
name = split[0] name = split[0]
flag, alreadythere := f.formal[f.normalizeFlagName(name)] flag, exists := f.formal[f.normalizeFlagName(name)]
if !alreadythere { if !exists {
if name == "help" { // special case for nice help message. if name == "help" { // special case for nice help message.
f.usage() f.usage()
return a, ErrHelp return a, ErrHelp
@ -888,11 +893,12 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin
err = f.failf("unknown flag: --%s", name) err = f.failf("unknown flag: --%s", name)
return return
} }
var value string var value string
if len(split) == 2 { if len(split) == 2 {
// '--flag=arg' // '--flag=arg'
value = split[1] value = split[1]
} else if len(flag.NoOptDefVal) > 0 { } else if flag.NoOptDefVal != "" {
// '--flag' (arg was optional) // '--flag' (arg was optional)
value = flag.NoOptDefVal value = flag.NoOptDefVal
} else if len(a) > 0 { } else if len(a) > 0 {
@ -904,7 +910,8 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin
err = f.failf("flag needs an argument: %s", s) err = f.failf("flag needs an argument: %s", s)
return return
} }
err = fn(flag, value, s)
err = fn(flag, value)
return return
} }
@ -912,38 +919,49 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse
if strings.HasPrefix(shorthands, "test.") { if strings.HasPrefix(shorthands, "test.") {
return return
} }
outArgs = args outArgs = args
outShorts = shorthands[1:] outShorts = shorthands[1:]
c := shorthands[0] c := shorthands[0]
flag, alreadythere := f.shorthands[c] flag, exists := f.shorthands[c]
if !alreadythere { if !exists {
if c == 'h' { // special case for nice help message. if c == 'h' { // special case for nice help message.
f.usage() f.usage()
err = ErrHelp err = ErrHelp
return return
} }
//TODO continue on error
err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands) err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands)
return return
} }
var value string var value string
if len(shorthands) > 2 && shorthands[1] == '=' { if len(shorthands) > 2 && shorthands[1] == '=' {
// '-f=arg'
value = shorthands[2:] value = shorthands[2:]
outShorts = "" outShorts = ""
} else if len(flag.NoOptDefVal) > 0 { } else if flag.NoOptDefVal != "" {
// '-f' (arg was optional)
value = flag.NoOptDefVal value = flag.NoOptDefVal
} else if len(shorthands) > 1 { } else if len(shorthands) > 1 {
// '-farg'
value = shorthands[1:] value = shorthands[1:]
outShorts = "" outShorts = ""
} else if len(args) > 0 { } else if len(args) > 0 {
// '-f arg'
value = args[0] value = args[0]
outArgs = args[1:] outArgs = args[1:]
} else { } else {
// '-f' (arg was required)
err = f.failf("flag needs an argument: %q in -%s", c, shorthands) err = f.failf("flag needs an argument: %q in -%s", c, shorthands)
return return
} }
err = fn(flag, value, shorthands)
if flag.ShorthandDeprecated != "" {
fmt.Fprintf(f.out(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated)
}
err = fn(flag, value)
return return
} }
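The comments added above enumerate the accepted shorthand forms; a quick sketch showing them side by side, with an illustrative string flag:

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func parseOne(args []string) string {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	file := fs.StringP("file", "f", "", "input file")
	if err := fs.Parse(args); err != nil {
		return "error: " + err.Error()
	}
	return *file
}

func main() {
	fmt.Println(parseOne([]string{"-f=in.yaml"}))    // '-f=arg'
	fmt.Println(parseOne([]string{"-fin.yaml"}))     // '-farg'
	fmt.Println(parseOne([]string{"-f", "in.yaml"})) // '-f arg'
}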
@ -951,6 +969,7 @@ func (f *FlagSet) parseShortArg(s string, args []string, fn parseFunc) (a []stri
a = args a = args
shorthands := s[1:] shorthands := s[1:]
// "shorthands" can be a series of shorthand letters of flags (e.g. "-vvv").
for len(shorthands) > 0 { for len(shorthands) > 0 {
shorthands, a, err = f.parseSingleShortArg(shorthands, args, fn) shorthands, a, err = f.parseSingleShortArg(shorthands, args, fn)
if err != nil { if err != nil {
@ -998,13 +1017,18 @@ func (f *FlagSet) parseArgs(args []string, fn parseFunc) (err error) {
// The return value will be ErrHelp if -help was set but not defined. // The return value will be ErrHelp if -help was set but not defined.
func (f *FlagSet) Parse(arguments []string) error { func (f *FlagSet) Parse(arguments []string) error {
f.parsed = true f.parsed = true
f.args = make([]string, 0, len(arguments))
assign := func(flag *Flag, value, origArg string) error { if len(arguments) < 0 {
return f.setFlag(flag, value, origArg) return nil
} }
err := f.parseArgs(arguments, assign) f.args = make([]string, 0, len(arguments))
set := func(flag *Flag, value string) error {
return f.Set(flag.Name, value)
}
err := f.parseArgs(arguments, set)
if err != nil { if err != nil {
switch f.errorHandling { switch f.errorHandling {
case ContinueOnError: case ContinueOnError:
@ -1018,7 +1042,7 @@ func (f *FlagSet) Parse(arguments []string) error {
return nil return nil
} }
type parseFunc func(flag *Flag, value, origArg string) error type parseFunc func(flag *Flag, value string) error
// ParseAll parses flag definitions from the argument list, which should not // ParseAll parses flag definitions from the argument list, which should not
// include the command name. The arguments for fn are flag and value. Must be // include the command name. The arguments for fn are flag and value. Must be
@ -1029,11 +1053,7 @@ func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string)
f.parsed = true f.parsed = true
f.args = make([]string, 0, len(arguments)) f.args = make([]string, 0, len(arguments))
assign := func(flag *Flag, value, origArg string) error { err := f.parseArgs(arguments, fn)
return fn(flag, value)
}
err := f.parseArgs(arguments, assign)
if err != nil { if err != nil {
switch f.errorHandling { switch f.errorHandling {
case ContinueOnError: case ContinueOnError:


@ -237,6 +237,11 @@ func newError(err ResultError, context *jsonContext, value interface{}, locale l
err.SetValue(value) err.SetValue(value)
err.SetDetails(details) err.SetDetails(details)
details["field"] = err.Field() details["field"] = err.Field()
if _, exists := details["context"]; !exists && context != nil {
details["context"] = context.String()
}
err.SetDescription(formatErrorDescription(d, details)) err.SetDescription(formatErrorDescription(d, details))
} }
@ -257,6 +262,10 @@ func formatErrorDescription(s string, details ErrorDetails) string {
errorTemplates.Lock() errorTemplates.Lock()
tpl = errorTemplates.New(s) tpl = errorTemplates.New(s)
if ErrorTemplateFuncs != nil {
tpl.Funcs(ErrorTemplateFuncs)
}
tpl, err = tpl.Parse(s) tpl, err = tpl.Parse(s)
errorTemplates.Unlock() errorTemplates.Unlock()


@ -31,6 +31,7 @@ import (
"errors" "errors"
"reflect" "reflect"
"regexp" "regexp"
"text/template"
"github.com/xeipuuv/gojsonreference" "github.com/xeipuuv/gojsonreference"
) )
@ -39,6 +40,9 @@ var (
// Locale is the default locale to use // Locale is the default locale to use
// Library users can overwrite with their own implementation // Library users can overwrite with their own implementation
Locale locale = DefaultLocale{} Locale locale = DefaultLocale{}
// ErrorTemplateFuncs allows you to define custom template funcs for use in localization.
ErrorTemplateFuncs template.FuncMap
) )
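A hedged sketch of how the new ErrorTemplateFuncs hook could be used; the "shout" helper is an assumption for illustration and only takes effect for locale templates that reference it:

package main

import (
	"text/template"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	// Register extra helpers; formatErrorDescription makes them available
	// when it parses the locale's error description templates.
	gojsonschema.ErrorTemplateFuncs = template.FuncMap{
		"shout": func(s string) string { return s + "!" },
	}
	// A custom Locale whose templates call {{shout .field}} (not shown
	// here) would then render through this helper.
}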
func NewSchema(l JSONLoader) (*Schema, error) { func NewSchema(l JSONLoader) (*Schema, error) {


@ -10,8 +10,8 @@
// System calls for amd64, Solaris are implemented in runtime/syscall_solaris.go // System calls for amd64, Solaris are implemented in runtime/syscall_solaris.go
// //
TEXT ·sysvicall6(SB),NOSPLIT,$0-64 TEXT ·sysvicall6(SB),NOSPLIT,$0-88
JMP syscall·sysvicall6(SB) JMP syscall·sysvicall6(SB)
TEXT ·rawSysvicall6(SB),NOSPLIT,$0-64 TEXT ·rawSysvicall6(SB),NOSPLIT,$0-88
JMP syscall·rawSysvicall6(SB) JMP syscall·rawSysvicall6(SB)


@ -13,9 +13,10 @@ import "unsafe"
// Round the length of a raw sockaddr up to align it properly. // Round the length of a raw sockaddr up to align it properly.
func cmsgAlignOf(salen int) int { func cmsgAlignOf(salen int) int {
salign := sizeofPtr salign := sizeofPtr
// NOTE: It seems like 64-bit Darwin and DragonFly BSD kernels // NOTE: It seems like 64-bit Darwin, DragonFly BSD and
// still require 32-bit aligned access to network subsystem. // Solaris kernels still require 32-bit aligned access to
if darwin64Bit || dragonfly64Bit { // network subsystem.
if darwin64Bit || dragonfly64Bit || solaris64Bit {
salign = 4 salign = 4
} }
return (salen + salign - 1) & ^(salign - 1) return (salen + salign - 1) & ^(salign - 1)
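For context, the expression above rounds a raw sockaddr length up to the alignment in use (the platform word size, or 4 on the 64-bit kernels listed); a tiny worked sketch with assumed lengths:

package main

import "fmt"

// alignUp mirrors the rounding expression used in cmsgAlignOf above:
// round salen up to the next multiple of salign.
func alignUp(salen, salign int) int {
	return (salen + salign - 1) & ^(salign - 1)
}

func main() {
	fmt.Println(alignUp(17, 8)) // 24 with 8-byte alignment
	fmt.Println(alignUp(17, 4)) // 20 with the 4-byte exception
	fmt.Println(alignUp(16, 8)) // 16: already-aligned lengths are unchanged
}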


@ -1,8 +1,8 @@
// Copyright 2009,2010 The Go Authors. All rights reserved. // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// FreeBSD system calls. // DragonFly BSD system calls.
// This file is compiled as ordinary Go code, // This file is compiled as ordinary Go code,
// but it is also input to mksyscall, // but it is also input to mksyscall,
// which parses the //sys lines and generates system call stubs. // which parses the //sys lines and generates system call stubs.
@ -57,7 +57,7 @@ func nametomib(name string) (mib []_C_int, err error) {
} }
func direntIno(buf []byte) (uint64, bool) { func direntIno(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino)) return readInt(buf, unsafe.Offsetof(Dirent{}.Fileno), unsafe.Sizeof(Dirent{}.Fileno))
} }
func direntReclen(buf []byte) (uint64, bool) { func direntReclen(buf []byte) (uint64, bool) {
@ -92,6 +92,24 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
return extpwrite(fd, p, 0, offset) return extpwrite(fd, p, 0, offset)
} }
func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) {
var rsa RawSockaddrAny
var len _Socklen = SizeofSockaddrAny
nfd, err = accept4(fd, &rsa, &len, flags)
if err != nil {
return
}
if len > SizeofSockaddrAny {
panic("RawSockaddrAny too small")
}
sa, err = anyToSockaddr(&rsa)
if err != nil {
Close(nfd)
nfd = 0
}
return
}
func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
var _p0 unsafe.Pointer var _p0 unsafe.Pointer
var bufsize uintptr var bufsize uintptr
@ -199,6 +217,7 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
//sys munmap(addr uintptr, length uintptr) (err error) //sys munmap(addr uintptr, length uintptr) (err error)
//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ //sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ
//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE //sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE
//sys accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error)
/* /*
* Unimplemented * Unimplemented
@ -234,6 +253,8 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
// Kdebug_trace // Kdebug_trace
// Sigreturn // Sigreturn
// Mmap // Mmap
// Mlock
// Munlock
// Atsocket // Atsocket
// Kqueue_from_portset_np // Kqueue_from_portset_np
// Kqueue_portset // Kqueue_portset
@ -326,6 +347,8 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
// Lio_listio // Lio_listio
// __pthread_cond_wait // __pthread_cond_wait
// Iopolicysys // Iopolicysys
// Mlockall
// Munlockall
// __pthread_kill // __pthread_kill
// __pthread_sigmask // __pthread_sigmask
// __sigwait // __sigwait


@ -1038,6 +1038,7 @@ func Mount(source string, target string, fstype string, flags uintptr, data stri
//sys Chroot(path string) (err error) //sys Chroot(path string) (err error)
//sys ClockGettime(clockid int32, time *Timespec) (err error) //sys ClockGettime(clockid int32, time *Timespec) (err error)
//sys Close(fd int) (err error) //sys Close(fd int) (err error)
//sys CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error)
//sys Dup(oldfd int) (fd int, err error) //sys Dup(oldfd int) (fd int, err error)
//sys Dup3(oldfd int, newfd int, flags int) (err error) //sys Dup3(oldfd int, newfd int, flags int) (err error)
//sysnb EpollCreate(size int) (fd int, err error) //sysnb EpollCreate(size int) (fd int, err error)


@ -7,6 +7,7 @@
package unix package unix
//sys Dup2(oldfd int, newfd int) (err error)
//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error)
//sys Fchown(fd int, uid int, gid int) (err error) //sys Fchown(fd int, uid int, gid int) (err error)
//sys Fstatfs(fd int, buf *Statfs_t) (err error) //sys Fstatfs(fd int, buf *Statfs_t) (err error)
@ -182,9 +183,9 @@ func fillStat_t(s *Stat_t, st *stat_t) {
s.Blocks = st.Blocks s.Blocks = st.Blocks
} }
func (r *PtraceRegs) PC() uint64 { return r.Regs[64] } func (r *PtraceRegs) PC() uint64 { return r.Epc }
func (r *PtraceRegs) SetPC(pc uint64) { r.Regs[64] = pc } func (r *PtraceRegs) SetPC(pc uint64) { r.Epc = pc }
func (iov *Iovec) SetLen(length int) { func (iov *Iovec) SetLen(length int) {
iov.Len = uint64(length) iov.Len = uint64(length)


@ -211,9 +211,9 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) {
return setrlimit(resource, &rl) return setrlimit(resource, &rl)
} }
func (r *PtraceRegs) PC() uint64 { return uint64(r.Regs[64]) } func (r *PtraceRegs) PC() uint64 { return r.Epc }
func (r *PtraceRegs) SetPC(pc uint64) { r.Regs[64] = uint32(pc) } func (r *PtraceRegs) SetPC(pc uint64) { r.Epc = pc }
func (iov *Iovec) SetLen(length int) { func (iov *Iovec) SetLen(length int) {
iov.Len = uint32(length) iov.Len = uint32(length)


@ -422,7 +422,7 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) {
return return
} }
//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) = libsocket.recvmsg //sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) = libsocket.__xnet_recvmsg
func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) {
var msg Msghdr var msg Msghdr
@ -441,7 +441,7 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from
iov.Base = &dummy iov.Base = &dummy
iov.SetLen(1) iov.SetLen(1)
} }
msg.Accrights = (*int8)(unsafe.Pointer(&oob[0])) msg.Accrightslen = int32(len(oob))
} }
msg.Iov = &iov msg.Iov = &iov
msg.Iovlen = 1 msg.Iovlen = 1
@ -461,7 +461,7 @@ func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) {
return return
} }
//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = libsocket.sendmsg //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = libsocket.__xnet_sendmsg
func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) { func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) {
var ptr unsafe.Pointer var ptr unsafe.Pointer
@ -487,7 +487,7 @@ func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error)
iov.Base = &dummy iov.Base = &dummy
iov.SetLen(1) iov.SetLen(1)
} }
msg.Accrights = (*int8)(unsafe.Pointer(&oob[0])) msg.Accrightslen = int32(len(oob))
} }
msg.Iov = &iov msg.Iov = &iov
msg.Iovlen = 1 msg.Iovlen = 1
@ -583,6 +583,7 @@ func IoctlGetTermio(fd int, req int) (*Termio, error) {
//sys Fdatasync(fd int) (err error) //sys Fdatasync(fd int) (err error)
//sys Fpathconf(fd int, name int) (val int, err error) //sys Fpathconf(fd int, name int) (val int, err error)
//sys Fstat(fd int, stat *Stat_t) (err error) //sys Fstat(fd int, stat *Stat_t) (err error)
//sys Fstatvfs(fd int, vfsstat *Statvfs_t) (err error)
//sys Getdents(fd int, buf []byte, basep *uintptr) (n int, err error) //sys Getdents(fd int, buf []byte, basep *uintptr) (n int, err error)
//sysnb Getgid() (gid int) //sysnb Getgid() (gid int)
//sysnb Getpid() (pid int) //sysnb Getpid() (pid int)
@ -599,7 +600,7 @@ func IoctlGetTermio(fd int, req int) (*Termio, error) {
//sys Kill(pid int, signum syscall.Signal) (err error) //sys Kill(pid int, signum syscall.Signal) (err error)
//sys Lchown(path string, uid int, gid int) (err error) //sys Lchown(path string, uid int, gid int) (err error)
//sys Link(path string, link string) (err error) //sys Link(path string, link string) (err error)
//sys Listen(s int, backlog int) (err error) = libsocket.listen //sys Listen(s int, backlog int) (err error) = libsocket.__xnet_llisten
//sys Lstat(path string, stat *Stat_t) (err error) //sys Lstat(path string, stat *Stat_t) (err error)
//sys Madvise(b []byte, advice int) (err error) //sys Madvise(b []byte, advice int) (err error)
//sys Mkdir(path string, mode uint32) (err error) //sys Mkdir(path string, mode uint32) (err error)
@ -639,6 +640,7 @@ func IoctlGetTermio(fd int, req int) (*Termio, error) {
//sysnb Setuid(uid int) (err error) //sysnb Setuid(uid int) (err error)
//sys Shutdown(s int, how int) (err error) = libsocket.shutdown //sys Shutdown(s int, how int) (err error) = libsocket.shutdown
//sys Stat(path string, stat *Stat_t) (err error) //sys Stat(path string, stat *Stat_t) (err error)
//sys Statvfs(path string, vfsstat *Statvfs_t) (err error)
//sys Symlink(path string, link string) (err error) //sys Symlink(path string, link string) (err error)
//sys Sync() (err error) //sys Sync() (err error)
//sysnb Times(tms *Tms) (ticks uintptr, err error) //sysnb Times(tms *Tms) (ticks uintptr, err error)
@ -652,15 +654,15 @@ func IoctlGetTermio(fd int, req int) (*Termio, error) {
//sys Unlinkat(dirfd int, path string, flags int) (err error) //sys Unlinkat(dirfd int, path string, flags int) (err error)
//sys Ustat(dev int, ubuf *Ustat_t) (err error) //sys Ustat(dev int, ubuf *Ustat_t) (err error)
//sys Utime(path string, buf *Utimbuf) (err error) //sys Utime(path string, buf *Utimbuf) (err error)
//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = libsocket.bind //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = libsocket.__xnet_bind
//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = libsocket.connect //sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = libsocket.__xnet_connect
//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
//sys munmap(addr uintptr, length uintptr) (err error) //sys munmap(addr uintptr, length uintptr) (err error)
//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) = libsocket.sendto //sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) = libsocket.__xnet_sendto
//sys socket(domain int, typ int, proto int) (fd int, err error) = libsocket.socket //sys socket(domain int, typ int, proto int) (fd int, err error) = libsocket.__xnet_socket
//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) = libsocket.socketpair //sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) = libsocket.__xnet_socketpair
//sys write(fd int, p []byte) (n int, err error) //sys write(fd int, p []byte) (n int, err error)
//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) = libsocket.getsockopt //sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) = libsocket.__xnet_getsockopt
//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = libsocket.getpeername //sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = libsocket.getpeername
//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) = libsocket.setsockopt //sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) = libsocket.setsockopt
//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) = libsocket.recvfrom //sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) = libsocket.recvfrom


@ -23,6 +23,7 @@ const (
darwin64Bit = runtime.GOOS == "darwin" && sizeofPtr == 8 darwin64Bit = runtime.GOOS == "darwin" && sizeofPtr == 8
dragonfly64Bit = runtime.GOOS == "dragonfly" && sizeofPtr == 8 dragonfly64Bit = runtime.GOOS == "dragonfly" && sizeofPtr == 8
netbsd32Bit = runtime.GOOS == "netbsd" && sizeofPtr == 4 netbsd32Bit = runtime.GOOS == "netbsd" && sizeofPtr == 4
solaris64Bit = runtime.GOOS == "solaris" && sizeofPtr == 8
) )
// Do the interface allocations only once for common // Do the interface allocations only once for common


@ -37,6 +37,7 @@ package unix
#include <sys/signal.h> #include <sys/signal.h>
#include <sys/socket.h> #include <sys/socket.h>
#include <sys/stat.h> #include <sys/stat.h>
#include <sys/statvfs.h>
#include <sys/time.h> #include <sys/time.h>
#include <sys/times.h> #include <sys/times.h>
#include <sys/types.h> #include <sys/types.h>
@ -139,6 +140,12 @@ type Flock_t C.struct_flock
type Dirent C.struct_dirent type Dirent C.struct_dirent
// Filesystems
type _Fsblkcnt_t C.fsblkcnt_t
type Statvfs_t C.struct_statvfs
// Sockets // Sockets
type RawSockaddrInet4 C.struct_sockaddr_in type RawSockaddrInet4 C.struct_sockaddr_in


@ -1,5 +1,5 @@
// mkerrors.sh -m64 // mkerrors.sh -m64
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT // Code generated by the command above; see README.md. DO NOT EDIT.
// +build amd64,dragonfly // +build amd64,dragonfly
@ -37,8 +37,8 @@ const (
AF_MAX = 0x24 AF_MAX = 0x24
AF_MPLS = 0x22 AF_MPLS = 0x22
AF_NATM = 0x1d AF_NATM = 0x1d
AF_NETBIOS = 0x6
AF_NETGRAPH = 0x20 AF_NETGRAPH = 0x20
AF_NS = 0x6
AF_OSI = 0x7 AF_OSI = 0x7
AF_PUP = 0x4 AF_PUP = 0x4
AF_ROUTE = 0x11 AF_ROUTE = 0x11
@ -46,6 +46,7 @@ const (
AF_SNA = 0xb AF_SNA = 0xb
AF_UNIX = 0x1 AF_UNIX = 0x1
AF_UNSPEC = 0x0 AF_UNSPEC = 0x0
ALTWERASE = 0x200
B0 = 0x0 B0 = 0x0
B110 = 0x6e B110 = 0x6e
B115200 = 0x1c200 B115200 = 0x1c200
@ -141,7 +142,22 @@ const (
BRKINT = 0x2 BRKINT = 0x2
CFLUSH = 0xf CFLUSH = 0xf
CLOCAL = 0x8000 CLOCAL = 0x8000
CLOCK_MONOTONIC = 0x4
CLOCK_MONOTONIC_FAST = 0xc
CLOCK_MONOTONIC_PRECISE = 0xb
CLOCK_PROCESS_CPUTIME_ID = 0xf
CLOCK_PROF = 0x2
CLOCK_REALTIME = 0x0
CLOCK_REALTIME_FAST = 0xa
CLOCK_REALTIME_PRECISE = 0x9
CLOCK_SECOND = 0xd
CLOCK_THREAD_CPUTIME_ID = 0xe
CLOCK_UPTIME = 0x5
CLOCK_UPTIME_FAST = 0x8
CLOCK_UPTIME_PRECISE = 0x7
CLOCK_VIRTUAL = 0x1
CREAD = 0x800 CREAD = 0x800
CRTSCTS = 0x30000
CS5 = 0x0 CS5 = 0x0
CS6 = 0x100 CS6 = 0x100
CS7 = 0x200 CS7 = 0x200
@ -286,24 +302,28 @@ const (
ECHOPRT = 0x20 ECHOPRT = 0x20
EVFILT_AIO = -0x3 EVFILT_AIO = -0x3
EVFILT_EXCEPT = -0x8 EVFILT_EXCEPT = -0x8
EVFILT_FS = -0xa
EVFILT_MARKER = 0xf EVFILT_MARKER = 0xf
EVFILT_PROC = -0x5 EVFILT_PROC = -0x5
EVFILT_READ = -0x1 EVFILT_READ = -0x1
EVFILT_SIGNAL = -0x6 EVFILT_SIGNAL = -0x6
EVFILT_SYSCOUNT = 0x8 EVFILT_SYSCOUNT = 0xa
EVFILT_TIMER = -0x7 EVFILT_TIMER = -0x7
EVFILT_USER = -0x9
EVFILT_VNODE = -0x4 EVFILT_VNODE = -0x4
EVFILT_WRITE = -0x2 EVFILT_WRITE = -0x2
EV_ADD = 0x1 EV_ADD = 0x1
EV_CLEAR = 0x20 EV_CLEAR = 0x20
EV_DELETE = 0x2 EV_DELETE = 0x2
EV_DISABLE = 0x8 EV_DISABLE = 0x8
EV_DISPATCH = 0x80
EV_ENABLE = 0x4 EV_ENABLE = 0x4
EV_EOF = 0x8000 EV_EOF = 0x8000
EV_ERROR = 0x4000 EV_ERROR = 0x4000
EV_FLAG1 = 0x2000 EV_FLAG1 = 0x2000
EV_NODATA = 0x1000 EV_NODATA = 0x1000
EV_ONESHOT = 0x10 EV_ONESHOT = 0x10
EV_RECEIPT = 0x40
EV_SYSFLAGS = 0xf000 EV_SYSFLAGS = 0xf000
EXTA = 0x4b00 EXTA = 0x4b00
EXTB = 0x9600 EXTB = 0x9600
@ -679,7 +699,6 @@ const (
IPPROTO_SATEXPAK = 0x40 IPPROTO_SATEXPAK = 0x40
IPPROTO_SATMON = 0x45 IPPROTO_SATMON = 0x45
IPPROTO_SCCSP = 0x60 IPPROTO_SCCSP = 0x60
IPPROTO_SCTP = 0x84
IPPROTO_SDRP = 0x2a IPPROTO_SDRP = 0x2a
IPPROTO_SEP = 0x21 IPPROTO_SEP = 0x21
IPPROTO_SKIP = 0x39 IPPROTO_SKIP = 0x39
@ -730,6 +749,7 @@ const (
IPV6_LEAVE_GROUP = 0xd IPV6_LEAVE_GROUP = 0xd
IPV6_MAXHLIM = 0xff IPV6_MAXHLIM = 0xff
IPV6_MAXPACKET = 0xffff IPV6_MAXPACKET = 0xffff
IPV6_MINHLIM = 0x28
IPV6_MMTU = 0x500 IPV6_MMTU = 0x500
IPV6_MSFILTER = 0x4a IPV6_MSFILTER = 0x4a
IPV6_MULTICAST_HOPS = 0xa IPV6_MULTICAST_HOPS = 0xa
@ -778,6 +798,7 @@ const (
IP_FW_FLUSH = 0x34 IP_FW_FLUSH = 0x34
IP_FW_GET = 0x36 IP_FW_GET = 0x36
IP_FW_RESETLOG = 0x37 IP_FW_RESETLOG = 0x37
IP_FW_X = 0x31
IP_FW_ZERO = 0x35 IP_FW_ZERO = 0x35
IP_HDRINCL = 0x2 IP_HDRINCL = 0x2
IP_IPSEC_POLICY = 0x15 IP_IPSEC_POLICY = 0x15
@ -833,6 +854,7 @@ const (
MADV_SETMAP = 0xb MADV_SETMAP = 0xb
MADV_WILLNEED = 0x3 MADV_WILLNEED = 0x3
MAP_ANON = 0x1000 MAP_ANON = 0x1000
MAP_ANONYMOUS = 0x1000
MAP_COPY = 0x2 MAP_COPY = 0x2
MAP_FILE = 0x0 MAP_FILE = 0x0
MAP_FIXED = 0x10 MAP_FIXED = 0x10
@ -851,6 +873,7 @@ const (
MAP_VPAGETABLE = 0x2000 MAP_VPAGETABLE = 0x2000
MCL_CURRENT = 0x1 MCL_CURRENT = 0x1
MCL_FUTURE = 0x2 MCL_FUTURE = 0x2
MSG_CMSG_CLOEXEC = 0x1000
MSG_CTRUNC = 0x20 MSG_CTRUNC = 0x20
MSG_DONTROUTE = 0x4 MSG_DONTROUTE = 0x4
MSG_DONTWAIT = 0x80 MSG_DONTWAIT = 0x80
@ -860,11 +883,11 @@ const (
MSG_FMASK = 0xffff0000 MSG_FMASK = 0xffff0000
MSG_FNONBLOCKING = 0x20000 MSG_FNONBLOCKING = 0x20000
MSG_NOSIGNAL = 0x400 MSG_NOSIGNAL = 0x400
MSG_NOTIFICATION = 0x200
MSG_OOB = 0x1 MSG_OOB = 0x1
MSG_PEEK = 0x2 MSG_PEEK = 0x2
MSG_SYNC = 0x800 MSG_SYNC = 0x800
MSG_TRUNC = 0x10 MSG_TRUNC = 0x10
MSG_UNUSED09 = 0x200
MSG_WAITALL = 0x40 MSG_WAITALL = 0x40
MS_ASYNC = 0x1 MS_ASYNC = 0x1
MS_INVALIDATE = 0x2 MS_INVALIDATE = 0x2
@ -875,12 +898,19 @@ const (
NET_RT_IFLIST = 0x3 NET_RT_IFLIST = 0x3
NET_RT_MAXID = 0x4 NET_RT_MAXID = 0x4
NOFLSH = 0x80000000 NOFLSH = 0x80000000
NOKERNINFO = 0x2000000
NOTE_ATTRIB = 0x8 NOTE_ATTRIB = 0x8
NOTE_CHILD = 0x4 NOTE_CHILD = 0x4
NOTE_DELETE = 0x1 NOTE_DELETE = 0x1
NOTE_EXEC = 0x20000000 NOTE_EXEC = 0x20000000
NOTE_EXIT = 0x80000000 NOTE_EXIT = 0x80000000
NOTE_EXTEND = 0x4 NOTE_EXTEND = 0x4
NOTE_FFAND = 0x40000000
NOTE_FFCOPY = 0xc0000000
NOTE_FFCTRLMASK = 0xc0000000
NOTE_FFLAGSMASK = 0xffffff
NOTE_FFNOP = 0x0
NOTE_FFOR = 0x80000000
NOTE_FORK = 0x40000000 NOTE_FORK = 0x40000000
NOTE_LINK = 0x10 NOTE_LINK = 0x10
NOTE_LOWAT = 0x1 NOTE_LOWAT = 0x1
@ -891,6 +921,7 @@ const (
NOTE_REVOKE = 0x40 NOTE_REVOKE = 0x40
NOTE_TRACK = 0x1 NOTE_TRACK = 0x1
NOTE_TRACKERR = 0x2 NOTE_TRACKERR = 0x2
NOTE_TRIGGER = 0x1000000
NOTE_WRITE = 0x2 NOTE_WRITE = 0x2
OCRNL = 0x10 OCRNL = 0x10
ONLCR = 0x2 ONLCR = 0x2
@ -898,6 +929,7 @@ const (
ONOCR = 0x20 ONOCR = 0x20
ONOEOT = 0x8 ONOEOT = 0x8
OPOST = 0x1 OPOST = 0x1
OXTABS = 0x4
O_ACCMODE = 0x3 O_ACCMODE = 0x3
O_APPEND = 0x8 O_APPEND = 0x8
O_ASYNC = 0x40 O_ASYNC = 0x40
@ -910,14 +942,11 @@ const (
O_FAPPEND = 0x100000 O_FAPPEND = 0x100000
O_FASYNCWRITE = 0x800000 O_FASYNCWRITE = 0x800000
O_FBLOCKING = 0x40000 O_FBLOCKING = 0x40000
O_FBUFFERED = 0x2000000 O_FMASK = 0xfc0000
O_FMASK = 0x7fc0000
O_FNONBLOCKING = 0x80000 O_FNONBLOCKING = 0x80000
O_FOFFSET = 0x200000 O_FOFFSET = 0x200000
O_FSYNC = 0x80 O_FSYNC = 0x80
O_FSYNCWRITE = 0x400000 O_FSYNCWRITE = 0x400000
O_FUNBUFFERED = 0x1000000
O_MAPONREAD = 0x4000000
O_NDELAY = 0x4 O_NDELAY = 0x4
O_NOCTTY = 0x8000 O_NOCTTY = 0x8000
O_NOFOLLOW = 0x100 O_NOFOLLOW = 0x100
@ -1096,8 +1125,10 @@ const (
SIOCSLIFPHYADDR = 0x8118694a SIOCSLIFPHYADDR = 0x8118694a
SIOCSLOWAT = 0x80047302 SIOCSLOWAT = 0x80047302
SIOCSPGRP = 0x80047308 SIOCSPGRP = 0x80047308
SOCK_CLOEXEC = 0x10000000
SOCK_DGRAM = 0x2 SOCK_DGRAM = 0x2
SOCK_MAXADDRLEN = 0xff SOCK_MAXADDRLEN = 0xff
SOCK_NONBLOCK = 0x20000000
SOCK_RAW = 0x3 SOCK_RAW = 0x3
SOCK_RDM = 0x4 SOCK_RDM = 0x4
SOCK_SEQPACKET = 0x5 SOCK_SEQPACKET = 0x5
@ -1107,6 +1138,7 @@ const (
SO_ACCEPTCONN = 0x2 SO_ACCEPTCONN = 0x2
SO_ACCEPTFILTER = 0x1000 SO_ACCEPTFILTER = 0x1000
SO_BROADCAST = 0x20 SO_BROADCAST = 0x20
SO_CPUHINT = 0x1030
SO_DEBUG = 0x1 SO_DEBUG = 0x1
SO_DONTROUTE = 0x10 SO_DONTROUTE = 0x10
SO_ERROR = 0x1007 SO_ERROR = 0x1007
@ -1127,8 +1159,12 @@ const (
SO_TYPE = 0x1008 SO_TYPE = 0x1008
SO_USELOOPBACK = 0x40 SO_USELOOPBACK = 0x40
TCIFLUSH = 0x1 TCIFLUSH = 0x1
TCIOFF = 0x3
TCIOFLUSH = 0x3 TCIOFLUSH = 0x3
TCION = 0x4
TCOFLUSH = 0x2 TCOFLUSH = 0x2
TCOOFF = 0x1
TCOON = 0x2
TCP_FASTKEEP = 0x80 TCP_FASTKEEP = 0x80
TCP_KEEPCNT = 0x400 TCP_KEEPCNT = 0x400
TCP_KEEPIDLE = 0x100 TCP_KEEPIDLE = 0x100
@ -1227,6 +1263,8 @@ const (
VKILL = 0x5 VKILL = 0x5
VLNEXT = 0xe VLNEXT = 0xe
VMIN = 0x10 VMIN = 0x10
VM_BCACHE_SIZE_MAX = 0x0
VM_SWZONE_SIZE_MAX = 0x4000000000
VQUIT = 0x9 VQUIT = 0x9
VREPRINT = 0x6 VREPRINT = 0x6
VSTART = 0xc VSTART = 0xc


@ -168,6 +168,7 @@ const (
BLKFRASET = 0x1264 BLKFRASET = 0x1264
BLKGETSIZE = 0x1260 BLKGETSIZE = 0x1260
BLKGETSIZE64 = 0x80041272 BLKGETSIZE64 = 0x80041272
BLKPBSZGET = 0x127b
BLKRAGET = 0x1263 BLKRAGET = 0x1263
BLKRASET = 0x1262 BLKRASET = 0x1262
BLKROGET = 0x125e BLKROGET = 0x125e
@ -449,6 +450,24 @@ const (
FF1 = 0x8000 FF1 = 0x8000
FFDLY = 0x8000 FFDLY = 0x8000
FLUSHO = 0x1000 FLUSHO = 0x1000
FS_ENCRYPTION_MODE_AES_256_CBC = 0x3
FS_ENCRYPTION_MODE_AES_256_CTS = 0x4
FS_ENCRYPTION_MODE_AES_256_GCM = 0x2
FS_ENCRYPTION_MODE_AES_256_XTS = 0x1
FS_ENCRYPTION_MODE_INVALID = 0x0
FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613
FS_KEY_DESCRIPTOR_SIZE = 0x8
FS_KEY_DESC_PREFIX = "fscrypt:"
FS_KEY_DESC_PREFIX_SIZE = 0x8
FS_MAX_KEY_SIZE = 0x40
FS_POLICY_FLAGS_PAD_16 = 0x2
FS_POLICY_FLAGS_PAD_32 = 0x3
FS_POLICY_FLAGS_PAD_4 = 0x0
FS_POLICY_FLAGS_PAD_8 = 0x1
FS_POLICY_FLAGS_PAD_MASK = 0x3
FS_POLICY_FLAGS_VALID = 0x3
F_DUPFD = 0x0 F_DUPFD = 0x0
F_DUPFD_CLOEXEC = 0x406 F_DUPFD_CLOEXEC = 0x406
F_EXLCK = 0x4 F_EXLCK = 0x4
@ -810,6 +829,7 @@ const (
MS_ACTIVE = 0x40000000 MS_ACTIVE = 0x40000000
MS_ASYNC = 0x1 MS_ASYNC = 0x1
MS_BIND = 0x1000 MS_BIND = 0x1000
MS_BORN = 0x20000000
MS_DIRSYNC = 0x80 MS_DIRSYNC = 0x80
MS_INVALIDATE = 0x2 MS_INVALIDATE = 0x2
MS_I_VERSION = 0x800000 MS_I_VERSION = 0x800000
@ -823,6 +843,8 @@ const (
MS_NODEV = 0x4 MS_NODEV = 0x4
MS_NODIRATIME = 0x800 MS_NODIRATIME = 0x800
MS_NOEXEC = 0x8 MS_NOEXEC = 0x8
MS_NOREMOTELOCK = 0x8000000
MS_NOSEC = 0x10000000
MS_NOSUID = 0x2 MS_NOSUID = 0x2
MS_NOUSER = -0x80000000 MS_NOUSER = -0x80000000
MS_POSIXACL = 0x10000 MS_POSIXACL = 0x10000
@ -839,6 +861,7 @@ const (
MS_SYNC = 0x4 MS_SYNC = 0x4
MS_SYNCHRONOUS = 0x10 MS_SYNCHRONOUS = 0x10
MS_UNBINDABLE = 0x20000 MS_UNBINDABLE = 0x20000
MS_VERBOSE = 0x8000
NAME_MAX = 0xff NAME_MAX = 0xff
NETLINK_ADD_MEMBERSHIP = 0x1 NETLINK_ADD_MEMBERSHIP = 0x1
NETLINK_AUDIT = 0x9 NETLINK_AUDIT = 0x9


@ -168,6 +168,7 @@ const (
BLKFRASET = 0x1264 BLKFRASET = 0x1264
BLKGETSIZE = 0x1260 BLKGETSIZE = 0x1260
BLKGETSIZE64 = 0x80081272 BLKGETSIZE64 = 0x80081272
BLKPBSZGET = 0x127b
BLKRAGET = 0x1263 BLKRAGET = 0x1263
BLKRASET = 0x1262 BLKRASET = 0x1262
BLKROGET = 0x125e BLKROGET = 0x125e
@ -449,6 +450,24 @@ const (
FF1 = 0x8000 FF1 = 0x8000
FFDLY = 0x8000 FFDLY = 0x8000
FLUSHO = 0x1000 FLUSHO = 0x1000
FS_ENCRYPTION_MODE_AES_256_CBC = 0x3
FS_ENCRYPTION_MODE_AES_256_CTS = 0x4
FS_ENCRYPTION_MODE_AES_256_GCM = 0x2
FS_ENCRYPTION_MODE_AES_256_XTS = 0x1
FS_ENCRYPTION_MODE_INVALID = 0x0
FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613
FS_KEY_DESCRIPTOR_SIZE = 0x8
FS_KEY_DESC_PREFIX = "fscrypt:"
FS_KEY_DESC_PREFIX_SIZE = 0x8
FS_MAX_KEY_SIZE = 0x40
FS_POLICY_FLAGS_PAD_16 = 0x2
FS_POLICY_FLAGS_PAD_32 = 0x3
FS_POLICY_FLAGS_PAD_4 = 0x0
FS_POLICY_FLAGS_PAD_8 = 0x1
FS_POLICY_FLAGS_PAD_MASK = 0x3
FS_POLICY_FLAGS_VALID = 0x3
F_DUPFD = 0x0 F_DUPFD = 0x0
F_DUPFD_CLOEXEC = 0x406 F_DUPFD_CLOEXEC = 0x406
F_EXLCK = 0x4 F_EXLCK = 0x4
@ -810,6 +829,7 @@ const (
MS_ACTIVE = 0x40000000 MS_ACTIVE = 0x40000000
MS_ASYNC = 0x1 MS_ASYNC = 0x1
MS_BIND = 0x1000 MS_BIND = 0x1000
MS_BORN = 0x20000000
MS_DIRSYNC = 0x80 MS_DIRSYNC = 0x80
MS_INVALIDATE = 0x2 MS_INVALIDATE = 0x2
MS_I_VERSION = 0x800000 MS_I_VERSION = 0x800000
@ -823,6 +843,8 @@ const (
MS_NODEV = 0x4 MS_NODEV = 0x4
MS_NODIRATIME = 0x800 MS_NODIRATIME = 0x800
MS_NOEXEC = 0x8 MS_NOEXEC = 0x8
MS_NOREMOTELOCK = 0x8000000
MS_NOSEC = 0x10000000
MS_NOSUID = 0x2 MS_NOSUID = 0x2
MS_NOUSER = -0x80000000 MS_NOUSER = -0x80000000
MS_POSIXACL = 0x10000 MS_POSIXACL = 0x10000
@ -839,6 +861,7 @@ const (
MS_SYNC = 0x4 MS_SYNC = 0x4
MS_SYNCHRONOUS = 0x10 MS_SYNCHRONOUS = 0x10
MS_UNBINDABLE = 0x20000 MS_UNBINDABLE = 0x20000
MS_VERBOSE = 0x8000
NAME_MAX = 0xff NAME_MAX = 0xff
NETLINK_ADD_MEMBERSHIP = 0x1 NETLINK_ADD_MEMBERSHIP = 0x1
NETLINK_AUDIT = 0x9 NETLINK_AUDIT = 0x9


@ -168,6 +168,7 @@ const (
BLKFRASET = 0x1264 BLKFRASET = 0x1264
BLKGETSIZE = 0x1260 BLKGETSIZE = 0x1260
BLKGETSIZE64 = 0x80041272 BLKGETSIZE64 = 0x80041272
BLKPBSZGET = 0x127b
BLKRAGET = 0x1263 BLKRAGET = 0x1263
BLKRASET = 0x1262 BLKRASET = 0x1262
BLKROGET = 0x125e BLKROGET = 0x125e
@ -449,6 +450,24 @@ const (
FF1 = 0x8000 FF1 = 0x8000
FFDLY = 0x8000 FFDLY = 0x8000
FLUSHO = 0x1000 FLUSHO = 0x1000
FS_ENCRYPTION_MODE_AES_256_CBC = 0x3
FS_ENCRYPTION_MODE_AES_256_CTS = 0x4
FS_ENCRYPTION_MODE_AES_256_GCM = 0x2
FS_ENCRYPTION_MODE_AES_256_XTS = 0x1
FS_ENCRYPTION_MODE_INVALID = 0x0
FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613
FS_KEY_DESCRIPTOR_SIZE = 0x8
FS_KEY_DESC_PREFIX = "fscrypt:"
FS_KEY_DESC_PREFIX_SIZE = 0x8
FS_MAX_KEY_SIZE = 0x40
FS_POLICY_FLAGS_PAD_16 = 0x2
FS_POLICY_FLAGS_PAD_32 = 0x3
FS_POLICY_FLAGS_PAD_4 = 0x0
FS_POLICY_FLAGS_PAD_8 = 0x1
FS_POLICY_FLAGS_PAD_MASK = 0x3
FS_POLICY_FLAGS_VALID = 0x3
F_DUPFD = 0x0 F_DUPFD = 0x0
F_DUPFD_CLOEXEC = 0x406 F_DUPFD_CLOEXEC = 0x406
F_EXLCK = 0x4 F_EXLCK = 0x4
@ -809,6 +828,7 @@ const (
MS_ACTIVE = 0x40000000 MS_ACTIVE = 0x40000000
MS_ASYNC = 0x1 MS_ASYNC = 0x1
MS_BIND = 0x1000 MS_BIND = 0x1000
MS_BORN = 0x20000000
MS_DIRSYNC = 0x80 MS_DIRSYNC = 0x80
MS_INVALIDATE = 0x2 MS_INVALIDATE = 0x2
MS_I_VERSION = 0x800000 MS_I_VERSION = 0x800000
@ -822,6 +842,8 @@ const (
MS_NODEV = 0x4 MS_NODEV = 0x4
MS_NODIRATIME = 0x800 MS_NODIRATIME = 0x800
MS_NOEXEC = 0x8 MS_NOEXEC = 0x8
MS_NOREMOTELOCK = 0x8000000
MS_NOSEC = 0x10000000
MS_NOSUID = 0x2 MS_NOSUID = 0x2
MS_NOUSER = -0x80000000 MS_NOUSER = -0x80000000
MS_POSIXACL = 0x10000 MS_POSIXACL = 0x10000
@ -838,6 +860,7 @@ const (
MS_SYNC = 0x4 MS_SYNC = 0x4
MS_SYNCHRONOUS = 0x10 MS_SYNCHRONOUS = 0x10
MS_UNBINDABLE = 0x20000 MS_UNBINDABLE = 0x20000
MS_VERBOSE = 0x8000
NAME_MAX = 0xff NAME_MAX = 0xff
NETLINK_ADD_MEMBERSHIP = 0x1 NETLINK_ADD_MEMBERSHIP = 0x1
NETLINK_AUDIT = 0x9 NETLINK_AUDIT = 0x9


@ -168,6 +168,7 @@ const (
BLKFRASET = 0x1264 BLKFRASET = 0x1264
BLKGETSIZE = 0x1260 BLKGETSIZE = 0x1260
BLKGETSIZE64 = 0x80081272 BLKGETSIZE64 = 0x80081272
BLKPBSZGET = 0x127b
BLKRAGET = 0x1263 BLKRAGET = 0x1263
BLKRASET = 0x1262 BLKRASET = 0x1262
BLKROGET = 0x125e BLKROGET = 0x125e
@ -450,6 +451,24 @@ const (
FF1 = 0x8000 FF1 = 0x8000
FFDLY = 0x8000 FFDLY = 0x8000
FLUSHO = 0x1000 FLUSHO = 0x1000
FS_ENCRYPTION_MODE_AES_256_CBC = 0x3
FS_ENCRYPTION_MODE_AES_256_CTS = 0x4
FS_ENCRYPTION_MODE_AES_256_GCM = 0x2
FS_ENCRYPTION_MODE_AES_256_XTS = 0x1
FS_ENCRYPTION_MODE_INVALID = 0x0
FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613
FS_KEY_DESCRIPTOR_SIZE = 0x8
FS_KEY_DESC_PREFIX = "fscrypt:"
FS_KEY_DESC_PREFIX_SIZE = 0x8
FS_MAX_KEY_SIZE = 0x40
FS_POLICY_FLAGS_PAD_16 = 0x2
FS_POLICY_FLAGS_PAD_32 = 0x3
FS_POLICY_FLAGS_PAD_4 = 0x0
FS_POLICY_FLAGS_PAD_8 = 0x1
FS_POLICY_FLAGS_PAD_MASK = 0x3
FS_POLICY_FLAGS_VALID = 0x3
F_DUPFD = 0x0 F_DUPFD = 0x0
F_DUPFD_CLOEXEC = 0x406 F_DUPFD_CLOEXEC = 0x406
F_EXLCK = 0x4 F_EXLCK = 0x4
@ -810,6 +829,7 @@ const (
MS_ACTIVE = 0x40000000 MS_ACTIVE = 0x40000000
MS_ASYNC = 0x1 MS_ASYNC = 0x1
MS_BIND = 0x1000 MS_BIND = 0x1000
MS_BORN = 0x20000000
MS_DIRSYNC = 0x80 MS_DIRSYNC = 0x80
MS_INVALIDATE = 0x2 MS_INVALIDATE = 0x2
MS_I_VERSION = 0x800000 MS_I_VERSION = 0x800000
@ -823,6 +843,8 @@ const (
MS_NODEV = 0x4 MS_NODEV = 0x4
MS_NODIRATIME = 0x800 MS_NODIRATIME = 0x800
MS_NOEXEC = 0x8 MS_NOEXEC = 0x8
MS_NOREMOTELOCK = 0x8000000
MS_NOSEC = 0x10000000
MS_NOSUID = 0x2 MS_NOSUID = 0x2
MS_NOUSER = -0x80000000 MS_NOUSER = -0x80000000
MS_POSIXACL = 0x10000 MS_POSIXACL = 0x10000
@ -839,6 +861,7 @@ const (
MS_SYNC = 0x4 MS_SYNC = 0x4
MS_SYNCHRONOUS = 0x10 MS_SYNCHRONOUS = 0x10
MS_UNBINDABLE = 0x20000 MS_UNBINDABLE = 0x20000
MS_VERBOSE = 0x8000
NAME_MAX = 0xff NAME_MAX = 0xff
NETLINK_ADD_MEMBERSHIP = 0x1 NETLINK_ADD_MEMBERSHIP = 0x1
NETLINK_AUDIT = 0x9 NETLINK_AUDIT = 0x9


@ -168,6 +168,7 @@ const (
BLKFRASET = 0x20001264 BLKFRASET = 0x20001264
BLKGETSIZE = 0x20001260 BLKGETSIZE = 0x20001260
BLKGETSIZE64 = 0x40041272 BLKGETSIZE64 = 0x40041272
BLKPBSZGET = 0x2000127b
BLKRAGET = 0x20001263 BLKRAGET = 0x20001263
BLKRASET = 0x20001262 BLKRASET = 0x20001262
BLKROGET = 0x2000125e BLKROGET = 0x2000125e
@ -449,6 +450,24 @@ const (
FF1 = 0x8000 FF1 = 0x8000
FFDLY = 0x8000 FFDLY = 0x8000
FLUSHO = 0x2000 FLUSHO = 0x2000
FS_ENCRYPTION_MODE_AES_256_CBC = 0x3
FS_ENCRYPTION_MODE_AES_256_CTS = 0x4
FS_ENCRYPTION_MODE_AES_256_GCM = 0x2
FS_ENCRYPTION_MODE_AES_256_XTS = 0x1
FS_ENCRYPTION_MODE_INVALID = 0x0
FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613
FS_KEY_DESCRIPTOR_SIZE = 0x8
FS_KEY_DESC_PREFIX = "fscrypt:"
FS_KEY_DESC_PREFIX_SIZE = 0x8
FS_MAX_KEY_SIZE = 0x40
FS_POLICY_FLAGS_PAD_16 = 0x2
FS_POLICY_FLAGS_PAD_32 = 0x3
FS_POLICY_FLAGS_PAD_4 = 0x0
FS_POLICY_FLAGS_PAD_8 = 0x1
FS_POLICY_FLAGS_PAD_MASK = 0x3
FS_POLICY_FLAGS_VALID = 0x3
F_DUPFD = 0x0 F_DUPFD = 0x0
F_DUPFD_CLOEXEC = 0x406 F_DUPFD_CLOEXEC = 0x406
F_EXLCK = 0x4 F_EXLCK = 0x4
@ -810,6 +829,7 @@ const (
MS_ACTIVE = 0x40000000 MS_ACTIVE = 0x40000000
MS_ASYNC = 0x1 MS_ASYNC = 0x1
MS_BIND = 0x1000 MS_BIND = 0x1000
MS_BORN = 0x20000000
MS_DIRSYNC = 0x80 MS_DIRSYNC = 0x80
MS_INVALIDATE = 0x2 MS_INVALIDATE = 0x2
MS_I_VERSION = 0x800000 MS_I_VERSION = 0x800000
@ -823,6 +843,8 @@ const (
MS_NODEV = 0x4 MS_NODEV = 0x4
MS_NODIRATIME = 0x800 MS_NODIRATIME = 0x800
MS_NOEXEC = 0x8 MS_NOEXEC = 0x8
MS_NOREMOTELOCK = 0x8000000
MS_NOSEC = 0x10000000
MS_NOSUID = 0x2 MS_NOSUID = 0x2
MS_NOUSER = -0x80000000 MS_NOUSER = -0x80000000
MS_POSIXACL = 0x10000 MS_POSIXACL = 0x10000
@ -839,6 +861,7 @@ const (
MS_SYNC = 0x4 MS_SYNC = 0x4
MS_SYNCHRONOUS = 0x10 MS_SYNCHRONOUS = 0x10
MS_UNBINDABLE = 0x20000 MS_UNBINDABLE = 0x20000
MS_VERBOSE = 0x8000
NAME_MAX = 0xff NAME_MAX = 0xff
NETLINK_ADD_MEMBERSHIP = 0x1 NETLINK_ADD_MEMBERSHIP = 0x1
NETLINK_AUDIT = 0x9 NETLINK_AUDIT = 0x9


@ -168,6 +168,7 @@ const (
BLKFRASET = 0x20001264 BLKFRASET = 0x20001264
BLKGETSIZE = 0x20001260 BLKGETSIZE = 0x20001260
BLKGETSIZE64 = 0x40081272 BLKGETSIZE64 = 0x40081272
BLKPBSZGET = 0x2000127b
BLKRAGET = 0x20001263 BLKRAGET = 0x20001263
BLKRASET = 0x20001262 BLKRASET = 0x20001262
BLKROGET = 0x2000125e BLKROGET = 0x2000125e
@ -449,6 +450,24 @@ const (
FF1 = 0x8000 FF1 = 0x8000
FFDLY = 0x8000 FFDLY = 0x8000
FLUSHO = 0x2000 FLUSHO = 0x2000
FS_ENCRYPTION_MODE_AES_256_CBC = 0x3
FS_ENCRYPTION_MODE_AES_256_CTS = 0x4
FS_ENCRYPTION_MODE_AES_256_GCM = 0x2
FS_ENCRYPTION_MODE_AES_256_XTS = 0x1
FS_ENCRYPTION_MODE_INVALID = 0x0
FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613
FS_KEY_DESCRIPTOR_SIZE = 0x8
FS_KEY_DESC_PREFIX = "fscrypt:"
FS_KEY_DESC_PREFIX_SIZE = 0x8
FS_MAX_KEY_SIZE = 0x40
FS_POLICY_FLAGS_PAD_16 = 0x2
FS_POLICY_FLAGS_PAD_32 = 0x3
FS_POLICY_FLAGS_PAD_4 = 0x0
FS_POLICY_FLAGS_PAD_8 = 0x1
FS_POLICY_FLAGS_PAD_MASK = 0x3
FS_POLICY_FLAGS_VALID = 0x3
F_DUPFD = 0x0 F_DUPFD = 0x0
F_DUPFD_CLOEXEC = 0x406 F_DUPFD_CLOEXEC = 0x406
F_EXLCK = 0x4 F_EXLCK = 0x4
@ -810,6 +829,7 @@ const (
MS_ACTIVE = 0x40000000 MS_ACTIVE = 0x40000000
MS_ASYNC = 0x1 MS_ASYNC = 0x1
MS_BIND = 0x1000 MS_BIND = 0x1000
MS_BORN = 0x20000000
MS_DIRSYNC = 0x80 MS_DIRSYNC = 0x80
MS_INVALIDATE = 0x2 MS_INVALIDATE = 0x2
MS_I_VERSION = 0x800000 MS_I_VERSION = 0x800000
@ -823,6 +843,8 @@ const (
MS_NODEV = 0x4 MS_NODEV = 0x4
MS_NODIRATIME = 0x800 MS_NODIRATIME = 0x800
MS_NOEXEC = 0x8 MS_NOEXEC = 0x8
MS_NOREMOTELOCK = 0x8000000
MS_NOSEC = 0x10000000
MS_NOSUID = 0x2 MS_NOSUID = 0x2
MS_NOUSER = -0x80000000 MS_NOUSER = -0x80000000
MS_POSIXACL = 0x10000 MS_POSIXACL = 0x10000
@ -839,6 +861,7 @@ const (
MS_SYNC = 0x4 MS_SYNC = 0x4
MS_SYNCHRONOUS = 0x10 MS_SYNCHRONOUS = 0x10
MS_UNBINDABLE = 0x20000 MS_UNBINDABLE = 0x20000
MS_VERBOSE = 0x8000
NAME_MAX = 0xff NAME_MAX = 0xff
NETLINK_ADD_MEMBERSHIP = 0x1 NETLINK_ADD_MEMBERSHIP = 0x1
NETLINK_AUDIT = 0x9 NETLINK_AUDIT = 0x9


@ -168,6 +168,7 @@ const (
BLKFRASET = 0x20001264 BLKFRASET = 0x20001264
BLKGETSIZE = 0x20001260 BLKGETSIZE = 0x20001260
BLKGETSIZE64 = 0x40081272 BLKGETSIZE64 = 0x40081272
BLKPBSZGET = 0x2000127b
BLKRAGET = 0x20001263 BLKRAGET = 0x20001263
BLKRASET = 0x20001262 BLKRASET = 0x20001262
BLKROGET = 0x2000125e BLKROGET = 0x2000125e
@ -449,6 +450,24 @@ const (
FF1 = 0x8000 FF1 = 0x8000
FFDLY = 0x8000 FFDLY = 0x8000
FLUSHO = 0x2000 FLUSHO = 0x2000
FS_ENCRYPTION_MODE_AES_256_CBC = 0x3
FS_ENCRYPTION_MODE_AES_256_CTS = 0x4
FS_ENCRYPTION_MODE_AES_256_GCM = 0x2
FS_ENCRYPTION_MODE_AES_256_XTS = 0x1
FS_ENCRYPTION_MODE_INVALID = 0x0
FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613
FS_KEY_DESCRIPTOR_SIZE = 0x8
FS_KEY_DESC_PREFIX = "fscrypt:"
FS_KEY_DESC_PREFIX_SIZE = 0x8
FS_MAX_KEY_SIZE = 0x40
FS_POLICY_FLAGS_PAD_16 = 0x2
FS_POLICY_FLAGS_PAD_32 = 0x3
FS_POLICY_FLAGS_PAD_4 = 0x0
FS_POLICY_FLAGS_PAD_8 = 0x1
FS_POLICY_FLAGS_PAD_MASK = 0x3
FS_POLICY_FLAGS_VALID = 0x3
F_DUPFD = 0x0 F_DUPFD = 0x0
F_DUPFD_CLOEXEC = 0x406 F_DUPFD_CLOEXEC = 0x406
F_EXLCK = 0x4 F_EXLCK = 0x4
@ -810,6 +829,7 @@ const (
MS_ACTIVE = 0x40000000 MS_ACTIVE = 0x40000000
MS_ASYNC = 0x1 MS_ASYNC = 0x1
MS_BIND = 0x1000 MS_BIND = 0x1000
MS_BORN = 0x20000000
MS_DIRSYNC = 0x80 MS_DIRSYNC = 0x80
MS_INVALIDATE = 0x2 MS_INVALIDATE = 0x2
MS_I_VERSION = 0x800000 MS_I_VERSION = 0x800000
@ -823,6 +843,8 @@ const (
MS_NODEV = 0x4 MS_NODEV = 0x4
MS_NODIRATIME = 0x800 MS_NODIRATIME = 0x800
MS_NOEXEC = 0x8 MS_NOEXEC = 0x8
MS_NOREMOTELOCK = 0x8000000
MS_NOSEC = 0x10000000
MS_NOSUID = 0x2 MS_NOSUID = 0x2
MS_NOUSER = -0x80000000 MS_NOUSER = -0x80000000
MS_POSIXACL = 0x10000 MS_POSIXACL = 0x10000
@ -839,6 +861,7 @@ const (
MS_SYNC = 0x4 MS_SYNC = 0x4
MS_SYNCHRONOUS = 0x10 MS_SYNCHRONOUS = 0x10
MS_UNBINDABLE = 0x20000 MS_UNBINDABLE = 0x20000
MS_VERBOSE = 0x8000
NAME_MAX = 0xff NAME_MAX = 0xff
NETLINK_ADD_MEMBERSHIP = 0x1 NETLINK_ADD_MEMBERSHIP = 0x1
NETLINK_AUDIT = 0x9 NETLINK_AUDIT = 0x9


@@ -168,6 +168,7 @@ const (
 BLKFRASET = 0x20001264
 BLKGETSIZE = 0x20001260
 BLKGETSIZE64 = 0x40041272
+BLKPBSZGET = 0x2000127b
 BLKRAGET = 0x20001263
 BLKRASET = 0x20001262
 BLKROGET = 0x2000125e
@@ -449,6 +450,24 @@ const (
 FF1 = 0x8000
 FFDLY = 0x8000
 FLUSHO = 0x2000
+FS_ENCRYPTION_MODE_AES_256_CBC = 0x3
+FS_ENCRYPTION_MODE_AES_256_CTS = 0x4
+FS_ENCRYPTION_MODE_AES_256_GCM = 0x2
+FS_ENCRYPTION_MODE_AES_256_XTS = 0x1
+FS_ENCRYPTION_MODE_INVALID = 0x0
+FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
+FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
+FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613
+FS_KEY_DESCRIPTOR_SIZE = 0x8
+FS_KEY_DESC_PREFIX = "fscrypt:"
+FS_KEY_DESC_PREFIX_SIZE = 0x8
+FS_MAX_KEY_SIZE = 0x40
+FS_POLICY_FLAGS_PAD_16 = 0x2
+FS_POLICY_FLAGS_PAD_32 = 0x3
+FS_POLICY_FLAGS_PAD_4 = 0x0
+FS_POLICY_FLAGS_PAD_8 = 0x1
+FS_POLICY_FLAGS_PAD_MASK = 0x3
+FS_POLICY_FLAGS_VALID = 0x3
 F_DUPFD = 0x0
 F_DUPFD_CLOEXEC = 0x406
 F_EXLCK = 0x4
@@ -810,6 +829,7 @@ const (
 MS_ACTIVE = 0x40000000
 MS_ASYNC = 0x1
 MS_BIND = 0x1000
+MS_BORN = 0x20000000
 MS_DIRSYNC = 0x80
 MS_INVALIDATE = 0x2
 MS_I_VERSION = 0x800000
@@ -823,6 +843,8 @@ const (
 MS_NODEV = 0x4
 MS_NODIRATIME = 0x800
 MS_NOEXEC = 0x8
+MS_NOREMOTELOCK = 0x8000000
+MS_NOSEC = 0x10000000
 MS_NOSUID = 0x2
 MS_NOUSER = -0x80000000
 MS_POSIXACL = 0x10000
@@ -839,6 +861,7 @@ const (
 MS_SYNC = 0x4
 MS_SYNCHRONOUS = 0x10
 MS_UNBINDABLE = 0x20000
+MS_VERBOSE = 0x8000
 NAME_MAX = 0xff
 NETLINK_ADD_MEMBERSHIP = 0x1
 NETLINK_AUDIT = 0x9


@@ -168,6 +168,7 @@ const (
 BLKFRASET = 0x20001264
 BLKGETSIZE = 0x20001260
 BLKGETSIZE64 = 0x40081272
+BLKPBSZGET = 0x2000127b
 BLKRAGET = 0x20001263
 BLKRASET = 0x20001262
 BLKROGET = 0x2000125e
@@ -449,6 +450,24 @@ const (
 FF1 = 0x4000
 FFDLY = 0x4000
 FLUSHO = 0x800000
+FS_ENCRYPTION_MODE_AES_256_CBC = 0x3
+FS_ENCRYPTION_MODE_AES_256_CTS = 0x4
+FS_ENCRYPTION_MODE_AES_256_GCM = 0x2
+FS_ENCRYPTION_MODE_AES_256_XTS = 0x1
+FS_ENCRYPTION_MODE_INVALID = 0x0
+FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
+FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
+FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613
+FS_KEY_DESCRIPTOR_SIZE = 0x8
+FS_KEY_DESC_PREFIX = "fscrypt:"
+FS_KEY_DESC_PREFIX_SIZE = 0x8
+FS_MAX_KEY_SIZE = 0x40
+FS_POLICY_FLAGS_PAD_16 = 0x2
+FS_POLICY_FLAGS_PAD_32 = 0x3
+FS_POLICY_FLAGS_PAD_4 = 0x0
+FS_POLICY_FLAGS_PAD_8 = 0x1
+FS_POLICY_FLAGS_PAD_MASK = 0x3
+FS_POLICY_FLAGS_VALID = 0x3
 F_DUPFD = 0x0
 F_DUPFD_CLOEXEC = 0x406
 F_EXLCK = 0x4
@@ -809,6 +828,7 @@ const (
 MS_ACTIVE = 0x40000000
 MS_ASYNC = 0x1
 MS_BIND = 0x1000
+MS_BORN = 0x20000000
 MS_DIRSYNC = 0x80
 MS_INVALIDATE = 0x2
 MS_I_VERSION = 0x800000
@@ -822,6 +842,8 @@ const (
 MS_NODEV = 0x4
 MS_NODIRATIME = 0x800
 MS_NOEXEC = 0x8
+MS_NOREMOTELOCK = 0x8000000
+MS_NOSEC = 0x10000000
 MS_NOSUID = 0x2
 MS_NOUSER = -0x80000000
 MS_POSIXACL = 0x10000
@@ -838,6 +860,7 @@ const (
 MS_SYNC = 0x4
 MS_SYNCHRONOUS = 0x10
 MS_UNBINDABLE = 0x20000
+MS_VERBOSE = 0x8000
 NAME_MAX = 0xff
 NETLINK_ADD_MEMBERSHIP = 0x1
 NETLINK_AUDIT = 0x9


@@ -168,6 +168,7 @@ const (
 BLKFRASET = 0x20001264
 BLKGETSIZE = 0x20001260
 BLKGETSIZE64 = 0x40081272
+BLKPBSZGET = 0x2000127b
 BLKRAGET = 0x20001263
 BLKRASET = 0x20001262
 BLKROGET = 0x2000125e
@@ -449,6 +450,24 @@ const (
 FF1 = 0x4000
 FFDLY = 0x4000
 FLUSHO = 0x800000
+FS_ENCRYPTION_MODE_AES_256_CBC = 0x3
+FS_ENCRYPTION_MODE_AES_256_CTS = 0x4
+FS_ENCRYPTION_MODE_AES_256_GCM = 0x2
+FS_ENCRYPTION_MODE_AES_256_XTS = 0x1
+FS_ENCRYPTION_MODE_INVALID = 0x0
+FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
+FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
+FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613
+FS_KEY_DESCRIPTOR_SIZE = 0x8
+FS_KEY_DESC_PREFIX = "fscrypt:"
+FS_KEY_DESC_PREFIX_SIZE = 0x8
+FS_MAX_KEY_SIZE = 0x40
+FS_POLICY_FLAGS_PAD_16 = 0x2
+FS_POLICY_FLAGS_PAD_32 = 0x3
+FS_POLICY_FLAGS_PAD_4 = 0x0
+FS_POLICY_FLAGS_PAD_8 = 0x1
+FS_POLICY_FLAGS_PAD_MASK = 0x3
+FS_POLICY_FLAGS_VALID = 0x3
 F_DUPFD = 0x0
 F_DUPFD_CLOEXEC = 0x406
 F_EXLCK = 0x4
@@ -809,6 +828,7 @@ const (
 MS_ACTIVE = 0x40000000
 MS_ASYNC = 0x1
 MS_BIND = 0x1000
+MS_BORN = 0x20000000
 MS_DIRSYNC = 0x80
 MS_INVALIDATE = 0x2
 MS_I_VERSION = 0x800000
@@ -822,6 +842,8 @@ const (
 MS_NODEV = 0x4
 MS_NODIRATIME = 0x800
 MS_NOEXEC = 0x8
+MS_NOREMOTELOCK = 0x8000000
+MS_NOSEC = 0x10000000
 MS_NOSUID = 0x2
 MS_NOUSER = -0x80000000
 MS_POSIXACL = 0x10000
@@ -838,6 +860,7 @@ const (
 MS_SYNC = 0x4
 MS_SYNCHRONOUS = 0x10
 MS_UNBINDABLE = 0x20000
+MS_VERBOSE = 0x8000
 NAME_MAX = 0xff
 NETLINK_ADD_MEMBERSHIP = 0x1
 NETLINK_AUDIT = 0x9


@@ -168,6 +168,7 @@ const (
 BLKFRASET = 0x1264
 BLKGETSIZE = 0x1260
 BLKGETSIZE64 = 0x80081272
+BLKPBSZGET = 0x127b
 BLKRAGET = 0x1263
 BLKRASET = 0x1262
 BLKROGET = 0x125e
@@ -449,6 +450,24 @@ const (
 FF1 = 0x8000
 FFDLY = 0x8000
 FLUSHO = 0x1000
+FS_ENCRYPTION_MODE_AES_256_CBC = 0x3
+FS_ENCRYPTION_MODE_AES_256_CTS = 0x4
+FS_ENCRYPTION_MODE_AES_256_GCM = 0x2
+FS_ENCRYPTION_MODE_AES_256_XTS = 0x1
+FS_ENCRYPTION_MODE_INVALID = 0x0
+FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
+FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
+FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613
+FS_KEY_DESCRIPTOR_SIZE = 0x8
+FS_KEY_DESC_PREFIX = "fscrypt:"
+FS_KEY_DESC_PREFIX_SIZE = 0x8
+FS_MAX_KEY_SIZE = 0x40
+FS_POLICY_FLAGS_PAD_16 = 0x2
+FS_POLICY_FLAGS_PAD_32 = 0x3
+FS_POLICY_FLAGS_PAD_4 = 0x0
+FS_POLICY_FLAGS_PAD_8 = 0x1
+FS_POLICY_FLAGS_PAD_MASK = 0x3
+FS_POLICY_FLAGS_VALID = 0x3
 F_DUPFD = 0x0
 F_DUPFD_CLOEXEC = 0x406
 F_EXLCK = 0x4
@@ -809,6 +828,7 @@ const (
 MS_ACTIVE = 0x40000000
 MS_ASYNC = 0x1
 MS_BIND = 0x1000
+MS_BORN = 0x20000000
 MS_DIRSYNC = 0x80
 MS_INVALIDATE = 0x2
 MS_I_VERSION = 0x800000
@@ -822,6 +842,8 @@ const (
 MS_NODEV = 0x4
 MS_NODIRATIME = 0x800
 MS_NOEXEC = 0x8
+MS_NOREMOTELOCK = 0x8000000
+MS_NOSEC = 0x10000000
 MS_NOSUID = 0x2
 MS_NOUSER = -0x80000000
 MS_POSIXACL = 0x10000
@@ -838,6 +860,7 @@ const (
 MS_SYNC = 0x4
 MS_SYNCHRONOUS = 0x10
 MS_UNBINDABLE = 0x20000
+MS_VERBOSE = 0x8000
 NAME_MAX = 0xff
 NETLINK_ADD_MEMBERSHIP = 0x1
 NETLINK_AUDIT = 0x9


@@ -1,5 +1,5 @@
 // mkerrors.sh -m64
-// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+// Code generated by the command above; see README.md. DO NOT EDIT.
 // +build amd64,solaris
@@ -159,7 +159,12 @@ const (
 BPF_W = 0x0
 BPF_X = 0x8
 BRKINT = 0x2
+BS0 = 0x0
+BS1 = 0x2000
+BSDLY = 0x2000
+CBAUD = 0xf
 CFLUSH = 0xf
+CIBAUD = 0xf0000
 CLOCAL = 0x800
 CLOCK_HIGHRES = 0x4
 CLOCK_LEVEL = 0xa
@@ -169,7 +174,13 @@ const (
 CLOCK_REALTIME = 0x3
 CLOCK_THREAD_CPUTIME_ID = 0x2
 CLOCK_VIRTUAL = 0x1
+CR0 = 0x0
+CR1 = 0x200
+CR2 = 0x400
+CR3 = 0x600
+CRDLY = 0x600
 CREAD = 0x80
+CRTSCTS = 0x80000000
 CS5 = 0x0
 CS6 = 0x10
 CS7 = 0x20
@@ -276,6 +287,9 @@ const (
 FD_CLOEXEC = 0x1
 FD_NFDBITS = 0x40
 FD_SETSIZE = 0x10000
+FF0 = 0x0
+FF1 = 0x8000
+FFDLY = 0x8000
 FLUSHALL = 0x1
 FLUSHDATA = 0x0
 FLUSHO = 0x2000
@@ -290,6 +304,10 @@ const (
 F_DUP2FD_CLOEXEC = 0x24
 F_DUPFD = 0x0
 F_DUPFD_CLOEXEC = 0x25
+F_FLOCK = 0x35
+F_FLOCK64 = 0x35
+F_FLOCKW = 0x36
+F_FLOCKW64 = 0x36
 F_FREESP = 0xb
 F_FREESP64 = 0xb
 F_GETFD = 0x1
@@ -304,6 +322,12 @@ const (
 F_MDACC = 0x20
 F_NODNY = 0x0
 F_NPRIV = 0x10
+F_OFD_GETLK = 0x2f
+F_OFD_GETLK64 = 0x2f
+F_OFD_SETLK = 0x30
+F_OFD_SETLK64 = 0x30
+F_OFD_SETLKW = 0x31
+F_OFD_SETLKW64 = 0x31
 F_PRIV = 0xf
 F_QUOTACTL = 0x11
 F_RDACC = 0x1
@@ -332,6 +356,7 @@ const (
 F_WRDNY = 0x2
 F_WRLCK = 0x2
 HUPCL = 0x400
+IBSHIFT = 0x10
 ICANON = 0x2
 ICRNL = 0x100
 IEXTEN = 0x8000
@@ -589,15 +614,21 @@ const (
 IP_UNSPEC_SRC = 0x42
 ISIG = 0x1
 ISTRIP = 0x20
+IUCLC = 0x200
 IXANY = 0x800
 IXOFF = 0x1000
 IXON = 0x400
+LOCK_EX = 0x2
+LOCK_NB = 0x4
+LOCK_SH = 0x1
+LOCK_UN = 0x8
 MADV_ACCESS_DEFAULT = 0x6
 MADV_ACCESS_LWP = 0x7
 MADV_ACCESS_MANY = 0x8
 MADV_DONTNEED = 0x4
 MADV_FREE = 0x5
 MADV_NORMAL = 0x0
+MADV_PURGE = 0x9
 MADV_RANDOM = 0x1
 MADV_SEQUENTIAL = 0x2
 MADV_WILLNEED = 0x3
@@ -605,6 +636,7 @@ const (
 MAP_ALIGN = 0x200
 MAP_ANON = 0x100
 MAP_ANONYMOUS = 0x100
+MAP_FILE = 0x0
 MAP_FIXED = 0x10
 MAP_INITDATA = 0x800
 MAP_NORESERVE = 0x40
@@ -632,10 +664,14 @@ const (
 MS_OLDSYNC = 0x0
 MS_SYNC = 0x4
 M_FLUSH = 0x86
+NL0 = 0x0
+NL1 = 0x100
+NLDLY = 0x100
 NOFLSH = 0x80
 OCRNL = 0x8
 OFDEL = 0x80
 OFILL = 0x40
+OLCUC = 0x2
 ONLCR = 0x4
 ONLRET = 0x20
 ONOCR = 0x10
@@ -955,12 +991,21 @@ const (
 SO_USELOOPBACK = 0x40
 SO_VRRP = 0x1017
 SO_WROFF = 0x2
+TAB0 = 0x0
+TAB1 = 0x800
+TAB2 = 0x1000
+TAB3 = 0x1800
+TABDLY = 0x1800
 TCFLSH = 0x5407
 TCGETA = 0x5401
 TCGETS = 0x540d
 TCIFLUSH = 0x0
+TCIOFF = 0x2
 TCIOFLUSH = 0x2
+TCION = 0x3
 TCOFLUSH = 0x1
+TCOOFF = 0x0
+TCOON = 0x1
 TCP_ABORT_THRESHOLD = 0x11
 TCP_ANONPRIVBIND = 0x20
 TCP_CONN_ABORT_THRESHOLD = 0x13
@@ -1089,6 +1134,8 @@ const (
 WSTOPPED = 0x4
 WTRAPPED = 0x2
 WUNTRACED = 0x4
+XCASE = 0x4
+XTABS = 0x1800
 )

 // Errors
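The LOCK_EX, LOCK_SH, LOCK_NB, and LOCK_UN values added above are the standard flock(2) operation flags. Below is a rough sketch of the advisory locking they enable; it assumes the package's exported Flock helper is available for the target platform (it is on Linux and the BSDs; whether this vendored revision also wires it up for Solaris through the new F_FLOCK commands is an assumption), and the lock-file path is hypothetical.

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Hypothetical lock file used only to carry the advisory lock.
	f, err := os.OpenFile("/tmp/example.lock", os.O_CREATE|os.O_RDWR, 0644)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Try to take an exclusive advisory lock without blocking.
	if err := unix.Flock(int(f.Fd()), unix.LOCK_EX|unix.LOCK_NB); err != nil {
		fmt.Println("lock is already held elsewhere:", err)
		return
	}
	defer unix.Flock(int(f.Fd()), unix.LOCK_UN)

	fmt.Println("holding the exclusive lock")
}
```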


@@ -1,5 +1,5 @@
 // mksyscall.pl -dragonfly -tags dragonfly,amd64 syscall_bsd.go syscall_dragonfly.go syscall_dragonfly_amd64.go
-// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+// Code generated by the command above; see README.md. DO NOT EDIT.
 // +build dragonfly,amd64
@@ -1380,3 +1380,14 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
 	}
 	return
 }
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) {
+	r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
+	nfd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
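The generated accept4 above is the raw DragonFly stub; application code normally reaches it through the package's exported Accept4 helper. A minimal sketch of that usage follows, assuming a platform where Accept4 is exported (shown for Linux) and a hypothetical port.

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Plain TCP listener built directly on the unix package.
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	sa := &unix.SockaddrInet4{Port: 8080} // hypothetical port, 0.0.0.0
	if err := unix.Bind(fd, sa); err != nil {
		panic(err)
	}
	if err := unix.Listen(fd, 128); err != nil {
		panic(err)
	}

	// Accept4 sets close-on-exec atomically at accept time.
	nfd, _, err := unix.Accept4(fd, unix.SOCK_CLOEXEC)
	if err != nil {
		panic(err)
	}
	defer unix.Close(nfd)
	fmt.Println("accepted one connection")
}
```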


@@ -312,6 +312,17 @@ func Close(fd int) (err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

+func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) {
+	r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Dup(oldfd int) (fd int, err error) {
 	r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0)
 	fd = int(r0)
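The CopyFileRange wrapper added above binds the copy_file_range(2) syscall (Linux 4.5+), which copies between file descriptors inside the kernel without bouncing data through user space. A minimal usage sketch, with hypothetical file names:

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	src, err := os.Open("src.bin") // hypothetical input file
	if err != nil {
		panic(err)
	}
	defer src.Close()

	dst, err := os.Create("dst.bin") // hypothetical output file
	if err != nil {
		panic(err)
	}
	defer dst.Close()

	// Copy up to 1 MiB; nil offsets let the kernel advance each file's own offset.
	n, err := unix.CopyFileRange(int(src.Fd()), nil, int(dst.Fd()), nil, 1<<20, 0)
	if err != nil {
		panic(err)
	}
	fmt.Printf("copied %d bytes\n", n)
}
```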

Some files were not shown because too many files have changed in this diff.