Merge pull request #846 from surajnarwade/buildv3

Added support for build key in v3
Charlie Drage 2017-11-30 09:06:32 -05:00 committed by GitHub
commit 84be7400a2
323 changed files with 28993 additions and 66984 deletions
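For reference, a minimal standalone sketch (not part of the diff) of what the new v3 build support maps onto: the loader change further down assigns the compose build section's context, dockerfile, and args onto kompose's service config. Field names mirror the assignments in the v3 loader (serviceConfig.Build, .Dockerfile, .BuildArgs); the concrete struct types used here are assumptions for illustration only.

package main

import "fmt"

// Illustrative only: field names follow the assignments added in the v3
// loader below; the exact kobject types are assumed.
type serviceConfig struct {
	Build      string             // compose: build.context
	Dockerfile string             // compose: build.dockerfile
	BuildArgs  map[string]*string // compose: build.args
}

func main() {
	ref := "master"
	// e.g. a v3 service declaring:
	//   build:
	//     context: ./nginx
	//     dockerfile: Dockerfile
	//     args:
	//       GIT_REF: master
	sc := serviceConfig{
		Build:      "./nginx",
		Dockerfile: "Dockerfile",
		BuildArgs:  map[string]*string{"GIT_REF": &ref},
	}
	fmt.Printf("%+v\n", sc)
}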


@ -112,6 +112,15 @@ test-image:
test-container:
docker run -v `pwd`:/opt/tmp/kompose:ro -it $(TEST_IMAGE)
# Update vendoring
# Vendoring is a bit messy right now
.PHONY: vendor-update
vendor-update:
glide update --strip-vendor
glide-vc --only-code --no-tests
find ./vendor/github.com/docker/distribution -type f -exec sed -i 's/Sirupsen/sirupsen/g' {} \;
.PHONY: test-k8s
test-k8s:
./script/test_k8s/test.sh


@ -6,7 +6,7 @@ import (
"io"
"os"
log "github.com/Sirupsen/logrus"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)


@ -19,9 +19,9 @@ package cmd
import (
"strings"
log "github.com/Sirupsen/logrus"
"github.com/kubernetes/kompose/pkg/app"
"github.com/kubernetes/kompose/pkg/kobject"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)


@ -21,7 +21,7 @@ import (
"os"
"strings"
log "github.com/Sirupsen/logrus"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)


@ -17,7 +17,7 @@ limitations under the License.
package cmd
import (
log "github.com/Sirupsen/logrus"
log "github.com/sirupsen/logrus"
"github.com/kubernetes/kompose/pkg/app"
"github.com/kubernetes/kompose/pkg/kobject"

glide.lock (generated, 48 changed lines)

@ -1,5 +1,5 @@
hash: 14203f2282ff9f112d87d12b1c3f855502d3d0e808520a887757ce8f273f1086
updated: 2017-10-10T12:44:09.037210249-04:00
hash: 0b7bed52681c512a3c15884363e1fd04ebc39efc52368cfa34ec4ef46f4eedc7
updated: 2017-10-25T14:45:17.433498815+05:30
imports:
- name: cloud.google.com/go
version: 3b1ae45394a234c385be014e9a488f2bb6eef821
@ -104,7 +104,7 @@ imports:
- name: github.com/dgrijalva/jwt-go
version: 01aeca54ebda6e0fbfafd0a524d234159c05ec20
- name: github.com/docker/cli
version: 9bdb0763b9e667dc01adf36ba98a2b7bd47bdc75
version: 9b7656cc05d2878c85dc1252f5813f0ad77d808f
subpackages:
- cli/compose/interpolation
- cli/compose/loader
@ -170,7 +170,6 @@ imports:
- api/types/swarm
- api/types/versions
- pkg/urlutil
- runconfig/opts
- name: github.com/docker/engine-api
version: dea108d3aa0c67d7162a3fd8aa65f38a430019fd
subpackages:
@ -196,7 +195,7 @@ imports:
- name: github.com/docker/go-units
version: 0bbddae09c5a5419a8c6dcdd7ff90da3d450393b
- name: github.com/docker/libcompose
version: 4a647d664afbe05c41455c9d534d8239671eb46a
version: 57bd716502dcbe1799f026148016022b0f3b989c
subpackages:
- config
- logger
@ -216,11 +215,11 @@ imports:
- name: github.com/evanphx/json-patch
version: 465937c80b3c07a7c7ad20cc934898646a91c1de
- name: github.com/fatih/structs
version: 7e5a8eef611ee84dd359503f3969f80df4c50723
version: dc3312cb1a4513a366c4c9e622ad55c32df12ed3
- name: github.com/flynn/go-shlex
version: 3f9db97f856818214da2e1057f8ad84803971cff
- name: github.com/fsnotify/fsnotify
version: 4da3e2cfbabc9f751898f250b49f2439785783a1
version: 629574ca2a5df945712d3079857300b5e4da0236
- name: github.com/fsouza/go-dockerclient
version: bf97c77db7c945cbcdbf09d56c6f87a66f54537b
subpackages:
@ -338,7 +337,7 @@ imports:
- runtime/internal
- utilities
- name: github.com/hashicorp/hcl
version: 42e33e2d55a0ff1d6263f738896ea8c13571a8d0
version: 37ab263305aaeb501a60eb16863e808d426e37f2
subpackages:
- hcl/ast
- hcl/parser
@ -359,7 +358,7 @@ imports:
- name: github.com/juju/ratelimit
version: 77ed1c8a01217656d2080ad51981f6e99adaa177
- name: github.com/magiconair/properties
version: 8d7837e64d3c1ee4e54a880c5a920ab4316fc90a
version: be5ece7dd465ab0765a9682137865547526d1dfb
- name: github.com/mattn/go-shellwords
version: 95c860c1895b21b58903abdd1d9c591560b0601c
- name: github.com/matttproud/golang_protobuf_extensions
@ -367,7 +366,7 @@ imports:
subpackages:
- pbutil
- name: github.com/mitchellh/mapstructure
version: d0303fe809921458f417bcf828397a65db30a7e4
version: 5a0325d7fafaac12dda6e7fb8bd222ec1b69875e
- name: github.com/novln/docker-parser
version: 6030251119d652af8ead44ac7907444227b64d56
subpackages:
@ -427,10 +426,12 @@ imports:
- pkg/version
- name: github.com/pborman/uuid
version: ca53cad383cad2479bbba7f7a1a05797ec1386e4
- name: github.com/pelletier/go-buffruneio
version: c37440a7cf42ac63b919c752ca73a85067e05992
- name: github.com/pelletier/go-toml
version: 2009e44b6f182e34d8ce081ac2767622937ea3d4
version: 13d49d4606eb801b8f01ae542b4afc4c6ee3d84a
- name: github.com/pkg/errors
version: 2b3a18b5f0fb6b4f9190549597d3f962c02bc5eb
version: 645ef00459ed84a119197bfb8d8205042c6df63d
- name: github.com/prometheus/client_golang
version: e51041b3fa41cece0dca035740ba6411905be473
subpackages:
@ -447,26 +448,26 @@ imports:
- model
- name: github.com/prometheus/procfs
version: 454a56f35412459b5e684fd5ec0f9211b94f002a
- name: github.com/sirupsen/logrus
version: f006c2ac4710855cf0f916dd6b77acf6b048dc6e
- name: github.com/Sirupsen/logrus
version: f006c2ac4710855cf0f916dd6b77acf6b048dc6e
repo: git@github.com:/sirupsen/logrus
vcs: git
- name: github.com/sirupsen/logrus
version: f006c2ac4710855cf0f916dd6b77acf6b048dc6e
- name: github.com/spf13/afero
version: e67d870304c4bca21331b02f414f970df13aa694
version: 2f30b2a92c0e5700bcfe4715891adb1f2a7a406d
subpackages:
- mem
- name: github.com/spf13/cast
version: acbeb36b902d72a7a4c18e8f3241075e7ab763e4
version: 24b6558033ffe202bf42f0f3b870dcc798dd2ba8
- name: github.com/spf13/cobra
version: 4d6af280c76ff7d266434f2dba207c4b75dfc076
version: 9495bc009a56819bdb0ddbc1a373e29c140bc674
- name: github.com/spf13/jwalterweatherman
version: 12bd96e66386c1960ab0f74ced1362f66f552f7b
version: 33c24e77fb80341fe7130ee7c594256ff08ccc46
- name: github.com/spf13/pflag
version: a9789e855c7696159b7db0db7f440b449edf2b31
version: 5ccb023bc27df288a957c5e994cd44fd19619465
- name: github.com/spf13/viper
version: d9cca5ef33035202efb1586825bdbb15ff9ec3ba
version: 651d9d916abc3c3d6a91a12549495caba5edffd2
- name: github.com/ugorji/go
version: f4485b318aadd133842532f841dc205a8e339d74
subpackages:
@ -478,7 +479,7 @@ imports:
- name: github.com/xeipuuv/gojsonschema
version: 93e72a773fade158921402d6a24c819b48aba29d
- name: golang.org/x/crypto
version: 1f22c0103821b9390939b6776727195525381532
version: 81e90905daefcd6fd217b62423c0908922eadb30
subpackages:
- ssh/terminal
- name: golang.org/x/net
@ -504,9 +505,10 @@ imports:
- jws
- jwt
- name: golang.org/x/sys
version: ebfc5b4631820b793c9010c87fd8fef0f39eb082
version: 833a04a10549a95dc34458c195cbad61bbb6cb4d
subpackages:
- unix
- windows
- name: golang.org/x/text
version: ceefd2213ed29504fff30155163c8f59827734f3
subpackages:
@ -546,7 +548,7 @@ imports:
- name: gopkg.in/inf.v0
version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
- name: gopkg.in/yaml.v2
version: eb3733d160e74a9c7e442f435eb3bea458e1d19f
version: a5b47d31c556af34a302ce5d659e6fea44d90de0
- name: k8s.io/client-go
version: d72c0e162789e1bbb33c33cfa26858a1375efe01
subpackages:


@ -7,11 +7,6 @@ import:
- package: github.com/sirupsen/logrus
version: 1.0.3
- package: github.com/Sirupsen/logrus
repo: git@github.com:/sirupsen/logrus
vcs: git
version: 1.0.3
- package: github.com/xeipuuv/gojsonschema
version: 93e72a773fade158921402d6a24c819b48aba29d
@ -24,7 +19,7 @@ import:
# We use libcompose to parse v1 and v2 of Docker Compose
- package: github.com/docker/libcompose
version: 4a647d664afbe05c41455c9d534d8239671eb46a
version: 57bd716502dcbe1799f026148016022b0f3b989c
subpackages:
- config
- lookup
@ -32,7 +27,7 @@ import:
# We use docker/cli to parse v3 of Docker Compose
- package: github.com/docker/cli
version: 9bdb0763b9e667dc01adf36ba98a2b7bd47bdc75
version: 9b7656cc05d2878c85dc1252f5813f0ad77d808f
# Docker parser library for image names
- package: github.com/novln/docker-parser


@ -19,7 +19,7 @@ package app
import (
"strings"
log "github.com/Sirupsen/logrus"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
// install kubernetes api


@ -26,10 +26,10 @@ import (
"k8s.io/kubernetes/pkg/api"
log "github.com/Sirupsen/logrus"
"github.com/fatih/structs"
"github.com/kubernetes/kompose/pkg/kobject"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
// Bundle is docker bundle file loader, implements Loader interface


@ -24,11 +24,11 @@ import (
yaml "gopkg.in/yaml.v2"
log "github.com/Sirupsen/logrus"
"github.com/docker/libcompose/project"
"github.com/fatih/structs"
"github.com/kubernetes/kompose/pkg/kobject"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
// Compose is docker compose file loader, implements Loader interface


@ -25,6 +25,8 @@ import (
"github.com/kubernetes/kompose/pkg/kobject"
"k8s.io/kubernetes/pkg/api"
"time"
"github.com/docker/cli/cli/compose/types"
"github.com/docker/libcompose/config"
"github.com/docker/libcompose/project"
@ -32,14 +34,18 @@ import (
"github.com/pkg/errors"
)
func durationPtr(value time.Duration) *time.Duration {
return &value
}
func TestParseHealthCheck(t *testing.T) {
helperValue := uint64(2)
check := types.HealthCheckConfig{
Test: []string{"CMD-SHELL", "echo", "foobar"},
Timeout: "1s",
Interval: "2s",
Timeout: durationPtr(1 * time.Second),
Interval: durationPtr(2 * time.Second),
Retries: &helperValue,
StartPeriod: "3s",
StartPeriod: durationPtr(3 * time.Second),
}
// CMD-SHELL or SHELL is included in Test within docker/cli, thus we remove the first value in Test
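As a standalone illustration of that comment (not part of the diff): docker/cli stores the shell form of a healthcheck as ["CMD-SHELL", command...], so the converter drops the first element before building the probe command.

package main

import "fmt"

func main() {
	// docker/cli prefixes the healthcheck Test slice with "CMD-SHELL" (or "CMD"),
	// so kompose strips that marker before emitting the probe command.
	test := []string{"CMD-SHELL", "echo", "foobar"}
	fmt.Println(test[1:]) // [echo foobar]
}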


@ -26,13 +26,13 @@ import (
"k8s.io/kubernetes/pkg/api"
log "github.com/Sirupsen/logrus"
"github.com/docker/libcompose/config"
"github.com/docker/libcompose/lookup"
"github.com/docker/libcompose/project"
"github.com/kubernetes/kompose/pkg/kobject"
"github.com/kubernetes/kompose/pkg/transformer"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
// Parse Docker Compose with libcompose (only supports v1 and v2). Eventually we will


@ -31,9 +31,9 @@ import (
"os"
log "github.com/Sirupsen/logrus"
"github.com/kubernetes/kompose/pkg/kobject"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
// converts os.Environ() ([]string) to map[string]string
@ -171,16 +171,16 @@ func parseHealthCheck(composeHealthCheck types.HealthCheckConfig) (kobject.Healt
var timeout, interval, retries, startPeriod int32
// Here we convert the timeout from 1h30s (example) to 3630 seconds.
if composeHealthCheck.Timeout != "" {
parse, err := time.ParseDuration(composeHealthCheck.Timeout)
if composeHealthCheck.Timeout != nil {
parse, err := time.ParseDuration(composeHealthCheck.Timeout.String())
if err != nil {
return kobject.HealthCheck{}, errors.Wrap(err, "unable to parse health check timeout variable")
}
timeout = int32(parse.Seconds())
}
if composeHealthCheck.Interval != "" {
parse, err := time.ParseDuration(composeHealthCheck.Interval)
if composeHealthCheck.Interval != nil {
parse, err := time.ParseDuration(composeHealthCheck.Interval.String())
if err != nil {
return kobject.HealthCheck{}, errors.Wrap(err, "unable to parse health check interval variable")
}
@ -191,8 +191,8 @@ func parseHealthCheck(composeHealthCheck types.HealthCheckConfig) (kobject.Healt
retries = int32(*composeHealthCheck.Retries)
}
if composeHealthCheck.StartPeriod != "" {
parse, err := time.ParseDuration(composeHealthCheck.StartPeriod)
if composeHealthCheck.StartPeriod != nil {
parse, err := time.ParseDuration(composeHealthCheck.StartPeriod.String())
if err != nil {
return kobject.HealthCheck{}, errors.Wrap(err, "unable to parse health check startPeriod variable")
}
@ -312,8 +312,9 @@ func dockerComposeToKomposeMapping(composeObject *types.Config) (kobject.Kompose
// TODO: Build is not yet supported, see:
// https://github.com/docker/cli/blob/master/cli/compose/types/types.go#L9
// We will have to *manually* add this / parse.
// serviceConfig.Build = composeServiceConfig.Build.Context
// serviceConfig.Dockerfile = composeServiceConfig.Build.Dockerfile
serviceConfig.Build = composeServiceConfig.Build.Context
serviceConfig.Dockerfile = composeServiceConfig.Build.Dockerfile
serviceConfig.BuildArgs = composeServiceConfig.Build.Args
// Gather the environment values
// DockerCompose uses map[string]*string while we use []string
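A standalone sketch (not part of the diff) of the duration handling above: docker/cli now hands kompose *time.Duration values, and parseHealthCheck flattens them to whole seconds for the int32 probe fields.

package main

import (
	"fmt"
	"time"
)

// Mirrors the conversion in parseHealthCheck: nil means "not set",
// otherwise the duration is truncated to whole seconds.
func toSeconds(d *time.Duration) int32 {
	if d == nil {
		return 0
	}
	return int32(d.Seconds())
}

func main() {
	d, err := time.ParseDuration("1h30s")
	if err != nil {
		panic(err)
	}
	fmt.Println(toSeconds(&d)) // 3630
}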


@ -30,11 +30,11 @@ import (
"text/template"
"time"
log "github.com/Sirupsen/logrus"
"github.com/ghodss/yaml"
"github.com/joho/godotenv"
"github.com/kubernetes/kompose/pkg/kobject"
"github.com/kubernetes/kompose/pkg/transformer"
log "github.com/sirupsen/logrus"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"


@ -22,12 +22,12 @@ import (
"strconv"
"time"
log "github.com/Sirupsen/logrus"
"github.com/fatih/structs"
"github.com/kubernetes/kompose/pkg/kobject"
"github.com/kubernetes/kompose/pkg/transformer"
buildapi "github.com/openshift/origin/pkg/build/api"
deployapi "github.com/openshift/origin/pkg/deploy/api"
log "github.com/sirupsen/logrus"
// install kubernetes api
_ "k8s.io/kubernetes/pkg/api/install"


@ -23,7 +23,7 @@ import (
"github.com/kubernetes/kompose/pkg/kobject"
"github.com/kubernetes/kompose/pkg/transformer/kubernetes"
log "github.com/Sirupsen/logrus"
log "github.com/sirupsen/logrus"
kapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"


@ -24,8 +24,8 @@ import (
"path"
"strings"
log "github.com/Sirupsen/logrus"
"github.com/kubernetes/kompose/pkg/kobject"
log "github.com/sirupsen/logrus"
"path/filepath"


@ -18,10 +18,10 @@ package docker
import (
"bytes"
log "github.com/Sirupsen/logrus"
dockerlib "github.com/fsouza/go-dockerclient"
"github.com/kubernetes/kompose/pkg/utils/archive"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"io/ioutil"
"os"
"path"


@ -18,10 +18,10 @@ package docker
import (
"bytes"
log "github.com/Sirupsen/logrus"
dockerlib "github.com/fsouza/go-dockerclient"
"github.com/novln/docker-parser"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
// Push will provide methods for interaction with API regarding pushing images


@ -84,6 +84,16 @@ cmd="kompose --provider=openshift -f $KOMPOSE_ROOT/script/test/fixtures/nginx-no
sed -e "s;%VERSION%;$version;g" -e "s;%CMD%;$cmd;g" -e "s;%URI%;$uri;g" -e "s;%REF%;$branch;g" $KOMPOSE_ROOT/script/test/fixtures/nginx-node-redis/output-os-template.json > /tmp/output-os.json
convert::expect_success_and_warning "kompose --provider=openshift -f $KOMPOSE_ROOT/script/test/fixtures/nginx-node-redis/docker-compose.yml convert --stdout -j --build build-config" "/tmp/output-os.json" "$warning"
# Replacing variables with current branch and uri
# Test BuildConfig v3/OpenShift
cmd="kompose --provider=openshift -f $KOMPOSE_ROOT/script/test/fixtures/nginx-node-redis/docker-compose-v3.yml convert --stdout -j --build build-config"
sed -e "s;%VERSION%;$version;g" -e "s;%CMD%;$cmd;g" -e "s;%URI%;$uri;g" -e "s;%REF%;$branch;g" $KOMPOSE_ROOT/script/test/fixtures/nginx-node-redis/output-os-template-v3.json > /tmp/output-os.json
convert::expect_success_and_warning "kompose --provider=openshift -f $KOMPOSE_ROOT/script/test/fixtures/nginx-node-redis/docker-compose-v3.yml convert --stdout -j --build build-config" "/tmp/output-os.json" "$warning"
# Kubernetes Test for v3
cmd="kompose -f $KOMPOSE_ROOT/script/test/fixtures/nginx-node-redis/docker-compose-v3.yml convert --stdout -j"
sed -e "s;%VERSION%;$version;g" -e "s;%CMD%;$cmd;g" -e "s;%URI%;$uri;g" -e "s;%REF%;$branch;g" $KOMPOSE_ROOT/script/test/fixtures/nginx-node-redis/output-k8s-template-v3.json > /tmp/output-k8s.json
convert::expect_success "kompose -f $KOMPOSE_ROOT/script/test/fixtures/nginx-node-redis/docker-compose-v3.yml convert --stdout -j" "/tmp/output-k8s.json"
######
# Tests related to docker-compose file in /script/test/fixtures/entrypoint-command


@ -0,0 +1,25 @@
version: "3"
services:
nginx:
build: ./nginx
ports:
- "80:80"
restart: always
node1:
build: ./node
ports:
- "8080"
node2:
build: ./node
ports:
- "8080"
node3:
build: ./node
ports:
- "8080"
redis:
image: redis
ports:
- "6379"


@ -0,0 +1,372 @@
{
"kind": "List",
"apiVersion": "v1",
"metadata": {},
"items": [
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "nginx",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "nginx"
},
"annotations": {
"kompose.cmd": "%CMD%",
"kompose.version": "%VERSION%"
}
},
"spec": {
"ports": [
{
"name": "80",
"port": 80,
"targetPort": 80
}
],
"selector": {
"io.kompose.service": "nginx"
}
},
"status": {
"loadBalancer": {}
}
},
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "node1",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "node1"
},
"annotations": {
"kompose.cmd": "%CMD%",
"kompose.version": "%VERSION%"
}
},
"spec": {
"ports": [
{
"name": "8080",
"port": 8080,
"targetPort": 8080
}
],
"selector": {
"io.kompose.service": "node1"
}
},
"status": {
"loadBalancer": {}
}
},
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "node2",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "node2"
},
"annotations": {
"kompose.cmd": "%CMD%",
"kompose.version": "%VERSION%"
}
},
"spec": {
"ports": [
{
"name": "8080",
"port": 8080,
"targetPort": 8080
}
],
"selector": {
"io.kompose.service": "node2"
}
},
"status": {
"loadBalancer": {}
}
},
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "node3",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "node3"
},
"annotations": {
"kompose.cmd": "%CMD%",
"kompose.version": "%VERSION%"
}
},
"spec": {
"ports": [
{
"name": "8080",
"port": 8080,
"targetPort": 8080
}
],
"selector": {
"io.kompose.service": "node3"
}
},
"status": {
"loadBalancer": {}
}
},
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "redis",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "redis"
},
"annotations": {
"kompose.cmd": "%CMD%",
"kompose.version": "%VERSION%"
}
},
"spec": {
"ports": [
{
"name": "6379",
"port": 6379,
"targetPort": 6379
}
],
"selector": {
"io.kompose.service": "redis"
}
},
"status": {
"loadBalancer": {}
}
},
{
"kind": "Deployment",
"apiVersion": "extensions/v1beta1",
"metadata": {
"name": "nginx",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "nginx"
},
"annotations": {
"kompose.cmd": "%CMD%",
"kompose.version": "%VERSION%"
}
},
"spec": {
"replicas": 1,
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"io.kompose.service": "nginx"
}
},
"spec": {
"containers": [
{
"name": "nginx",
"image": "nginx",
"ports": [
{
"containerPort": 80
}
],
"resources": {}
}
],
"restartPolicy": "Always"
}
},
"strategy": {}
},
"status": {}
},
{
"kind": "Deployment",
"apiVersion": "extensions/v1beta1",
"metadata": {
"name": "node1",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "node1"
},
"annotations": {
"kompose.cmd": "%CMD%",
"kompose.version": "%VERSION%"
}
},
"spec": {
"replicas": 1,
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"io.kompose.service": "node1"
}
},
"spec": {
"containers": [
{
"name": "node1",
"image": "node1",
"ports": [
{
"containerPort": 8080
}
],
"resources": {}
}
],
"restartPolicy": "Always"
}
},
"strategy": {}
},
"status": {}
},
{
"kind": "Deployment",
"apiVersion": "extensions/v1beta1",
"metadata": {
"name": "node2",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "node2"
},
"annotations": {
"kompose.cmd": "%CMD%",
"kompose.version": "%VERSION%"
}
},
"spec": {
"replicas": 1,
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"io.kompose.service": "node2"
}
},
"spec": {
"containers": [
{
"name": "node2",
"image": "node2",
"ports": [
{
"containerPort": 8080
}
],
"resources": {}
}
],
"restartPolicy": "Always"
}
},
"strategy": {}
},
"status": {}
},
{
"kind": "Deployment",
"apiVersion": "extensions/v1beta1",
"metadata": {
"name": "node3",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "node3"
},
"annotations": {
"kompose.cmd": "%CMD%",
"kompose.version": "%VERSION%"
}
},
"spec": {
"replicas": 1,
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"io.kompose.service": "node3"
}
},
"spec": {
"containers": [
{
"name": "node3",
"image": "node3",
"ports": [
{
"containerPort": 8080
}
],
"resources": {}
}
],
"restartPolicy": "Always"
}
},
"strategy": {}
},
"status": {}
},
{
"kind": "Deployment",
"apiVersion": "extensions/v1beta1",
"metadata": {
"name": "redis",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "redis"
},
"annotations": {
"kompose.cmd": "%CMD%",
"kompose.version": "%VERSION%"
}
},
"spec": {
"replicas": 1,
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"io.kompose.service": "redis"
}
},
"spec": {
"containers": [
{
"name": "redis",
"image": "redis",
"ports": [
{
"containerPort": 6379
}
],
"resources": {}
}
],
"restartPolicy": "Always"
}
},
"strategy": {}
},
"status": {}
}
]
}


@ -0,0 +1,807 @@
{
"kind": "List",
"apiVersion": "v1",
"metadata": {},
"items": [
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "nginx",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "nginx"
},
"annotations": {
"kompose.cmd": "%CMD%",
"kompose.version": "%VERSION%"
}
},
"spec": {
"ports": [
{
"name": "80",
"port": 80,
"targetPort": 80
}
],
"selector": {
"io.kompose.service": "nginx"
}
},
"status": {
"loadBalancer": {}
}
},
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "node1",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "node1"
},
"annotations": {
"kompose.cmd": "%CMD%",
"kompose.version": "%VERSION%"
}
},
"spec": {
"ports": [
{
"name": "8080",
"port": 8080,
"targetPort": 8080
}
],
"selector": {
"io.kompose.service": "node1"
}
},
"status": {
"loadBalancer": {}
}
},
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "node2",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "node2"
},
"annotations": {
"kompose.cmd": "%CMD%",
"kompose.version": "%VERSION%"
}
},
"spec": {
"ports": [
{
"name": "8080",
"port": 8080,
"targetPort": 8080
}
],
"selector": {
"io.kompose.service": "node2"
}
},
"status": {
"loadBalancer": {}
}
},
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "node3",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "node3"
},
"annotations": {
"kompose.cmd": "%CMD%",
"kompose.version": "%VERSION%"
}
},
"spec": {
"ports": [
{
"name": "8080",
"port": 8080,
"targetPort": 8080
}
],
"selector": {
"io.kompose.service": "node3"
}
},
"status": {
"loadBalancer": {}
}
},
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "redis",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "redis"
},
"annotations": {
"kompose.cmd": "%CMD%",
"kompose.version": "%VERSION%"
}
},
"spec": {
"ports": [
{
"name": "6379",
"port": 6379,
"targetPort": 6379
}
],
"selector": {
"io.kompose.service": "redis"
}
},
"status": {
"loadBalancer": {}
}
},
{
"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
"name": "nginx",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "nginx"
},
"annotations": {
"kompose.cmd": "%CMD%",
"kompose.version": "%VERSION%"
}
},
"spec": {
"strategy": {
"resources": {}
},
"triggers": [
{
"type": "ConfigChange"
},
{
"type": "ImageChange",
"imageChangeParams": {
"automatic": true,
"containerNames": [
"nginx"
],
"from": {
"kind": "ImageStreamTag",
"name": "nginx:latest"
}
}
}
],
"replicas": 1,
"test": false,
"selector": {
"io.kompose.service": "nginx"
},
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"io.kompose.service": "nginx"
}
},
"spec": {
"containers": [
{
"name": "nginx",
"image": " ",
"ports": [
{
"containerPort": 80
}
],
"resources": {}
}
],
"restartPolicy": "Always"
}
}
},
"status": {}
},
{
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
"name": "nginx",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "nginx"
}
},
"spec": {
"tags": [
{
"name": "latest",
"annotations": null,
"from": {
"kind": "DockerImage",
"name": "nginx"
},
"generation": null,
"importPolicy": {}
}
]
},
"status": {
"dockerImageRepository": ""
}
},
{
"kind": "BuildConfig",
"apiVersion": "v1",
"metadata": {
"name": "nginx",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "nginx"
},
"annotations": {
"kompose.cmd": "%CMD%",
"kompose.version": "%VERSION%"
}
},
"spec": {
"triggers": [
{
"type": "ConfigChange"
}
],
"runPolicy": "Serial",
"source": {
"type": "Git",
"git": {
"uri": "%URI%",
"ref": "%REF%"
},
"contextDir": "script/test/fixtures/nginx-node-redis/nginx/"
},
"strategy": {
"type": "Docker",
"dockerStrategy": {}
},
"output": {
"to": {
"kind": "ImageStreamTag",
"name": "nginx:latest"
}
},
"resources": {},
"postCommit": {},
"nodeSelector": null
},
"status": {
"lastVersion": 0
}
},
{
"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
"name": "node1",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "node1"
},
"annotations": {
"kompose.cmd": "%CMD%",
"kompose.version": "%VERSION%"
}
},
"spec": {
"strategy": {
"resources": {}
},
"triggers": [
{
"type": "ConfigChange"
},
{
"type": "ImageChange",
"imageChangeParams": {
"automatic": true,
"containerNames": [
"node1"
],
"from": {
"kind": "ImageStreamTag",
"name": "node1:latest"
}
}
}
],
"replicas": 1,
"test": false,
"selector": {
"io.kompose.service": "node1"
},
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"io.kompose.service": "node1"
}
},
"spec": {
"containers": [
{
"name": "node1",
"image": " ",
"ports": [
{
"containerPort": 8080
}
],
"resources": {}
}
],
"restartPolicy": "Always"
}
}
},
"status": {}
},
{
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
"name": "node1",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "node1"
}
},
"spec": {
"tags": [
{
"name": "latest",
"annotations": null,
"from": {
"kind": "DockerImage",
"name": "node1"
},
"generation": null,
"importPolicy": {}
}
]
},
"status": {
"dockerImageRepository": ""
}
},
{
"kind": "BuildConfig",
"apiVersion": "v1",
"metadata": {
"name": "node1",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "node1"
},
"annotations": {
"kompose.cmd": "%CMD%",
"kompose.version": "%VERSION%"
}
},
"spec": {
"triggers": [
{
"type": "ConfigChange"
}
],
"runPolicy": "Serial",
"source": {
"type": "Git",
"git": {
"uri": "%URI%",
"ref": "%REF%"
},
"contextDir": "script/test/fixtures/nginx-node-redis/node/"
},
"strategy": {
"type": "Docker",
"dockerStrategy": {}
},
"output": {
"to": {
"kind": "ImageStreamTag",
"name": "node1:latest"
}
},
"resources": {},
"postCommit": {},
"nodeSelector": null
},
"status": {
"lastVersion": 0
}
},
{
"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
"name": "node2",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "node2"
},
"annotations": {
"kompose.cmd": "%CMD%",
"kompose.version": "%VERSION%"
}
},
"spec": {
"strategy": {
"resources": {}
},
"triggers": [
{
"type": "ConfigChange"
},
{
"type": "ImageChange",
"imageChangeParams": {
"automatic": true,
"containerNames": [
"node2"
],
"from": {
"kind": "ImageStreamTag",
"name": "node2:latest"
}
}
}
],
"replicas": 1,
"test": false,
"selector": {
"io.kompose.service": "node2"
},
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"io.kompose.service": "node2"
}
},
"spec": {
"containers": [
{
"name": "node2",
"image": " ",
"ports": [
{
"containerPort": 8080
}
],
"resources": {}
}
],
"restartPolicy": "Always"
}
}
},
"status": {}
},
{
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
"name": "node2",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "node2"
}
},
"spec": {
"tags": [
{
"name": "latest",
"annotations": null,
"from": {
"kind": "DockerImage",
"name": "node2"
},
"generation": null,
"importPolicy": {}
}
]
},
"status": {
"dockerImageRepository": ""
}
},
{
"kind": "BuildConfig",
"apiVersion": "v1",
"metadata": {
"name": "node2",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "node2"
},
"annotations": {
"kompose.cmd": "%CMD%",
"kompose.version": "%VERSION%"
}
},
"spec": {
"triggers": [
{
"type": "ConfigChange"
}
],
"runPolicy": "Serial",
"source": {
"type": "Git",
"git": {
"uri": "%URI%",
"ref": "%REF%"
},
"contextDir": "script/test/fixtures/nginx-node-redis/node/"
},
"strategy": {
"type": "Docker",
"dockerStrategy": {}
},
"output": {
"to": {
"kind": "ImageStreamTag",
"name": "node2:latest"
}
},
"resources": {},
"postCommit": {},
"nodeSelector": null
},
"status": {
"lastVersion": 0
}
},
{
"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
"name": "node3",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "node3"
},
"annotations": {
"kompose.cmd": "%CMD%",
"kompose.version": "%VERSION%"
}
},
"spec": {
"strategy": {
"resources": {}
},
"triggers": [
{
"type": "ConfigChange"
},
{
"type": "ImageChange",
"imageChangeParams": {
"automatic": true,
"containerNames": [
"node3"
],
"from": {
"kind": "ImageStreamTag",
"name": "node3:latest"
}
}
}
],
"replicas": 1,
"test": false,
"selector": {
"io.kompose.service": "node3"
},
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"io.kompose.service": "node3"
}
},
"spec": {
"containers": [
{
"name": "node3",
"image": " ",
"ports": [
{
"containerPort": 8080
}
],
"resources": {}
}
],
"restartPolicy": "Always"
}
}
},
"status": {}
},
{
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
"name": "node3",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "node3"
}
},
"spec": {
"tags": [
{
"name": "latest",
"annotations": null,
"from": {
"kind": "DockerImage",
"name": "node3"
},
"generation": null,
"importPolicy": {}
}
]
},
"status": {
"dockerImageRepository": ""
}
},
{
"kind": "BuildConfig",
"apiVersion": "v1",
"metadata": {
"name": "node3",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "node3"
},
"annotations": {
"kompose.cmd": "%CMD%",
"kompose.version": "%VERSION%"
}
},
"spec": {
"triggers": [
{
"type": "ConfigChange"
}
],
"runPolicy": "Serial",
"source": {
"type": "Git",
"git": {
"uri": "%URI%",
"ref": "%REF%"
},
"contextDir": "script/test/fixtures/nginx-node-redis/node/"
},
"strategy": {
"type": "Docker",
"dockerStrategy": {}
},
"output": {
"to": {
"kind": "ImageStreamTag",
"name": "node3:latest"
}
},
"resources": {},
"postCommit": {},
"nodeSelector": null
},
"status": {
"lastVersion": 0
}
},
{
"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
"name": "redis",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "redis"
},
"annotations": {
"kompose.cmd": "%CMD%",
"kompose.version": "%VERSION%"
}
},
"spec": {
"strategy": {
"resources": {}
},
"triggers": [
{
"type": "ConfigChange"
},
{
"type": "ImageChange",
"imageChangeParams": {
"automatic": true,
"containerNames": [
"redis"
],
"from": {
"kind": "ImageStreamTag",
"name": "redis:latest"
}
}
}
],
"replicas": 1,
"test": false,
"selector": {
"io.kompose.service": "redis"
},
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"io.kompose.service": "redis"
}
},
"spec": {
"containers": [
{
"name": "redis",
"image": " ",
"ports": [
{
"containerPort": 6379
}
],
"resources": {}
}
],
"restartPolicy": "Always"
}
}
},
"status": {}
},
{
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
"name": "redis",
"creationTimestamp": null,
"labels": {
"io.kompose.service": "redis"
}
},
"spec": {},
"status": {
"dockerImageRepository": ""
}
}
]
}


@ -8,7 +8,6 @@ import (
"sort"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/cli/cli/compose/interpolation"
"github.com/docker/cli/cli/compose/schema"
"github.com/docker/cli/cli/compose/template"
@ -19,6 +18,7 @@ import (
shellwords "github.com/mattn/go-shellwords"
"github.com/mitchellh/mapstructure"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
yaml "gopkg.in/yaml.v2"
)
@ -221,6 +221,7 @@ func createTransformHook() mapstructure.DecodeHookFuncType {
reflect.TypeOf(types.Labels{}): transformMappingOrListFunc("=", false),
reflect.TypeOf(types.MappingWithColon{}): transformMappingOrListFunc(":", false),
reflect.TypeOf(types.ServiceVolumeConfig{}): transformServiceVolumeConfig,
reflect.TypeOf(types.BuildConfig{}): transformBuildConfig,
}
return func(_ reflect.Type, target reflect.Type, data interface{}) (interface{}, error) {
@ -563,6 +564,17 @@ func transformStringSourceMap(data interface{}) (interface{}, error) {
}
}
func transformBuildConfig(data interface{}) (interface{}, error) {
switch value := data.(type) {
case string:
return map[string]interface{}{"context": value}, nil
case map[string]interface{}:
return data, nil
default:
return data, errors.Errorf("invalid type %T for service build", value)
}
}
func transformServiceVolumeConfig(data interface{}) (interface{}, error) {
switch value := data.(type) {
case string:
@ -572,7 +584,6 @@ func transformServiceVolumeConfig(data interface{}) (interface{}, error) {
default:
return data, errors.Errorf("invalid type %T for service volume", value)
}
}
func transformServiceNetworkMap(value interface{}) (interface{}, error) {
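A standalone copy (not part of the diff) of the behaviour of the transformBuildConfig hook added above, with fmt.Errorf substituted for the vendored errors package: the short string form of build becomes a one-entry context map, while the long map form passes through unchanged.

package main

import "fmt"

// Same shape as the transformBuildConfig decode hook above, using fmt.Errorf
// instead of github.com/pkg/errors.
func normalizeBuild(data interface{}) (interface{}, error) {
	switch value := data.(type) {
	case string:
		return map[string]interface{}{"context": value}, nil
	case map[string]interface{}:
		return data, nil
	default:
		return data, fmt.Errorf("invalid type %T for service build", value)
	}
}

func main() {
	short, _ := normalizeBuild("./nginx") // build: ./nginx
	long, _ := normalizeBuild(map[string]interface{}{ // build: {context: ..., dockerfile: ...}
		"context":    "./nginx",
		"dockerfile": "Dockerfile",
	})
	fmt.Println(short)
	fmt.Println(long)
}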


@ -112,6 +112,11 @@ func isFilePath(source string) bool {
return true
}
// windows named pipes
if strings.HasPrefix(source, `\\`) {
return true
}
first, nextIndex := utf8.DecodeRuneInString(source)
return isWindowsDrive([]rune{first}, rune(source[nextIndex]))
}
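A standalone sketch (not part of the diff) of what the new prefix check covers: a source starting with `\\`, such as the Windows named pipe \\.\pipe\docker_engine, is presumably meant to be treated as a file path rather than as a named volume.

package main

import (
	"fmt"
	"strings"
)

// Sketch of the added check: a leading `\\` marks a Windows named pipe,
// which should be handled as a path.
func isNamedPipe(source string) bool {
	return strings.HasPrefix(source, `\\`)
}

func main() {
	fmt.Println(isNamedPipe(`\\.\pipe\docker_engine`)) // true
	fmt.Println(isNamedPipe("dbdata"))                 // false
}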

File diff suppressed because one or more lines are too long


@ -23,6 +23,7 @@ var UnsupportedProperties = []string{
"shm_size",
"sysctls",
"tmpfs",
"ulimits",
"userns_mode",
}
@ -182,10 +183,10 @@ type DeployConfig struct {
// HealthCheckConfig the healthcheck configuration for a service
type HealthCheckConfig struct {
Test HealthCheckTest
Timeout string
Interval string
Timeout *time.Duration
Interval *time.Duration
Retries *uint64
StartPeriod string
StartPeriod *time.Duration `mapstructure:"start_period"`
Disable bool
}


@ -95,12 +95,12 @@ func (n *NetworkOpt) String() string {
return ""
}
func parseDriverOpt(driverOpt string) (key string, value string, err error) {
func parseDriverOpt(driverOpt string) (string, string, error) {
parts := strings.SplitN(driverOpt, "=", 2)
if len(parts) != 2 {
err = fmt.Errorf("invalid key value pair format in driver options")
return "", "", fmt.Errorf("invalid key value pair format in driver options")
}
key = strings.TrimSpace(strings.ToLower(parts[0]))
value = strings.TrimSpace(strings.ToLower(parts[1]))
return
key := strings.TrimSpace(strings.ToLower(parts[0]))
value := strings.TrimSpace(strings.ToLower(parts[1]))
return key, value, nil
}


@ -8,7 +8,7 @@ import (
"sync"
"time"
log "github.com/Sirupsen/logrus"
log "github.com/sirupsen/logrus"
"github.com/docker/distribution/uuid"
"github.com/gorilla/mux"
)


@ -3,7 +3,7 @@ package context
import (
"fmt"
"github.com/Sirupsen/logrus"
"github.com/sirupsen/logrus"
"runtime"
)


@ -3,7 +3,7 @@ package schema1
import (
"crypto/x509"
"github.com/Sirupsen/logrus"
"github.com/sirupsen/logrus"
"github.com/docker/libtrust"
)


@ -1,81 +0,0 @@
package opts
import (
"bufio"
"bytes"
"fmt"
"os"
"strings"
"unicode"
"unicode/utf8"
)
// ParseEnvFile reads a file with environment variables enumerated by lines
//
// ``Environment variable names used by the utilities in the Shell and
// Utilities volume of IEEE Std 1003.1-2001 consist solely of uppercase
// letters, digits, and the '_' (underscore) from the characters defined in
// Portable Character Set and do not begin with a digit. *But*, other
// characters may be permitted by an implementation; applications shall
// tolerate the presence of such names.''
// -- http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html
//
// As of #16585, it's up to application inside docker to validate or not
// environment variables, that's why we just strip leading whitespace and
// nothing more.
func ParseEnvFile(filename string) ([]string, error) {
fh, err := os.Open(filename)
if err != nil {
return []string{}, err
}
defer fh.Close()
lines := []string{}
scanner := bufio.NewScanner(fh)
currentLine := 0
utf8bom := []byte{0xEF, 0xBB, 0xBF}
for scanner.Scan() {
scannedBytes := scanner.Bytes()
if !utf8.Valid(scannedBytes) {
return []string{}, fmt.Errorf("env file %s contains invalid utf8 bytes at line %d: %v", filename, currentLine+1, scannedBytes)
}
// We trim UTF8 BOM
if currentLine == 0 {
scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom)
}
// trim the line from all leading whitespace first
line := strings.TrimLeftFunc(string(scannedBytes), unicode.IsSpace)
currentLine++
// line is not empty, and not starting with '#'
if len(line) > 0 && !strings.HasPrefix(line, "#") {
data := strings.SplitN(line, "=", 2)
// trim the front of a variable, but nothing else
variable := strings.TrimLeft(data[0], whiteSpaces)
if strings.ContainsAny(variable, whiteSpaces) {
return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' has white spaces", variable)}
}
if len(data) > 1 {
// pass the value through, no trimming
lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1]))
} else {
// if only a pass-through variable is given, clean it up.
lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), os.Getenv(line)))
}
}
}
return lines, scanner.Err()
}
var whiteSpaces = " \t"
// ErrBadEnvVariable typed error for bad environment variable
type ErrBadEnvVariable struct {
msg string
}
func (e ErrBadEnvVariable) Error() string {
return fmt.Sprintf("poorly formatted environment: %s", e.msg)
}


@ -1,87 +0,0 @@
package opts
import (
"fmt"
"strconv"
"strings"
"github.com/docker/docker/api/types/container"
)
// ReadKVStrings reads a file of line terminated key=value pairs, and overrides any keys
// present in the file with additional pairs specified in the override parameter
func ReadKVStrings(files []string, override []string) ([]string, error) {
envVariables := []string{}
for _, ef := range files {
parsedVars, err := ParseEnvFile(ef)
if err != nil {
return nil, err
}
envVariables = append(envVariables, parsedVars...)
}
// parse the '-e' and '--env' after, to allow override
envVariables = append(envVariables, override...)
return envVariables, nil
}
// ConvertKVStringsToMap converts ["key=value"] to {"key":"value"}
func ConvertKVStringsToMap(values []string) map[string]string {
result := make(map[string]string, len(values))
for _, value := range values {
kv := strings.SplitN(value, "=", 2)
if len(kv) == 1 {
result[kv[0]] = ""
} else {
result[kv[0]] = kv[1]
}
}
return result
}
// ConvertKVStringsToMapWithNil converts ["key=value"] to {"key":"value"}
// but set unset keys to nil - meaning the ones with no "=" in them.
// We use this in cases where we need to distinguish between
// FOO= and FOO
// where the latter case just means FOO was mentioned but not given a value
func ConvertKVStringsToMapWithNil(values []string) map[string]*string {
result := make(map[string]*string, len(values))
for _, value := range values {
kv := strings.SplitN(value, "=", 2)
if len(kv) == 1 {
result[kv[0]] = nil
} else {
result[kv[0]] = &kv[1]
}
}
return result
}
// ParseRestartPolicy returns the parsed policy or an error indicating what is incorrect
func ParseRestartPolicy(policy string) (container.RestartPolicy, error) {
p := container.RestartPolicy{}
if policy == "" {
return p, nil
}
parts := strings.Split(policy, ":")
if len(parts) > 2 {
return p, fmt.Errorf("invalid restart policy format")
}
if len(parts) == 2 {
count, err := strconv.Atoi(parts[1])
if err != nil {
return p, fmt.Errorf("maximum retry count must be an integer")
}
p.MaximumRetryCount = count
}
p.Name = parts[0]
return p, nil
}


@ -86,8 +86,12 @@ func GetServiceHash(name string, config *ServiceConfig) string {
for _, sliceKey := range sliceKeys {
io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey))
}
case *yaml.Networks:
io.WriteString(hash, fmt.Sprintf("%s, ", s.HashString()))
case *yaml.Volumes:
io.WriteString(hash, fmt.Sprintf("%s, ", s.HashString()))
default:
io.WriteString(hash, fmt.Sprintf("%v", serviceValue))
io.WriteString(hash, fmt.Sprintf("%v, ", serviceValue))
}
}


@ -5,13 +5,19 @@ import (
"fmt"
"strings"
"github.com/Sirupsen/logrus"
"github.com/sirupsen/logrus"
)
var defaultValues = make(map[string]string)
func isNum(c uint8) bool {
return c >= '0' && c <= '9'
}
func validVariableDefault(c uint8, line string, pos int) bool {
return (c == ':' && line[pos+1] == '-') || (c == '-')
}
func validVariableNameChar(c uint8) bool {
return c == '_' ||
c >= 'A' && c <= 'Z' ||
@ -36,6 +42,30 @@ func parseVariable(line string, pos int, mapping func(string) string) (string, i
return mapping(buffer.String()), pos, true
}
func parseDefaultValue(line string, pos int) (string, int, bool) {
var buffer bytes.Buffer
// only skip :, :- and - at the beginning
for ; pos < len(line); pos++ {
c := line[pos]
if c == ':' || c == '-' {
continue
}
break
}
for ; pos < len(line); pos++ {
c := line[pos]
if c == '}' {
return buffer.String(), pos - 1, true
}
err := buffer.WriteByte(c)
if err != nil {
return "", pos, false
}
}
return "", 0, false
}
func parseVariableWithBraces(line string, pos int, mapping func(string) string) (string, int, bool) {
var buffer bytes.Buffer
@ -49,10 +79,13 @@ func parseVariableWithBraces(line string, pos int, mapping func(string) string)
if bufferString == "" {
return "", 0, false
}
return mapping(buffer.String()), pos, true
case validVariableNameChar(c):
buffer.WriteByte(c)
case validVariableDefault(c, line, pos):
defaultValue := ""
defaultValue, pos, _ = parseDefaultValue(line, pos)
defaultValues[buffer.String()] = defaultValue
default:
return "", 0, false
}
@ -143,10 +176,19 @@ func Interpolate(key string, data *interface{}, environmentLookup EnvironmentLoo
values := environmentLookup.Lookup(s, nil)
if len(values) == 0 {
if val, ok := defaultValues[s]; ok {
return val
}
logrus.Warnf("The %s variable is not set. Substituting a blank string.", s)
return ""
}
if strings.SplitN(values[0], "=", 2)[1] == "" {
if val, ok := defaultValues[s]; ok {
return val
}
}
// Use first result if many are given
value := values[0]


@ -6,11 +6,12 @@ import (
"fmt"
"strings"
"reflect"
"github.com/docker/docker/pkg/urlutil"
"github.com/docker/libcompose/utils"
composeYaml "github.com/docker/libcompose/yaml"
"gopkg.in/yaml.v2"
"reflect"
)
var (
@ -229,19 +230,11 @@ func readEnvFile(resourceLookup ResourceLookup, inFile string, serviceData RawSe
serviceData["environment"] = vars
delete(serviceData, "env_file")
return serviceData, nil
}
func mergeConfig(baseService, serviceData RawService) RawService {
for k, v := range serviceData {
// Image and build are mutually exclusive in merge
if k == "image" {
delete(baseService, "build")
} else if k == "build" {
delete(baseService, "image")
}
existing, ok := baseService[k]
if ok {
baseService[k] = merge(existing, v)


@ -4,8 +4,8 @@ import (
"fmt"
"path"
"github.com/Sirupsen/logrus"
"github.com/docker/libcompose/utils"
"github.com/sirupsen/logrus"
)
// MergeServicesV1 merges a v1 compose file into an existing set of service configs
@ -29,7 +29,7 @@ func MergeServicesV1(existingServices *ServiceConfigs, environmentLookup Environ
return nil, err
}
data = mergeConfig(rawExistingService, data)
data = mergeConfigV1(rawExistingService, data)
}
datas[name] = data
@ -148,7 +148,7 @@ func parseV1(resourceLookup ResourceLookup, environmentLookup EnvironmentLookup,
}
}
baseService = mergeConfig(baseService, serviceData)
baseService = mergeConfigV1(baseService, serviceData)
logrus.Debugf("Merged result %#v", baseService)
@ -177,3 +177,22 @@ func resolveContextV1(inFile string, serviceData RawService) RawService {
return serviceData
}
func mergeConfigV1(baseService, serviceData RawService) RawService {
for k, v := range serviceData {
// Image and build are mutually exclusive in merge
if k == "image" {
delete(baseService, "build")
} else if k == "build" {
delete(baseService, "image")
}
existing, ok := baseService[k]
if ok {
baseService[k] = merge(existing, v)
} else {
baseService[k] = v
}
}
return baseService
}
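A standalone sketch (not part of the diff) of the v1 merge rule above: image and build are mutually exclusive, so an override that sets one removes the other. The real mergeConfigV1 also deep-merges existing keys, which this sketch skips.

package main

import "fmt"

// Simplified version of mergeConfigV1: setting "image" drops "build"
// and vice versa; existing keys are simply overwritten here, whereas
// the real code merges them.
func mergeV1(base, override map[string]interface{}) map[string]interface{} {
	for k, v := range override {
		if k == "image" {
			delete(base, "build")
		} else if k == "build" {
			delete(base, "image")
		}
		base[k] = v
	}
	return base
}

func main() {
	base := map[string]interface{}{"build": "."}
	fmt.Println(mergeV1(base, map[string]interface{}{"image": "nginx:alpine"}))
	// map[image:nginx:alpine]
}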


@ -5,8 +5,8 @@ import (
"path"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/libcompose/utils"
"github.com/sirupsen/logrus"
)
// MergeServicesV2 merges a v2 compose file into an existing set of service configs


@ -3,7 +3,7 @@ package lookup
import (
"strings"
"github.com/docker/docker/runconfig/opts"
"github.com/docker/cli/opts"
"github.com/docker/libcompose/config"
)


@ -7,7 +7,7 @@ import (
"path/filepath"
"strings"
"github.com/Sirupsen/logrus"
"github.com/sirupsen/logrus"
)
// relativePath returns the proper relative path for the given file path. If


@ -9,9 +9,9 @@ import (
"regexp"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/libcompose/config"
"github.com/docker/libcompose/logger"
"github.com/sirupsen/logrus"
)
var projectRegexp = regexp.MustCompile("[^a-zA-Z0-9_.-]")


@ -3,8 +3,8 @@ package project
import (
"bytes"
"github.com/Sirupsen/logrus"
"github.com/docker/libcompose/project/events"
"github.com/sirupsen/logrus"
)
var (


@ -31,6 +31,7 @@ type Create struct {
// Run holds options of compose run.
type Run struct {
Detached bool
DisableTty bool
}
// Up holds options of compose up.


@ -10,13 +10,13 @@ import (
"golang.org/x/net/context"
log "github.com/Sirupsen/logrus"
"github.com/docker/libcompose/config"
"github.com/docker/libcompose/logger"
"github.com/docker/libcompose/lookup"
"github.com/docker/libcompose/project/events"
"github.com/docker/libcompose/utils"
"github.com/docker/libcompose/yaml"
log "github.com/sirupsen/logrus"
)
// ComposeVersion is name of docker-compose.yml file syntax supported version


@ -2,6 +2,7 @@ package project
import (
"fmt"
"sync"
"golang.org/x/net/context"
@ -12,6 +13,8 @@ import (
// the Filter struct.
func (p *Project) Containers(ctx context.Context, filter Filter, services ...string) ([]string, error) {
containers := []string{}
var lock sync.Mutex
err := p.forEach(services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) {
wrapper.Do(nil, events.NoEvent, events.NoEvent, func(service Service) error {
serviceContainers, innerErr := service.Containers(ctx)
@ -37,7 +40,9 @@ func (p *Project) Containers(ctx context.Context, filter Filter, services ...str
return fmt.Errorf("Invalid container filter: %s", filter.State)
}
containerID := container.ID()
lock.Lock()
containers = append(containers, containerID)
lock.Unlock()
}
return nil
})


@ -5,7 +5,7 @@ import (
"golang.org/x/net/context"
log "github.com/Sirupsen/logrus"
log "github.com/sirupsen/logrus"
)
// Scale scales the specified services.


@ -3,8 +3,8 @@ package project
import (
"sync"
log "github.com/Sirupsen/logrus"
"github.com/docker/libcompose/project/events"
log "github.com/sirupsen/logrus"
)
type serviceWrapper struct {


@ -5,7 +5,7 @@ import (
"sync"
"time"
"github.com/Sirupsen/logrus"
"github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
)


@ -3,6 +3,8 @@ package yaml
import (
"errors"
"fmt"
"sort"
"strings"
)
// Networks represents a list of service networks in compose file.
@ -20,6 +22,35 @@ type Network struct {
IPv6Address string `yaml:"ipv6_address,omitempty"`
}
// Generate a hash string to detect service network config changes
func (n *Networks) HashString() string {
if n == nil {
return ""
}
result := []string{}
for _, net := range n.Networks {
result = append(result, net.HashString())
}
sort.Strings(result)
return strings.Join(result, ",")
}
// Generate a hash string to detect service network config changes
func (n *Network) HashString() string {
if n == nil {
return ""
}
result := []string{}
result = append(result, n.Name)
result = append(result, n.RealName)
sort.Strings(n.Aliases)
result = append(result, strings.Join(n.Aliases, ","))
result = append(result, n.IPv4Address)
result = append(result, n.IPv6Address)
sort.Strings(result)
return strings.Join(result, ",")
}
// MarshalYAML implements the Marshaller interface.
func (n Networks) MarshalYAML() (interface{}, error) {
m := map[string]*Network{}


@ -3,6 +3,7 @@ package yaml
import (
"errors"
"fmt"
"sort"
"strings"
)
@ -19,6 +20,19 @@ type Volume struct {
AccessMode string `yaml:"-"`
}
// Generate a hash string to detect service volume config changes
func (v *Volumes) HashString() string {
if v == nil {
return ""
}
result := []string{}
for _, vol := range v.Volumes {
result = append(result, vol.String())
}
sort.Strings(result)
return strings.Join(result, ",")
}
// String implements the Stringer interface.
func (v *Volume) String() string {
var paths []string


@ -530,22 +530,15 @@ func (s *Struct) nested(val reflect.Value) interface{} {
finalVal = m
}
case reflect.Map:
// get the element type of the map
mapElem := val.Type()
switch val.Type().Kind() {
case reflect.Ptr, reflect.Array, reflect.Map,
reflect.Slice, reflect.Chan:
mapElem = val.Type().Elem()
if mapElem.Kind() == reflect.Ptr {
mapElem = mapElem.Elem()
}
v := val.Type().Elem()
if v.Kind() == reflect.Ptr {
v = v.Elem()
}
// only iterate over struct types, ie: map[string]StructType,
// map[string][]StructType,
if mapElem.Kind() == reflect.Struct ||
(mapElem.Kind() == reflect.Slice &&
mapElem.Elem().Kind() == reflect.Struct) {
if v.Kind() == reflect.Struct ||
(v.Kind() == reflect.Slice && v.Elem().Kind() == reflect.Struct) {
m := make(map[string]interface{}, val.Len())
for _, k := range val.MapKeys() {
m[k.String()] = s.nested(val.MapIndex(k))


@ -9,7 +9,6 @@ package fsnotify
import (
"bytes"
"errors"
"fmt"
)
@ -61,6 +60,3 @@ func (op Op) String() string {
func (e Event) String() string {
return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
}
// Common errors that can be reported by a watcher
var ErrEventOverflow = errors.New("fsnotify queue overflow")


@ -24,6 +24,7 @@ type Watcher struct {
Events chan Event
Errors chan error
mu sync.Mutex // Map access
cv *sync.Cond // sync removing on rm_watch with IN_IGNORE
fd int
poller *fdPoller
watches map[string]*watch // Map of inotify watches (key: path)
@ -55,6 +56,7 @@ func NewWatcher() (*Watcher, error) {
done: make(chan struct{}),
doneResp: make(chan struct{}),
}
w.cv = sync.NewCond(&w.mu)
go w.readEvents()
return w, nil
@ -101,23 +103,21 @@ func (w *Watcher) Add(name string) error {
var flags uint32 = agnosticEvents
w.mu.Lock()
defer w.mu.Unlock()
watchEntry := w.watches[name]
if watchEntry != nil {
flags |= watchEntry.flags | unix.IN_MASK_ADD
watchEntry, found := w.watches[name]
w.mu.Unlock()
if found {
watchEntry.flags |= flags
flags |= unix.IN_MASK_ADD
}
wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
if wd == -1 {
return errno
}
if watchEntry == nil {
w.mu.Lock()
w.watches[name] = &watch{wd: uint32(wd), flags: flags}
w.paths[wd] = name
} else {
watchEntry.wd = uint32(wd)
watchEntry.flags = flags
}
w.mu.Unlock()
return nil
}
@ -135,13 +135,6 @@ func (w *Watcher) Remove(name string) error {
if !ok {
return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
}
// We successfully removed the watch if InotifyRmWatch doesn't return an
// error, we need to clean up our internal state to ensure it matches
// inotify's kernel state.
delete(w.paths, int(watch.wd))
delete(w.watches, name)
// inotify_rm_watch will return EINVAL if the file has been deleted;
// the inotify will already have been removed.
// watches and pathes are deleted in ignoreLinux() implicitly and asynchronously
@ -159,6 +152,13 @@ func (w *Watcher) Remove(name string) error {
return errno
}
// wait until ignoreLinux() deleting maps
exists := true
for exists {
w.cv.Wait()
_, exists = w.watches[name]
}
return nil
}
@ -245,31 +245,13 @@ func (w *Watcher) readEvents() {
mask := uint32(raw.Mask)
nameLen := uint32(raw.Len)
if mask&unix.IN_Q_OVERFLOW != 0 {
select {
case w.Errors <- ErrEventOverflow:
case <-w.done:
return
}
}
// If the event happened to the watched directory or the watched file, the kernel
// doesn't append the filename to the event, but we would like to always fill the
// the "Name" field with a valid filename. We retrieve the path of the watch from
// the "paths" map.
w.mu.Lock()
name, ok := w.paths[int(raw.Wd)]
// IN_DELETE_SELF occurs when the file/directory being watched is removed.
// This is a sign to clean up the maps, otherwise we are no longer in sync
// with the inotify kernel state which has already deleted the watch
// automatically.
if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
delete(w.paths, int(raw.Wd))
delete(w.watches, name)
}
name := w.paths[int(raw.Wd)]
w.mu.Unlock()
if nameLen > 0 {
// Point "bytes" at the first byte of the filename
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
@ -280,7 +262,7 @@ func (w *Watcher) readEvents() {
event := newEvent(name, mask)
// Send the events that are not ignored on the events channel
if !event.ignoreLinux(mask) {
if !event.ignoreLinux(w, raw.Wd, mask) {
select {
case w.Events <- event:
case <-w.done:
@ -297,9 +279,15 @@ func (w *Watcher) readEvents() {
// Certain types of events can be "ignored" and not sent over the Events
// channel. Such as events marked ignore by the kernel, or MODIFY events
// against files that do not exist.
func (e *Event) ignoreLinux(mask uint32) bool {
func (e *Event) ignoreLinux(w *Watcher, wd int32, mask uint32) bool {
// Ignore anything the inotify API says to ignore
if mask&unix.IN_IGNORED == unix.IN_IGNORED {
w.mu.Lock()
defer w.mu.Unlock()
name := w.paths[int(wd)]
delete(w.paths, int(wd))
delete(w.watches, name)
w.cv.Broadcast()
return true
}


@ -89,7 +89,7 @@ func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error
switch k.Kind() {
case reflect.Bool:
return d.decodeBool(name, node, result)
case reflect.Float32, reflect.Float64:
case reflect.Float64:
return d.decodeFloat(name, node, result)
case reflect.Int, reflect.Int32, reflect.Int64:
return d.decodeInt(name, node, result)
@ -137,13 +137,13 @@ func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) e
func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error {
switch n := node.(type) {
case *ast.LiteralType:
if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER {
if n.Token.Type == token.FLOAT {
v, err := strconv.ParseFloat(n.Token.Text, 64)
if err != nil {
return err
}
result.Set(reflect.ValueOf(v).Convert(result.Type()))
result.Set(reflect.ValueOf(v))
return nil
}
}


@ -3,7 +3,6 @@
package parser
import (
"bytes"
"errors"
"fmt"
"strings"
@ -37,11 +36,6 @@ func newParser(src []byte) *Parser {
// Parse returns the fully parsed source and returns the abstract syntax tree.
func Parse(src []byte) (*ast.File, error) {
// normalize all line endings
// since the scanner and output only work with "\n" line endings, we may
// end up with dangling "\r" characters in the parsed data.
src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1)
p := newParser(src)
return p.Parse()
}
@ -197,12 +191,9 @@ func (p *Parser) objectItem() (*ast.ObjectItem, error) {
keyStr = append(keyStr, k.Token.Text)
}
return nil, &PosError{
Pos: p.tok.Pos,
Err: fmt.Errorf(
return nil, fmt.Errorf(
"key '%s' expected start of object ('{') or assignment ('=')",
strings.Join(keyStr, " ")),
}
strings.Join(keyStr, " "))
}
// do a look-ahead for line comment
@ -322,10 +313,7 @@ func (p *Parser) objectType() (*ast.ObjectType, error) {
// No error, scan and expect the ending to be a brace
if tok := p.scan(); tok.Type != token.RBRACE {
return nil, &PosError{
Pos: tok.Pos,
Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type),
}
return nil, fmt.Errorf("object expected closing RBRACE got: %s", tok.Type)
}
o.List = l
@ -358,7 +346,7 @@ func (p *Parser) listType() (*ast.ListType, error) {
}
}
switch tok.Type {
case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
case token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
node, err := p.literalType()
if err != nil {
return nil, err
@ -400,16 +388,12 @@ func (p *Parser) listType() (*ast.ListType, error) {
}
l.Add(node)
needComma = true
case token.BOOL:
// TODO(arslan) should we support? not supported by HCL yet
case token.LBRACK:
node, err := p.listType()
if err != nil {
return nil, &PosError{
Pos: tok.Pos,
Err: fmt.Errorf(
"error while trying to parse list within list: %s", err),
}
}
l.Add(node)
// TODO(arslan) should we support nested lists? Even though it's
// written in README of HCL, it's not a part of the grammar
// (not defined in parse.y)
case token.RBRACK:
// finished
l.Rbrack = p.tok.Pos


@ -147,7 +147,7 @@ func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
// Done
return keys, nil
case token.ILLEGAL:
return nil, errors.New("illegal")
fmt.Println("illegal")
default:
return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
}


@ -511,9 +511,6 @@ func (p *Properties) Set(key, value string) (prev string, ok bool, err error) {
if p.DisableExpansion {
prev, ok = p.Get(key)
p.m[key] = value
if !ok {
p.k = append(p.k, key)
}
return prev, ok, nil
}


@ -38,6 +38,12 @@ func DecodeHookExec(
raw DecodeHookFunc,
from reflect.Type, to reflect.Type,
data interface{}) (interface{}, error) {
// Build our arguments that reflect expects
argVals := make([]reflect.Value, 3)
argVals[0] = reflect.ValueOf(from)
argVals[1] = reflect.ValueOf(to)
argVals[2] = reflect.ValueOf(data)
switch f := typedDecodeHook(raw).(type) {
case DecodeHookFuncType:
return f(from, to, data)
@ -115,11 +121,6 @@ func StringToTimeDurationHookFunc() DecodeHookFunc {
}
}
// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to
// the decoder.
//
// Note that this is significantly different from the WeaklyTypedInput option
// of the DecoderConfig.
func WeaklyTypedHook(
f reflect.Kind,
t reflect.Kind,
@ -131,8 +132,9 @@ func WeaklyTypedHook(
case reflect.Bool:
if dataVal.Bool() {
return "1", nil
}
} else {
return "0", nil
}
case reflect.Float32:
return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
case reflect.Int:

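WeaklyTypedHook is one of the hooks that can be plugged into a decoder through DecoderConfig.DecodeHook. A minimal sketch of wiring it up, assuming the NewDecoder/Decode API of this mapstructure revision; struct, field names and input are illustrative:

package main

import (
    "fmt"

    "github.com/mitchellh/mapstructure"
)

// Service is a hypothetical target struct; Enabled is deliberately a string so
// the hook's bool-to-"1"/"0" conversion is visible.
type Service struct {
    Name    string
    Enabled string
}

func main() {
    input := map[string]interface{}{
        "Name":    "web",
        "Enabled": true,
    }

    var out Service
    dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
        DecodeHook: mapstructure.WeaklyTypedHook, // hook defined in the file above
        Result:     &out,
    })
    if err != nil {
        panic(err)
    }
    if err := dec.Decode(input); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", out) // the bool input should arrive as "1" in Enabled
}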
View File

@ -1,5 +1,5 @@
// Package mapstructure exposes functionality to convert an arbitrary
// map[string]interface{} into a native Go structure.
// The mapstructure package exposes functionality to convert an
// arbitrary map[string]interface{} into a native Go structure.
//
// The Go structure can be arbitrarily complex, containing slices,
// other structs, etc. and the decoder will properly decode nested
@ -32,12 +32,7 @@ import (
// both.
type DecodeHookFunc interface{}
// DecodeHookFuncType is a DecodeHookFunc which has complete information about
// the source and target types.
type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the
// source and target types.
type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
// DecoderConfig is the configuration that is used to create a new decoder
@ -74,9 +69,6 @@ type DecoderConfig struct {
// - empty array = empty map and vice versa
// - negative numbers to overflowed uint values (base 10)
// - slice of maps to a merged map
// - single values are converted to slices if required. Each
// element is weakly decoded. For example: "4" can become []int{4}
// if the target type is an int slice.
//
WeaklyTypedInput bool
@ -210,7 +202,7 @@ func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error
d.config.DecodeHook,
dataVal.Type(), val.Type(), data)
if err != nil {
return fmt.Errorf("error decoding '%s': %s", name, err)
return err
}
}
@ -237,8 +229,6 @@ func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error
err = d.decodePtr(name, data, val)
case reflect.Slice:
err = d.decodeSlice(name, data, val)
case reflect.Func:
err = d.decodeFunc(name, data, val)
default:
// If we reached this point then we weren't able to decode it
return fmt.Errorf("%s: unsupported type: %s", name, dataKind)
@ -441,7 +431,7 @@ func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value)
case dataKind == reflect.Uint:
val.SetFloat(float64(dataVal.Uint()))
case dataKind == reflect.Float32:
val.SetFloat(dataVal.Float())
val.SetFloat(float64(dataVal.Float()))
case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
if dataVal.Bool() {
val.SetFloat(1)
@ -556,12 +546,7 @@ func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) er
// into that. Then set the value of the pointer to this type.
valType := val.Type()
valElemType := valType.Elem()
realVal := val
if realVal.IsNil() || d.config.ZeroFields {
realVal = reflect.New(valElemType)
}
realVal := reflect.New(valElemType)
if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
return err
}
@ -570,19 +555,6 @@ func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) er
return nil
}
func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error {
// Create an element of the concrete (non pointer) type and decode
// into that. Then set the value of the pointer to this type.
dataVal := reflect.Indirect(reflect.ValueOf(data))
if val.Type() != dataVal.Type() {
return fmt.Errorf(
"'%s' expected type '%s', got unconvertible type '%s'",
name, val.Type(), dataVal.Type())
}
val.Set(dataVal)
return nil
}
func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error {
dataVal := reflect.Indirect(reflect.ValueOf(data))
dataValKind := dataVal.Kind()
@ -590,44 +562,26 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value)
valElemType := valType.Elem()
sliceType := reflect.SliceOf(valElemType)
valSlice := val
if valSlice.IsNil() || d.config.ZeroFields {
// Check input type
if dataValKind != reflect.Array && dataValKind != reflect.Slice {
if d.config.WeaklyTypedInput {
switch {
// Empty maps turn into empty slices
case dataValKind == reflect.Map:
if dataVal.Len() == 0 {
// Accept empty map instead of array/slice in weakly typed mode
if d.config.WeaklyTypedInput && dataVal.Kind() == reflect.Map && dataVal.Len() == 0 {
val.Set(reflect.MakeSlice(sliceType, 0, 0))
return nil
}
// All other types we try to convert to the slice type
// and "lift" it into it. i.e. a string becomes a string slice.
default:
// Just re-try this function with data as a slice.
return d.decodeSlice(name, []interface{}{data}, val)
}
}
} else {
return fmt.Errorf(
"'%s': source data must be an array or slice, got %s", name, dataValKind)
}
}
// Make a new slice to hold our result, same size as the original data.
valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
}
valSlice := reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
// Accumulate any errors
errors := make([]string, 0)
for i := 0; i < dataVal.Len(); i++ {
currentData := dataVal.Index(i).Interface()
for valSlice.Len() <= i {
valSlice = reflect.Append(valSlice, reflect.Zero(valElemType))
}
currentField := valSlice.Index(i)
fieldName := fmt.Sprintf("%s[%d]", name, i)

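The WeaklyTypedInput comment above lists the lossy conversions the decoder attempts (strings to bools and numbers, empty maps to empty slices, and so on). A small sketch of the option in use, hedged against the exact field set of this vendored revision; the config struct and input are illustrative:

package main

import (
    "fmt"

    "github.com/mitchellh/mapstructure"
)

// Config is a hypothetical target; the input below arrives entirely as
// strings, as it often does from flags or environment variables.
type Config struct {
    Port    int
    Verbose bool
}

func main() {
    input := map[string]interface{}{
        "Port":    "8080",
        "Verbose": "1",
    }

    var cfg Config
    dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
        WeaklyTypedInput: true, // enable the conversions listed above
        Result:           &cfg,
    })
    if err != nil {
        panic(err)
    }
    if err := dec.Decode(input); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", cfg) // expected: {Port:8080 Verbose:true}
}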
117
vendor/github.com/pelletier/go-buffruneio/buffruneio.go generated vendored Normal file
View File

@ -0,0 +1,117 @@
// Package buffruneio is a wrapper around bufio to provide buffered runes access with unlimited unreads.
package buffruneio
import (
"bufio"
"container/list"
"errors"
"io"
)
// Rune to indicate end of file.
const (
EOF = -(iota + 1)
)
// ErrNoRuneToUnread is returned by UnreadRune() when the read index is already at the beginning of the buffer.
var ErrNoRuneToUnread = errors.New("no rune to unwind")
// Reader implements runes buffering for an io.Reader object.
type Reader struct {
buffer *list.List
current *list.Element
input *bufio.Reader
}
// NewReader returns a new Reader.
func NewReader(rd io.Reader) *Reader {
return &Reader{
buffer: list.New(),
input: bufio.NewReader(rd),
}
}
type runeWithSize struct {
r rune
size int
}
func (rd *Reader) feedBuffer() error {
r, size, err := rd.input.ReadRune()
if err != nil {
if err != io.EOF {
return err
}
r = EOF
}
newRuneWithSize := runeWithSize{r, size}
rd.buffer.PushBack(newRuneWithSize)
if rd.current == nil {
rd.current = rd.buffer.Back()
}
return nil
}
// ReadRune reads the next rune from buffer, or from the underlying reader if needed.
func (rd *Reader) ReadRune() (rune, int, error) {
if rd.current == rd.buffer.Back() || rd.current == nil {
err := rd.feedBuffer()
if err != nil {
return EOF, 0, err
}
}
runeWithSize := rd.current.Value.(runeWithSize)
rd.current = rd.current.Next()
return runeWithSize.r, runeWithSize.size, nil
}
// UnreadRune pushes back the previously read rune in the buffer, extending it if needed.
func (rd *Reader) UnreadRune() error {
if rd.current == rd.buffer.Front() {
return ErrNoRuneToUnread
}
if rd.current == nil {
rd.current = rd.buffer.Back()
} else {
rd.current = rd.current.Prev()
}
return nil
}
// Forget removes runes stored before the current stream position index.
func (rd *Reader) Forget() {
if rd.current == nil {
rd.current = rd.buffer.Back()
}
for ; rd.current != rd.buffer.Front(); rd.buffer.Remove(rd.current.Prev()) {
}
}
// PeekRunes returns at most the next n runes, reading from the underlying source if
// needed. Does not move the current index. It includes EOF if reached.
func (rd *Reader) PeekRunes(n int) []rune {
res := make([]rune, 0, n)
cursor := rd.current
for i := 0; i < n; i++ {
if cursor == nil {
err := rd.feedBuffer()
if err != nil {
return res
}
cursor = rd.buffer.Back()
}
if cursor != nil {
r := cursor.Value.(runeWithSize).r
res = append(res, r)
if r == EOF {
return res
}
cursor = cursor.Next()
}
}
return res
}
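A short usage sketch of the Reader above, limited to the methods visible in this file (NewReader, ReadRune, UnreadRune, PeekRunes); the input string is illustrative:

package main

import (
    "fmt"
    "strings"

    "github.com/pelletier/go-buffruneio"
)

func main() {
    rd := buffruneio.NewReader(strings.NewReader("héllo"))

    // PeekRunes does not move the read index.
    fmt.Println(string(rd.PeekRunes(3))) // "hél"

    r, size, err := rd.ReadRune()
    if err != nil {
        panic(err)
    }
    fmt.Println(string(r), size) // "h" 1

    // Unlimited unreads: push the rune back and read it again.
    if err := rd.UnreadRune(); err != nil {
        panic(err)
    }
    r, _, _ = rd.ReadRune()
    fmt.Println(string(r)) // "h" again
}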

View File

@ -1,23 +1,250 @@
// Package toml is a TOML parser and manipulation library.
// Package toml is a TOML markup language parser.
//
// This version supports the specification as described in
// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md
//
// Marshaling
// TOML Parsing
//
// Go-toml can marshal and unmarshal TOML documents from and to data
// structures.
// TOML data may be parsed in two ways: by file, or by string.
//
// TOML document as a tree
// // load TOML data by filename
// tree, err := toml.LoadFile("filename.toml")
//
// Go-toml can operate on a TOML document as a tree. Use one of the Load*
// functions to parse TOML data and obtain a Tree instance, then one of its
// methods to manipulate the tree.
// // load TOML data stored in a string
// tree, err := toml.Load(stringContainingTomlData)
//
// JSONPath-like queries
// Either way, the result is a TomlTree object that can be used to navigate the
// structure and data within the original document.
//
// The package github.com/pelletier/go-toml/query implements a system
// similar to JSONPath to quickly retrieve elements of a TOML document using a
// single expression. See the package documentation for more information.
//
// Getting data from the TomlTree
//
// After parsing TOML data with Load() or LoadFile(), use the Has() and Get()
// methods on the returned TomlTree, to find your way through the document data.
//
// if tree.Has("foo") {
// fmt.Println("foo is:", tree.Get("foo"))
// }
//
// Working with Paths
//
// Go-toml has support for basic dot-separated key paths on the Has(), Get(), Set()
// and GetDefault() methods. These are the same kind of key paths used within the
// TOML specification for table names.
//
// // looks for a key named 'baz', within struct 'bar', within struct 'foo'
// tree.Has("foo.bar.baz")
//
// // returns the key at this path, if it is there
// tree.Get("foo.bar.baz")
//
// TOML allows keys to contain '.', which can cause this syntax to be problematic
// for some documents. In such cases, use the GetPath(), HasPath(), and SetPath()
// methods to explicitly define the path. This form is also faster, since
// it avoids having to parse the passed key for '.' delimiters.
//
// // looks for a key named 'baz', within struct 'bar', within struct 'foo'
// tree.HasPath([]string{"foo","bar","baz"})
//
// // returns the key at this path, if it is there
// tree.GetPath([]string{"foo","bar","baz"})
//
// Note that this is distinct from the heavyweight query syntax supported by
// TomlTree.Query() and the Query() struct (see below).
//
// Position Support
//
// Each element within the TomlTree is stored with position metadata, which is
// invaluable for providing semantic feedback to a user. This helps in
// situations where the TOML file parses correctly, but contains data that is
// not correct for the application. In such cases, an error message can be
// generated that indicates the problem line and column number in the source
// TOML document.
//
// // load TOML data
// tree, _ := toml.Load("filename.toml")
//
// // get an entry and report an error if it's the wrong type
// element := tree.Get("foo")
// if value, ok := element.(int64); !ok {
// return fmt.Errorf("%v: Element 'foo' must be an integer", tree.GetPosition("foo"))
// }
//
// // report an error if an expected element is missing
// if !tree.Has("bar") {
// return fmt.Errorf("%v: Expected 'bar' element", tree.GetPosition(""))
// }
//
// Query Support
//
// The TOML query path implementation is based loosely on the JSONPath specification:
// http://goessner.net/articles/JsonPath/
//
// The idea behind a query path is to allow quick access to any element, or set
// of elements within a TOML document, with a single expression.
//
// result, err := tree.Query("$.foo.bar.baz")
//
// This is roughly equivalent to:
//
// next := tree.Get("foo")
// if next != nil {
// next = next.Get("bar")
// if next != nil {
// next = next.Get("baz")
// }
// }
// result := next
//
// err is non-nil if a parsing error occurs.
//
// If no node in the tree matches the query, result will simply contain an empty list of
// items.
//
// As illustrated above, the query path is much more efficient, especially since
// the structure of the TOML file can vary. Rather than making assumptions about
// a document's structure, a query allows the programmer to make structured
// requests into the document, and get zero or more values as a result.
//
// The syntax of a query begins with a root token, followed by any number of
// sub-expressions:
//
// $
// Root of the TOML tree. This must always come first.
// .name
// Selects child of this node, where 'name' is a TOML key
// name.
// ['name']
// Selects child of this node, where 'name' is a string
// containing a TOML key name.
// [index]
// Selects child array element at 'index'.
// ..expr
// Recursively selects all children, filtered by a union,
// index, or slice expression.
// ..*
// Recursive selection of all nodes at this point in the
// tree.
// .*
// Selects all children of the current node.
// [expr,expr]
// Union operator - a logical 'or' grouping of two or more
// sub-expressions: index, key name, or filter.
// [start:end:step]
// Slice operator - selects array elements from start to
// end-1, at the given step. All three arguments are
// optional.
// [?(filter)]
// Named filter expression - the function 'filter' is
// used to filter children at this node.
//
// Query Indexes And Slices
//
// Index expressions perform no bounds checking, and will contribute no
// values to the result set if the provided index or index range is invalid.
// Negative indexes represent values from the end of the array, counting backwards.
//
// // select the last index of the array named 'foo'
// tree.Query("$.foo[-1]")
//
// Slice expressions are supported, by using ':' to separate a start/end index pair.
//
// // select up to the first five elements in the array
// tree.Query("$.foo[0:5]")
//
// Slice expressions also allow negative indexes for the start and stop
// arguments.
//
// // select all array elements.
// tree.Query("$.foo[0:-1]")
//
// Slice expressions may have an optional stride/step parameter:
//
// // select every other element
// tree.Query("$.foo[0:-1:2]")
//
// Slice start and end parameters are also optional:
//
// // these are all equivalent and select all the values in the array
// tree.Query("$.foo[:]")
// tree.Query("$.foo[0:]")
// tree.Query("$.foo[:-1]")
// tree.Query("$.foo[0:-1:]")
// tree.Query("$.foo[::1]")
// tree.Query("$.foo[0::1]")
// tree.Query("$.foo[:-1:1]")
// tree.Query("$.foo[0:-1:1]")
//
// Query Filters
//
// Query filters are used within a Union [,] or single Filter [] expression.
// A filter only allows nodes that qualify through to the next expression,
// and/or into the result set.
//
// // returns children of foo that are permitted by the 'bar' filter.
// tree.Query("$.foo[?(bar)]")
//
// There are several filters provided with the library:
//
// tree
// Allows nodes of type TomlTree.
// int
// Allows nodes of type int64.
// float
// Allows nodes of type float64.
// string
// Allows nodes of type string.
// time
// Allows nodes of type time.Time.
// bool
// Allows nodes of type bool.
//
// Query Results
//
// An executed query returns a QueryResult object. This contains the nodes
// in the TOML tree that match the query expression. Position information
// is also available for each value in the set.
//
// // display the results of a query
// results := tree.Query("$.foo.bar.baz")
// for idx, value := range results.Values() {
// fmt.Printf("%v: %v\n", results.Positions()[idx], value)
// }
//
// Compiled Queries
//
// Queries may be executed directly on a TomlTree object, or compiled ahead
// of time and executed discretely. The former is more convenient, but has the
// penalty of having to recompile the query expression each time.
//
// // basic query
// results := tree.Query("$.foo.bar.baz")
//
// // compiled query
// query := toml.CompileQuery("$.foo.bar.baz")
// results := query.Execute(tree)
//
// // run the compiled query again on a different tree
// moreResults := query.Execute(anotherTree)
//
// User Defined Query Filters
//
// Filter expressions may also be user defined by using the SetFilter()
// function on the Query object. The function must return true/false, which
// signifies if the passed node is kept or discarded, respectively.
//
// // create a query that references a user-defined filter
// query, _ := CompileQuery("$[?(bazOnly)]")
//
// // define the filter, and assign it to the query
// query.SetFilter("bazOnly", func(node interface{}) bool{
// if tree, ok := node.(*TomlTree); ok {
// return tree.Has("baz")
// }
// return false // reject all other node types
// })
//
// // run the query
// query.Execute(tree)
//
package toml
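A minimal sketch tying the pieces of this doc comment together, assuming the TomlTree API it describes (Load, Has, Get, GetPath, Query); keys and values are illustrative:

package main

import (
    "fmt"

    "github.com/pelletier/go-toml"
)

func main() {
    tree, err := toml.Load(`
[server]
host = "127.0.0.1"
port = 8080
`)
    if err != nil {
        panic(err)
    }

    // Dotted key paths, as described above.
    if tree.Has("server.port") {
        fmt.Println("port:", tree.Get("server.port")) // int64(8080)
    }

    // Path-based access avoids parsing '.' inside key names.
    fmt.Println(tree.GetPath([]string{"server", "host"}))

    // JSONPath-like query, per the doc comment above.
    results, err := tree.Query("$.server.port")
    if err != nil {
        panic(err)
    }
    for _, v := range results.Values() {
        fmt.Println("query match:", v)
    }
}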

View File

@ -6,12 +6,14 @@
package toml
import (
"bytes"
"errors"
"fmt"
"io"
"regexp"
"strconv"
"strings"
"github.com/pelletier/go-buffruneio"
)
var dateRegexp *regexp.Regexp
@ -21,11 +23,9 @@ type tomlLexStateFn func() tomlLexStateFn
// Define lexer
type tomlLexer struct {
inputIdx int
input []rune // Textual source
currentTokenStart int
currentTokenStop int
tokens []token
input *buffruneio.Reader // Textual source
buffer []rune // Runes composing the current token
tokens chan token
depth int
line int
col int
@ -36,14 +36,16 @@ type tomlLexer struct {
// Basic read operations on input
func (l *tomlLexer) read() rune {
r := l.peek()
r, _, err := l.input.ReadRune()
if err != nil {
panic(err)
}
if r == '\n' {
l.endbufferLine++
l.endbufferCol = 1
} else {
l.endbufferCol++
}
l.inputIdx++
return r
}
@ -51,13 +53,13 @@ func (l *tomlLexer) next() rune {
r := l.read()
if r != eof {
l.currentTokenStop++
l.buffer = append(l.buffer, r)
}
return r
}
func (l *tomlLexer) ignore() {
l.currentTokenStart = l.currentTokenStop
l.buffer = make([]rune, 0)
l.line = l.endbufferLine
l.col = l.endbufferCol
}
@ -74,46 +76,49 @@ func (l *tomlLexer) fastForward(n int) {
}
func (l *tomlLexer) emitWithValue(t tokenType, value string) {
l.tokens = append(l.tokens, token{
l.tokens <- token{
Position: Position{l.line, l.col},
typ: t,
val: value,
})
}
l.ignore()
}
func (l *tomlLexer) emit(t tokenType) {
l.emitWithValue(t, string(l.input[l.currentTokenStart:l.currentTokenStop]))
l.emitWithValue(t, string(l.buffer))
}
func (l *tomlLexer) peek() rune {
if l.inputIdx >= len(l.input) {
return eof
r, _, err := l.input.ReadRune()
if err != nil {
panic(err)
}
return l.input[l.inputIdx]
}
func (l *tomlLexer) peekString(size int) string {
maxIdx := len(l.input)
upperIdx := l.inputIdx + size // FIXME: potential overflow
if upperIdx > maxIdx {
upperIdx = maxIdx
}
return string(l.input[l.inputIdx:upperIdx])
l.input.UnreadRune()
return r
}
func (l *tomlLexer) follow(next string) bool {
return next == l.peekString(len(next))
for _, expectedRune := range next {
r, _, err := l.input.ReadRune()
defer l.input.UnreadRune()
if err != nil {
panic(err)
}
if expectedRune != r {
return false
}
}
return true
}
// Error management
func (l *tomlLexer) errorf(format string, args ...interface{}) tomlLexStateFn {
l.tokens = append(l.tokens, token{
l.tokens <- token{
Position: Position{l.line, l.col},
typ: tokenError,
val: fmt.Sprintf(format, args...),
})
}
return nil
}
@ -214,7 +219,7 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn {
break
}
possibleDate := l.peekString(35)
possibleDate := string(l.input.PeekRunes(35))
dateMatch := dateRegexp.FindString(possibleDate)
if dateMatch != "" {
l.fastForward(len(dateMatch))
@ -531,7 +536,7 @@ func (l *tomlLexer) lexInsideTableArrayKey() tomlLexStateFn {
for r := l.peek(); r != eof; r = l.peek() {
switch r {
case ']':
if l.currentTokenStop > l.currentTokenStart {
if len(l.buffer) > 0 {
l.emit(tokenKeyGroupArray)
}
l.next()
@ -554,7 +559,7 @@ func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn {
for r := l.peek(); r != eof; r = l.peek() {
switch r {
case ']':
if l.currentTokenStop > l.currentTokenStart {
if len(l.buffer) > 0 {
l.emit(tokenKeyGroup)
}
l.next()
@ -629,6 +634,7 @@ func (l *tomlLexer) run() {
for state := l.lexVoid; state != nil; {
state = state()
}
close(l.tokens)
}
func init() {
@ -636,16 +642,16 @@ func init() {
}
// Entry point
func lexToml(inputBytes []byte) []token {
runes := bytes.Runes(inputBytes)
func lexToml(input io.Reader) chan token {
bufferedInput := buffruneio.NewReader(input)
l := &tomlLexer{
input: runes,
tokens: make([]token, 0, 256),
input: bufferedInput,
tokens: make(chan token),
line: 1,
col: 1,
endbufferLine: 1,
endbufferCol: 1,
}
l.run()
go l.run()
return l.tokens
}
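lexToml now streams tokens over a channel from a goroutine instead of returning a slice. A generic sketch of that producer/consumer shape, with illustrative names rather than go-toml's own:

package main

import "fmt"

type token struct {
    typ string
    val string
}

// lex walks the input and streams tokens; closing the channel signals the end
// of input, mirroring how the lexer above closes l.tokens at the end of run().
func lex(input []rune) chan token {
    out := make(chan token)
    go func() {
        defer close(out)
        word := []rune{}
        flush := func() {
            if len(word) > 0 {
                out <- token{typ: "WORD", val: string(word)}
                word = word[:0]
            }
        }
        for _, r := range input {
            if r == ' ' {
                flush()
                continue
            }
            word = append(word, r)
        }
        flush()
    }()
    return out
}

func main() {
    // The consumer side simply ranges until the producer closes the channel.
    for tok := range lex([]rune("a bc def")) {
        fmt.Println(tok.typ, tok.val)
    }
}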

View File

@ -1,508 +0,0 @@
package toml
import (
"bytes"
"errors"
"fmt"
"reflect"
"strconv"
"strings"
"time"
)
type tomlOpts struct {
name string
comment string
commented bool
include bool
omitempty bool
}
var timeType = reflect.TypeOf(time.Time{})
var marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
// Check if the given marshall type maps to a Tree primitive
func isPrimitive(mtype reflect.Type) bool {
switch mtype.Kind() {
case reflect.Ptr:
return isPrimitive(mtype.Elem())
case reflect.Bool:
return true
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return true
case reflect.Float32, reflect.Float64:
return true
case reflect.String:
return true
case reflect.Struct:
return mtype == timeType || isCustomMarshaler(mtype)
default:
return false
}
}
// Check if the given marshall type maps to a Tree slice
func isTreeSlice(mtype reflect.Type) bool {
switch mtype.Kind() {
case reflect.Slice:
return !isOtherSlice(mtype)
default:
return false
}
}
// Check if the given marshall type maps to a non-Tree slice
func isOtherSlice(mtype reflect.Type) bool {
switch mtype.Kind() {
case reflect.Ptr:
return isOtherSlice(mtype.Elem())
case reflect.Slice:
return isPrimitive(mtype.Elem()) || isOtherSlice(mtype.Elem())
default:
return false
}
}
// Check if the given marshall type maps to a Tree
func isTree(mtype reflect.Type) bool {
switch mtype.Kind() {
case reflect.Map:
return true
case reflect.Struct:
return !isPrimitive(mtype)
default:
return false
}
}
func isCustomMarshaler(mtype reflect.Type) bool {
return mtype.Implements(marshalerType)
}
func callCustomMarshaler(mval reflect.Value) ([]byte, error) {
return mval.Interface().(Marshaler).MarshalTOML()
}
// Marshaler is the interface implemented by types that
// can marshal themselves into valid TOML.
type Marshaler interface {
MarshalTOML() ([]byte, error)
}
/*
Marshal returns the TOML encoding of v. Behavior is similar to the Go json
encoder, except that there is no concept of a Marshaler interface or MarshalTOML
function for sub-structs, and currently only definite types can be marshaled
(i.e. no `interface{}`).
The following struct annotations are supported:
toml:"Field" Overrides the field's name to output.
omitempty When set, empty values and groups are not emitted.
comment:"comment" Emits a # comment on the same line. This supports new lines.
commented:"true" Emits the value as commented.
Note that pointers are automatically assigned the "omitempty" option, as TOML
explicitly does not handle null values (saying instead the label should be
dropped).
Tree structural types and corresponding marshal types:
*Tree (*)struct, (*)map[string]interface{}
[]*Tree (*)[](*)struct, (*)[](*)map[string]interface{}
[]interface{} (as interface{}) (*)[]primitive, (*)[]([]interface{})
interface{} (*)primitive
Tree primitive types and corresponding marshal types:
uint64 uint, uint8-uint64, pointers to same
int64 int, int8-int64, pointers to same
float64 float32, float64, pointers to same
string string, pointers to same
bool bool, pointers to same
time.Time time.Time{}, pointers to same
*/
func Marshal(v interface{}) ([]byte, error) {
mtype := reflect.TypeOf(v)
if mtype.Kind() != reflect.Struct {
return []byte{}, errors.New("Only a struct can be marshaled to TOML")
}
sval := reflect.ValueOf(v)
if isCustomMarshaler(mtype) {
return callCustomMarshaler(sval)
}
t, err := valueToTree(mtype, sval)
if err != nil {
return []byte{}, err
}
s, err := t.ToTomlString()
return []byte(s), err
}
// Convert given marshal struct or map value to toml tree
func valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) {
if mtype.Kind() == reflect.Ptr {
return valueToTree(mtype.Elem(), mval.Elem())
}
tval := newTree()
switch mtype.Kind() {
case reflect.Struct:
for i := 0; i < mtype.NumField(); i++ {
mtypef, mvalf := mtype.Field(i), mval.Field(i)
opts := tomlOptions(mtypef)
if opts.include && (!opts.omitempty || !isZero(mvalf)) {
val, err := valueToToml(mtypef.Type, mvalf)
if err != nil {
return nil, err
}
tval.Set(opts.name, opts.comment, opts.commented, val)
}
}
case reflect.Map:
for _, key := range mval.MapKeys() {
mvalf := mval.MapIndex(key)
val, err := valueToToml(mtype.Elem(), mvalf)
if err != nil {
return nil, err
}
tval.Set(key.String(), "", false, val)
}
}
return tval, nil
}
// Convert given marshal slice to slice of Toml trees
func valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*Tree, error) {
tval := make([]*Tree, mval.Len(), mval.Len())
for i := 0; i < mval.Len(); i++ {
val, err := valueToTree(mtype.Elem(), mval.Index(i))
if err != nil {
return nil, err
}
tval[i] = val
}
return tval, nil
}
// Convert given marshal slice to slice of toml values
func valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) {
tval := make([]interface{}, mval.Len(), mval.Len())
for i := 0; i < mval.Len(); i++ {
val, err := valueToToml(mtype.Elem(), mval.Index(i))
if err != nil {
return nil, err
}
tval[i] = val
}
return tval, nil
}
// Convert given marshal value to toml value
func valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) {
if mtype.Kind() == reflect.Ptr {
return valueToToml(mtype.Elem(), mval.Elem())
}
switch {
case isCustomMarshaler(mtype):
return callCustomMarshaler(mval)
case isTree(mtype):
return valueToTree(mtype, mval)
case isTreeSlice(mtype):
return valueToTreeSlice(mtype, mval)
case isOtherSlice(mtype):
return valueToOtherSlice(mtype, mval)
default:
switch mtype.Kind() {
case reflect.Bool:
return mval.Bool(), nil
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return mval.Int(), nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return mval.Uint(), nil
case reflect.Float32, reflect.Float64:
return mval.Float(), nil
case reflect.String:
return mval.String(), nil
case reflect.Struct:
return mval.Interface().(time.Time), nil
default:
return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind())
}
}
}
// Unmarshal attempts to unmarshal the Tree into a Go struct pointed by v.
// Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for
// sub-structs, and only definite types can be unmarshaled.
func (t *Tree) Unmarshal(v interface{}) error {
mtype := reflect.TypeOf(v)
if mtype.Kind() != reflect.Ptr || mtype.Elem().Kind() != reflect.Struct {
return errors.New("Only a pointer to struct can be unmarshaled from TOML")
}
sval, err := valueFromTree(mtype.Elem(), t)
if err != nil {
return err
}
reflect.ValueOf(v).Elem().Set(sval)
return nil
}
// Unmarshal parses the TOML-encoded data and stores the result in the value
// pointed to by v. Behavior is similar to the Go json decoder, except that there
// is no concept of an Unmarshaler interface or UnmarshalTOML function for
// sub-structs, and currently only definite types can be unmarshaled to (i.e. no
// `interface{}`).
//
// The following struct annotations are supported:
//
// toml:"Field" Overrides the field's name to map to.
//
// See Marshal() documentation for types mapping table.
func Unmarshal(data []byte, v interface{}) error {
t, err := LoadReader(bytes.NewReader(data))
if err != nil {
return err
}
return t.Unmarshal(v)
}
// Convert toml tree to marshal struct or map, using marshal type
func valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, error) {
if mtype.Kind() == reflect.Ptr {
return unwrapPointer(mtype, tval)
}
var mval reflect.Value
switch mtype.Kind() {
case reflect.Struct:
mval = reflect.New(mtype).Elem()
for i := 0; i < mtype.NumField(); i++ {
mtypef := mtype.Field(i)
opts := tomlOptions(mtypef)
if opts.include {
baseKey := opts.name
keysToTry := []string{baseKey, strings.ToLower(baseKey), strings.ToTitle(baseKey)}
for _, key := range keysToTry {
exists := tval.Has(key)
if !exists {
continue
}
val := tval.Get(key)
mvalf, err := valueFromToml(mtypef.Type, val)
if err != nil {
return mval, formatError(err, tval.GetPosition(key))
}
mval.Field(i).Set(mvalf)
break
}
}
}
case reflect.Map:
mval = reflect.MakeMap(mtype)
for _, key := range tval.Keys() {
val := tval.Get(key)
mvalf, err := valueFromToml(mtype.Elem(), val)
if err != nil {
return mval, formatError(err, tval.GetPosition(key))
}
mval.SetMapIndex(reflect.ValueOf(key), mvalf)
}
}
return mval, nil
}
// Convert toml value to marshal struct/map slice, using marshal type
func valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) {
mval := reflect.MakeSlice(mtype, len(tval), len(tval))
for i := 0; i < len(tval); i++ {
val, err := valueFromTree(mtype.Elem(), tval[i])
if err != nil {
return mval, err
}
mval.Index(i).Set(val)
}
return mval, nil
}
// Convert toml value to marshal primitive slice, using marshal type
func valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) {
mval := reflect.MakeSlice(mtype, len(tval), len(tval))
for i := 0; i < len(tval); i++ {
val, err := valueFromToml(mtype.Elem(), tval[i])
if err != nil {
return mval, err
}
mval.Index(i).Set(val)
}
return mval, nil
}
// Convert toml value to marshal value, using marshal type
func valueFromToml(mtype reflect.Type, tval interface{}) (reflect.Value, error) {
if mtype.Kind() == reflect.Ptr {
return unwrapPointer(mtype, tval)
}
switch {
case isTree(mtype):
return valueFromTree(mtype, tval.(*Tree))
case isTreeSlice(mtype):
return valueFromTreeSlice(mtype, tval.([]*Tree))
case isOtherSlice(mtype):
return valueFromOtherSlice(mtype, tval.([]interface{}))
default:
switch mtype.Kind() {
case reflect.Bool:
val, ok := tval.(bool)
if !ok {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to bool", tval, tval)
}
return reflect.ValueOf(val), nil
case reflect.Int:
val, ok := tval.(int64)
if !ok {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval)
}
return reflect.ValueOf(int(val)), nil
case reflect.Int8:
val, ok := tval.(int64)
if !ok {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval)
}
return reflect.ValueOf(int8(val)), nil
case reflect.Int16:
val, ok := tval.(int64)
if !ok {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval)
}
return reflect.ValueOf(int16(val)), nil
case reflect.Int32:
val, ok := tval.(int64)
if !ok {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval)
}
return reflect.ValueOf(int32(val)), nil
case reflect.Int64:
val, ok := tval.(int64)
if !ok {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval)
}
return reflect.ValueOf(val), nil
case reflect.Uint:
val, ok := tval.(int64)
if !ok {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval)
}
return reflect.ValueOf(uint(val)), nil
case reflect.Uint8:
val, ok := tval.(int64)
if !ok {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval)
}
return reflect.ValueOf(uint8(val)), nil
case reflect.Uint16:
val, ok := tval.(int64)
if !ok {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval)
}
return reflect.ValueOf(uint16(val)), nil
case reflect.Uint32:
val, ok := tval.(int64)
if !ok {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval)
}
return reflect.ValueOf(uint32(val)), nil
case reflect.Uint64:
val, ok := tval.(int64)
if !ok {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval)
}
return reflect.ValueOf(uint64(val)), nil
case reflect.Float32:
val, ok := tval.(float64)
if !ok {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to float", tval, tval)
}
return reflect.ValueOf(float32(val)), nil
case reflect.Float64:
val, ok := tval.(float64)
if !ok {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to float", tval, tval)
}
return reflect.ValueOf(val), nil
case reflect.String:
val, ok := tval.(string)
if !ok {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to string", tval, tval)
}
return reflect.ValueOf(val), nil
case reflect.Struct:
val, ok := tval.(time.Time)
if !ok {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to time", tval, tval)
}
return reflect.ValueOf(val), nil
default:
return reflect.ValueOf(nil), fmt.Errorf("Unmarshal can't handle %v(%v)", mtype, mtype.Kind())
}
}
}
func unwrapPointer(mtype reflect.Type, tval interface{}) (reflect.Value, error) {
val, err := valueFromToml(mtype.Elem(), tval)
if err != nil {
return reflect.ValueOf(nil), err
}
mval := reflect.New(mtype.Elem())
mval.Elem().Set(val)
return mval, nil
}
func tomlOptions(vf reflect.StructField) tomlOpts {
tag := vf.Tag.Get("toml")
parse := strings.Split(tag, ",")
var comment string
if c := vf.Tag.Get("comment"); c != "" {
comment = c
}
commented, _ := strconv.ParseBool(vf.Tag.Get("commented"))
result := tomlOpts{name: vf.Name, comment: comment, commented: commented, include: true, omitempty: false}
if parse[0] != "" {
if parse[0] == "-" && len(parse) == 1 {
result.include = false
} else {
result.name = strings.Trim(parse[0], " ")
}
}
if vf.PkgPath != "" {
result.include = false
}
if len(parse) > 1 && strings.Trim(parse[1], " ") == "omitempty" {
result.omitempty = true
}
if vf.Type.Kind() == reflect.Ptr {
result.omitempty = true
}
return result
}
func isZero(val reflect.Value) bool {
switch val.Type().Kind() {
case reflect.Map:
fallthrough
case reflect.Array:
fallthrough
case reflect.Slice:
return val.Len() == 0
default:
return reflect.DeepEqual(val.Interface(), reflect.Zero(val.Type()).Interface())
}
}
func formatError(err error, pos Position) error {
if err.Error()[0] == '(' { // Error already contains position information
return err
}
return fmt.Errorf("%s: %s", pos, err)
}
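The removed marshal.go documented Marshal and Unmarshal together with the toml, comment, commented and omitempty struct tags. A sketch of how they are used against a go-toml revision that still ships marshal.go (not the one vendored after this change); the struct and tag values are illustrative:

package main

import (
    "fmt"

    "github.com/pelletier/go-toml"
)

type Server struct {
    Host    string   `toml:"host" comment:"address to bind"`
    Port    int      `toml:"port"`
    Aliases []string `toml:"aliases,omitempty"` // dropped from output when empty
}

func main() {
    out, err := toml.Marshal(Server{Host: "127.0.0.1", Port: 8080})
    if err != nil {
        panic(err)
    }
    fmt.Print(string(out))

    var back Server
    if err := toml.Unmarshal(out, &back); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", back)
}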

234
vendor/github.com/pelletier/go-toml/match.go generated vendored Normal file
View File

@ -0,0 +1,234 @@
package toml
import (
"fmt"
)
// support function to set positions for tomlValues
// NOTE: this is done to allow ctx.lastPosition to indicate the start of any
// values returned by the query engines
func tomlValueCheck(node interface{}, ctx *queryContext) interface{} {
switch castNode := node.(type) {
case *tomlValue:
ctx.lastPosition = castNode.position
return castNode.value
case []*TomlTree:
if len(castNode) > 0 {
ctx.lastPosition = castNode[0].position
}
return node
default:
return node
}
}
// base match
type matchBase struct {
next pathFn
}
func (f *matchBase) setNext(next pathFn) {
f.next = next
}
// terminating functor - gathers results
type terminatingFn struct {
// empty
}
func newTerminatingFn() *terminatingFn {
return &terminatingFn{}
}
func (f *terminatingFn) setNext(next pathFn) {
// do nothing
}
func (f *terminatingFn) call(node interface{}, ctx *queryContext) {
switch castNode := node.(type) {
case *TomlTree:
ctx.result.appendResult(node, castNode.position)
case *tomlValue:
ctx.result.appendResult(node, castNode.position)
default:
// use last position for scalars
ctx.result.appendResult(node, ctx.lastPosition)
}
}
// match single key
type matchKeyFn struct {
matchBase
Name string
}
func newMatchKeyFn(name string) *matchKeyFn {
return &matchKeyFn{Name: name}
}
func (f *matchKeyFn) call(node interface{}, ctx *queryContext) {
if array, ok := node.([]*TomlTree); ok {
for _, tree := range array {
item := tree.values[f.Name]
if item != nil {
f.next.call(item, ctx)
}
}
} else if tree, ok := node.(*TomlTree); ok {
item := tree.values[f.Name]
if item != nil {
f.next.call(item, ctx)
}
}
}
// match single index
type matchIndexFn struct {
matchBase
Idx int
}
func newMatchIndexFn(idx int) *matchIndexFn {
return &matchIndexFn{Idx: idx}
}
func (f *matchIndexFn) call(node interface{}, ctx *queryContext) {
if arr, ok := tomlValueCheck(node, ctx).([]interface{}); ok {
if f.Idx < len(arr) && f.Idx >= 0 {
f.next.call(arr[f.Idx], ctx)
}
}
}
// filter by slicing
type matchSliceFn struct {
matchBase
Start, End, Step int
}
func newMatchSliceFn(start, end, step int) *matchSliceFn {
return &matchSliceFn{Start: start, End: end, Step: step}
}
func (f *matchSliceFn) call(node interface{}, ctx *queryContext) {
if arr, ok := tomlValueCheck(node, ctx).([]interface{}); ok {
// adjust indexes for negative values, reverse ordering
realStart, realEnd := f.Start, f.End
if realStart < 0 {
realStart = len(arr) + realStart
}
if realEnd < 0 {
realEnd = len(arr) + realEnd
}
if realEnd < realStart {
realEnd, realStart = realStart, realEnd // swap
}
// loop and gather
for idx := realStart; idx < realEnd; idx += f.Step {
f.next.call(arr[idx], ctx)
}
}
}
// match anything
type matchAnyFn struct {
matchBase
}
func newMatchAnyFn() *matchAnyFn {
return &matchAnyFn{}
}
func (f *matchAnyFn) call(node interface{}, ctx *queryContext) {
if tree, ok := node.(*TomlTree); ok {
for _, v := range tree.values {
f.next.call(v, ctx)
}
}
}
// filter through union
type matchUnionFn struct {
Union []pathFn
}
func (f *matchUnionFn) setNext(next pathFn) {
for _, fn := range f.Union {
fn.setNext(next)
}
}
func (f *matchUnionFn) call(node interface{}, ctx *queryContext) {
for _, fn := range f.Union {
fn.call(node, ctx)
}
}
// match every single last node in the tree
type matchRecursiveFn struct {
matchBase
}
func newMatchRecursiveFn() *matchRecursiveFn {
return &matchRecursiveFn{}
}
func (f *matchRecursiveFn) call(node interface{}, ctx *queryContext) {
if tree, ok := node.(*TomlTree); ok {
var visit func(tree *TomlTree)
visit = func(tree *TomlTree) {
for _, v := range tree.values {
f.next.call(v, ctx)
switch node := v.(type) {
case *TomlTree:
visit(node)
case []*TomlTree:
for _, subtree := range node {
visit(subtree)
}
}
}
}
f.next.call(tree, ctx)
visit(tree)
}
}
// match based on an externally provided functional filter
type matchFilterFn struct {
matchBase
Pos Position
Name string
}
func newMatchFilterFn(name string, pos Position) *matchFilterFn {
return &matchFilterFn{Name: name, Pos: pos}
}
func (f *matchFilterFn) call(node interface{}, ctx *queryContext) {
fn, ok := (*ctx.filters)[f.Name]
if !ok {
panic(fmt.Sprintf("%s: query context does not have filter '%s'",
f.Pos.String(), f.Name))
}
switch castNode := tomlValueCheck(node, ctx).(type) {
case *TomlTree:
for _, v := range castNode.values {
if tv, ok := v.(*tomlValue); ok {
if fn(tv.value) {
f.next.call(v, ctx)
}
} else {
if fn(v) {
f.next.call(v, ctx)
}
}
}
case []interface{}:
for _, v := range castNode {
if fn(v) {
f.next.call(v, ctx)
}
}
}
}

View File

@ -13,9 +13,9 @@ import (
)
type tomlParser struct {
flowIdx int
flow []token
tree *Tree
flow chan token
tree *TomlTree
tokensBuffer []token
currentTable []string
seenTableKeys []string
}
@ -34,10 +34,16 @@ func (p *tomlParser) run() {
}
func (p *tomlParser) peek() *token {
if p.flowIdx >= len(p.flow) {
if len(p.tokensBuffer) != 0 {
return &(p.tokensBuffer[0])
}
tok, ok := <-p.flow
if !ok {
return nil
}
return &p.flow[p.flowIdx]
p.tokensBuffer = append(p.tokensBuffer, tok)
return &tok
}
func (p *tomlParser) assume(typ tokenType) {
@ -51,12 +57,16 @@ func (p *tomlParser) assume(typ tokenType) {
}
func (p *tomlParser) getToken() *token {
tok := p.peek()
if tok == nil {
if len(p.tokensBuffer) != 0 {
tok := p.tokensBuffer[0]
p.tokensBuffer = p.tokensBuffer[1:]
return &tok
}
tok, ok := <-p.flow
if !ok {
return nil
}
p.flowIdx++
return tok
return &tok
}
func (p *tomlParser) parseStart() tomlParserStateFn {
@ -96,21 +106,21 @@ func (p *tomlParser) parseGroupArray() tomlParserStateFn {
}
p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries
destTree := p.tree.GetPath(keys)
var array []*Tree
var array []*TomlTree
if destTree == nil {
array = make([]*Tree, 0)
} else if target, ok := destTree.([]*Tree); ok && target != nil {
array = destTree.([]*Tree)
array = make([]*TomlTree, 0)
} else if target, ok := destTree.([]*TomlTree); ok && target != nil {
array = destTree.([]*TomlTree)
} else {
p.raiseError(key, "key %s is already assigned and not of type table array", key)
}
p.currentTable = keys
// add a new tree to the end of the table array
newTree := newTree()
newTree := newTomlTree()
newTree.position = startToken.Position
array = append(array, newTree)
p.tree.SetPath(p.currentTable, "", false, array)
p.tree.SetPath(p.currentTable, array)
// remove all keys that were children of this table array
prefix := key.val + "."
@ -173,11 +183,11 @@ func (p *tomlParser) parseAssign() tomlParserStateFn {
}
// find the table to assign, looking out for arrays of tables
var targetNode *Tree
var targetNode *TomlTree
switch node := p.tree.GetPath(tableKey).(type) {
case []*Tree:
case []*TomlTree:
targetNode = node[len(node)-1]
case *Tree:
case *TomlTree:
targetNode = node
default:
p.raiseError(key, "Unknown table type for path: %s",
@ -202,10 +212,10 @@ func (p *tomlParser) parseAssign() tomlParserStateFn {
var toInsert interface{}
switch value.(type) {
case *Tree, []*Tree:
case *TomlTree, []*TomlTree:
toInsert = value
default:
toInsert = &tomlValue{value: value, position: key.Position}
toInsert = &tomlValue{value, key.Position}
}
targetNode.values[keyVal] = toInsert
return p.parseStart
@ -279,8 +289,8 @@ func tokenIsComma(t *token) bool {
return t != nil && t.typ == tokenComma
}
func (p *tomlParser) parseInlineTable() *Tree {
tree := newTree()
func (p *tomlParser) parseInlineTable() *TomlTree {
tree := newTomlTree()
var previous *token
Loop:
for {
@ -299,7 +309,7 @@ Loop:
key := p.getToken()
p.assume(tokenEqual)
value := p.parseRvalue()
tree.Set(key.val, "", false, value)
tree.Set(key.val, value)
case tokenComma:
if previous == nil {
p.raiseError(follow, "inline table cannot start with a comma")
@ -350,27 +360,27 @@ func (p *tomlParser) parseArray() interface{} {
p.getToken()
}
}
// An array of Trees is actually an array of inline
// An array of TomlTrees is actually an array of inline
// tables, which is a shorthand for a table array. If the
// array was not converted from []interface{} to []*Tree,
// array was not converted from []interface{} to []*TomlTree,
// the two notations would not be equivalent.
if arrayType == reflect.TypeOf(newTree()) {
tomlArray := make([]*Tree, len(array))
if arrayType == reflect.TypeOf(newTomlTree()) {
tomlArray := make([]*TomlTree, len(array))
for i, v := range array {
tomlArray[i] = v.(*Tree)
tomlArray[i] = v.(*TomlTree)
}
return tomlArray
}
return array
}
func parseToml(flow []token) *Tree {
result := newTree()
func parseToml(flow chan token) *TomlTree {
result := newTomlTree()
result.position = Position{1, 1}
parser := &tomlParser{
flowIdx: 0,
flow: flow,
tree: result,
tokensBuffer: make([]token, 0),
currentTable: make([]string, 0),
seenTableKeys: make([]string, 0),
}

153
vendor/github.com/pelletier/go-toml/query.go generated vendored Normal file
View File

@ -0,0 +1,153 @@
package toml
import (
"time"
)
// NodeFilterFn represents a user-defined filter function, for use with
// Query.SetFilter().
//
// The return value of the function must indicate if 'node' is to be included
// at this stage of the TOML path. Returning true will include the node, and
// returning false will exclude it.
//
// NOTE: Care should be taken to write script callbacks such that they are safe
// to use from multiple goroutines.
type NodeFilterFn func(node interface{}) bool
// QueryResult is the result of Executing a Query.
type QueryResult struct {
items []interface{}
positions []Position
}
// appends a value/position pair to the result set.
func (r *QueryResult) appendResult(node interface{}, pos Position) {
r.items = append(r.items, node)
r.positions = append(r.positions, pos)
}
// Values is the set of values within a QueryResult. They are not guaranteed
// to be in document order, and may be different each time a query is
// executed.
func (r QueryResult) Values() []interface{} {
values := make([]interface{}, len(r.items))
for i, v := range r.items {
o, ok := v.(*tomlValue)
if ok {
values[i] = o.value
} else {
values[i] = v
}
}
return values
}
// Positions is a set of positions for values within a QueryResult. Each index
// in Positions() corresponds to the entry in Values() at the same index.
func (r QueryResult) Positions() []Position {
return r.positions
}
// runtime context for executing query paths
type queryContext struct {
result *QueryResult
filters *map[string]NodeFilterFn
lastPosition Position
}
// generic path functor interface
type pathFn interface {
setNext(next pathFn)
call(node interface{}, ctx *queryContext)
}
// A Query is the representation of a compiled TOML path. A Query is safe
// for concurrent use by multiple goroutines.
type Query struct {
root pathFn
tail pathFn
filters *map[string]NodeFilterFn
}
func newQuery() *Query {
return &Query{
root: nil,
tail: nil,
filters: &defaultFilterFunctions,
}
}
func (q *Query) appendPath(next pathFn) {
if q.root == nil {
q.root = next
} else {
q.tail.setNext(next)
}
q.tail = next
next.setNext(newTerminatingFn()) // init the next functor
}
// CompileQuery compiles a TOML path expression. The returned Query can be used
// to match elements within a TomlTree and its descendants.
func CompileQuery(path string) (*Query, error) {
return parseQuery(lexQuery(path))
}
// Execute executes a query against a TomlTree, and returns the result of the query.
func (q *Query) Execute(tree *TomlTree) *QueryResult {
result := &QueryResult{
items: []interface{}{},
positions: []Position{},
}
if q.root == nil {
result.appendResult(tree, tree.GetPosition(""))
} else {
ctx := &queryContext{
result: result,
filters: q.filters,
}
q.root.call(tree, ctx)
}
return result
}
// SetFilter sets a user-defined filter function. These may be used inside
// "?(..)" query expressions to filter TOML document elements within a query.
func (q *Query) SetFilter(name string, fn NodeFilterFn) {
if q.filters == &defaultFilterFunctions {
// clone the static table
q.filters = &map[string]NodeFilterFn{}
for k, v := range defaultFilterFunctions {
(*q.filters)[k] = v
}
}
(*q.filters)[name] = fn
}
var defaultFilterFunctions = map[string]NodeFilterFn{
"tree": func(node interface{}) bool {
_, ok := node.(*TomlTree)
return ok
},
"int": func(node interface{}) bool {
_, ok := node.(int64)
return ok
},
"float": func(node interface{}) bool {
_, ok := node.(float64)
return ok
},
"string": func(node interface{}) bool {
_, ok := node.(string)
return ok
},
"time": func(node interface{}) bool {
_, ok := node.(time.Time)
return ok
},
"bool": func(node interface{}) bool {
_, ok := node.(bool)
return ok
},
}
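query.go introduces compiled queries and user-defined filters. A sketch combining CompileQuery, SetFilter and Execute, assuming the TomlTree API of this vendored revision; the filter name and document are illustrative:

package main

import (
    "fmt"

    "github.com/pelletier/go-toml"
)

func main() {
    tree, err := toml.Load(`
[servers.alpha]
ip = "10.0.0.1"
[servers.beta]
ip = "10.0.0.2"
`)
    if err != nil {
        panic(err)
    }

    // Compile once, run against any tree.
    query, err := toml.CompileQuery("$.servers[?(hasIP)]")
    if err != nil {
        panic(err)
    }

    // User-defined filter: keep only sub-trees that define an 'ip' key.
    query.SetFilter("hasIP", func(node interface{}) bool {
        if t, ok := node.(*toml.TomlTree); ok {
            return t.Has("ip")
        }
        return false
    })

    results := query.Execute(tree)
    for _, value := range results.Values() {
        if sub, ok := value.(*toml.TomlTree); ok {
            fmt.Println("matched server with ip", sub.Get("ip"))
        }
    }
}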

356
vendor/github.com/pelletier/go-toml/querylexer.go generated vendored Normal file
View File

@ -0,0 +1,356 @@
// TOML JSONPath lexer.
//
// Written using the principles developed by Rob Pike in
// http://www.youtube.com/watch?v=HxaD_trXwRE
package toml
import (
"fmt"
"strconv"
"strings"
"unicode/utf8"
)
// Lexer state function
type queryLexStateFn func() queryLexStateFn
// Lexer definition
type queryLexer struct {
input string
start int
pos int
width int
tokens chan token
depth int
line int
col int
stringTerm string
}
func (l *queryLexer) run() {
for state := l.lexVoid; state != nil; {
state = state()
}
close(l.tokens)
}
func (l *queryLexer) nextStart() {
// iterate by runes (utf8 characters)
// search for newlines and advance line/col counts
for i := l.start; i < l.pos; {
r, width := utf8.DecodeRuneInString(l.input[i:])
if r == '\n' {
l.line++
l.col = 1
} else {
l.col++
}
i += width
}
// advance start position to next token
l.start = l.pos
}
func (l *queryLexer) emit(t tokenType) {
l.tokens <- token{
Position: Position{l.line, l.col},
typ: t,
val: l.input[l.start:l.pos],
}
l.nextStart()
}
func (l *queryLexer) emitWithValue(t tokenType, value string) {
l.tokens <- token{
Position: Position{l.line, l.col},
typ: t,
val: value,
}
l.nextStart()
}
func (l *queryLexer) next() rune {
if l.pos >= len(l.input) {
l.width = 0
return eof
}
var r rune
r, l.width = utf8.DecodeRuneInString(l.input[l.pos:])
l.pos += l.width
return r
}
func (l *queryLexer) ignore() {
l.nextStart()
}
func (l *queryLexer) backup() {
l.pos -= l.width
}
func (l *queryLexer) errorf(format string, args ...interface{}) queryLexStateFn {
l.tokens <- token{
Position: Position{l.line, l.col},
typ: tokenError,
val: fmt.Sprintf(format, args...),
}
return nil
}
func (l *queryLexer) peek() rune {
r := l.next()
l.backup()
return r
}
func (l *queryLexer) accept(valid string) bool {
if strings.ContainsRune(valid, l.next()) {
return true
}
l.backup()
return false
}
func (l *queryLexer) follow(next string) bool {
return strings.HasPrefix(l.input[l.pos:], next)
}
func (l *queryLexer) lexVoid() queryLexStateFn {
for {
next := l.peek()
switch next {
case '$':
l.pos++
l.emit(tokenDollar)
continue
case '.':
if l.follow("..") {
l.pos += 2
l.emit(tokenDotDot)
} else {
l.pos++
l.emit(tokenDot)
}
continue
case '[':
l.pos++
l.emit(tokenLeftBracket)
continue
case ']':
l.pos++
l.emit(tokenRightBracket)
continue
case ',':
l.pos++
l.emit(tokenComma)
continue
case '*':
l.pos++
l.emit(tokenStar)
continue
case '(':
l.pos++
l.emit(tokenLeftParen)
continue
case ')':
l.pos++
l.emit(tokenRightParen)
continue
case '?':
l.pos++
l.emit(tokenQuestion)
continue
case ':':
l.pos++
l.emit(tokenColon)
continue
case '\'':
l.ignore()
l.stringTerm = string(next)
return l.lexString
case '"':
l.ignore()
l.stringTerm = string(next)
return l.lexString
}
if isSpace(next) {
l.next()
l.ignore()
continue
}
if isAlphanumeric(next) {
return l.lexKey
}
if next == '+' || next == '-' || isDigit(next) {
return l.lexNumber
}
if l.next() == eof {
break
}
return l.errorf("unexpected char: '%v'", next)
}
l.emit(tokenEOF)
return nil
}
func (l *queryLexer) lexKey() queryLexStateFn {
for {
next := l.peek()
if !isAlphanumeric(next) {
l.emit(tokenKey)
return l.lexVoid
}
if l.next() == eof {
break
}
}
l.emit(tokenEOF)
return nil
}
func (l *queryLexer) lexString() queryLexStateFn {
l.pos++
l.ignore()
growingString := ""
for {
if l.follow(l.stringTerm) {
l.emitWithValue(tokenString, growingString)
l.pos++
l.ignore()
return l.lexVoid
}
if l.follow("\\\"") {
l.pos++
growingString += "\""
} else if l.follow("\\'") {
l.pos++
growingString += "'"
} else if l.follow("\\n") {
l.pos++
growingString += "\n"
} else if l.follow("\\b") {
l.pos++
growingString += "\b"
} else if l.follow("\\f") {
l.pos++
growingString += "\f"
} else if l.follow("\\/") {
l.pos++
growingString += "/"
} else if l.follow("\\t") {
l.pos++
growingString += "\t"
} else if l.follow("\\r") {
l.pos++
growingString += "\r"
} else if l.follow("\\\\") {
l.pos++
growingString += "\\"
} else if l.follow("\\u") {
l.pos += 2
code := ""
for i := 0; i < 4; i++ {
c := l.peek()
l.pos++
if !isHexDigit(c) {
return l.errorf("unfinished unicode escape")
}
code = code + string(c)
}
l.pos--
intcode, err := strconv.ParseInt(code, 16, 32)
if err != nil {
return l.errorf("invalid unicode escape: \\u" + code)
}
growingString += string(rune(intcode))
} else if l.follow("\\U") {
l.pos += 2
code := ""
for i := 0; i < 8; i++ {
c := l.peek()
l.pos++
if !isHexDigit(c) {
return l.errorf("unfinished unicode escape")
}
code = code + string(c)
}
l.pos--
intcode, err := strconv.ParseInt(code, 16, 32)
if err != nil {
return l.errorf("invalid unicode escape: \\u" + code)
}
growingString += string(rune(intcode))
} else if l.follow("\\") {
l.pos++
return l.errorf("invalid escape sequence: \\" + string(l.peek()))
} else {
growingString += string(l.peek())
}
if l.next() == eof {
break
}
}
return l.errorf("unclosed string")
}
func (l *queryLexer) lexNumber() queryLexStateFn {
l.ignore()
if !l.accept("+") {
l.accept("-")
}
pointSeen := false
digitSeen := false
for {
next := l.next()
if next == '.' {
if pointSeen {
return l.errorf("cannot have two dots in one float")
}
if !isDigit(l.peek()) {
return l.errorf("float cannot end with a dot")
}
pointSeen = true
} else if isDigit(next) {
digitSeen = true
} else {
l.backup()
break
}
if pointSeen && !digitSeen {
return l.errorf("cannot start float with a dot")
}
}
if !digitSeen {
return l.errorf("no digit in that number")
}
if pointSeen {
l.emit(tokenFloat)
} else {
l.emit(tokenInteger)
}
return l.lexVoid
}
// Entry point
func lexQuery(input string) chan token {
l := &queryLexer{
input: input,
tokens: make(chan token),
line: 1,
col: 1,
}
go l.run()
return l.tokens
}
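querylexer.go follows the state-function pattern from Rob Pike's lexer talk: each state is a function that returns the next state, and a nil state ends the loop. A stripped-down sketch of that control flow with illustrative token kinds, not the query lexer's own:

package main

import (
    "fmt"
    "unicode"
)

type stateFn func(*lexer) stateFn

type lexer struct {
    input string
    pos   int
    out   []string
}

// run drives the machine the same way queryLexer.run does above:
// keep calling the current state until one of them returns nil.
func (l *lexer) run() {
    for state := stateFn(lexAny); state != nil; {
        state = state(l)
    }
}

func lexAny(l *lexer) stateFn {
    if l.pos >= len(l.input) {
        return nil // end of input terminates the loop
    }
    r := rune(l.input[l.pos])
    switch {
    case unicode.IsDigit(r):
        return lexNumber
    case r == ' ':
        l.pos++
        return lexAny
    default:
        return lexWord
    }
}

func lexNumber(l *lexer) stateFn {
    start := l.pos
    for l.pos < len(l.input) && unicode.IsDigit(rune(l.input[l.pos])) {
        l.pos++
    }
    l.out = append(l.out, "NUMBER:"+l.input[start:l.pos])
    return lexAny
}

func lexWord(l *lexer) stateFn {
    start := l.pos
    for l.pos < len(l.input) && l.input[l.pos] != ' ' {
        l.pos++
    }
    l.out = append(l.out, "WORD:"+l.input[start:l.pos])
    return lexAny
}

func main() {
    l := &lexer{input: "foo 42 bar"}
    l.run()
    fmt.Println(l.out) // [WORD:foo NUMBER:42 WORD:bar]
}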

275
vendor/github.com/pelletier/go-toml/queryparser.go generated vendored Normal file
View File

@ -0,0 +1,275 @@
/*
Based on the "jsonpath" spec/concept.
http://goessner.net/articles/JsonPath/
https://code.google.com/p/json-path/
*/
package toml
import (
"fmt"
)
const maxInt = int(^uint(0) >> 1)
type queryParser struct {
flow chan token
tokensBuffer []token
query *Query
union []pathFn
err error
}
type queryParserStateFn func() queryParserStateFn
// Formats and panics an error message based on a token
func (p *queryParser) parseError(tok *token, msg string, args ...interface{}) queryParserStateFn {
p.err = fmt.Errorf(tok.Position.String()+": "+msg, args...)
return nil // trigger parse to end
}
func (p *queryParser) run() {
for state := p.parseStart; state != nil; {
state = state()
}
}
func (p *queryParser) backup(tok *token) {
p.tokensBuffer = append(p.tokensBuffer, *tok)
}
func (p *queryParser) peek() *token {
if len(p.tokensBuffer) != 0 {
return &(p.tokensBuffer[0])
}
tok, ok := <-p.flow
if !ok {
return nil
}
p.backup(&tok)
return &tok
}
func (p *queryParser) lookahead(types ...tokenType) bool {
result := true
buffer := []token{}
for _, typ := range types {
tok := p.getToken()
if tok == nil {
result = false
break
}
buffer = append(buffer, *tok)
if tok.typ != typ {
result = false
break
}
}
// add the tokens back to the buffer, and return
p.tokensBuffer = append(p.tokensBuffer, buffer...)
return result
}
func (p *queryParser) getToken() *token {
if len(p.tokensBuffer) != 0 {
tok := p.tokensBuffer[0]
p.tokensBuffer = p.tokensBuffer[1:]
return &tok
}
tok, ok := <-p.flow
if !ok {
return nil
}
return &tok
}
func (p *queryParser) parseStart() queryParserStateFn {
tok := p.getToken()
if tok == nil || tok.typ == tokenEOF {
return nil
}
if tok.typ != tokenDollar {
return p.parseError(tok, "Expected '$' at start of expression")
}
return p.parseMatchExpr
}
// handle '.' prefix, '[]', and '..'
func (p *queryParser) parseMatchExpr() queryParserStateFn {
tok := p.getToken()
switch tok.typ {
case tokenDotDot:
p.query.appendPath(&matchRecursiveFn{})
// nested parse for '..'
tok := p.getToken()
switch tok.typ {
case tokenKey:
p.query.appendPath(newMatchKeyFn(tok.val))
return p.parseMatchExpr
case tokenLeftBracket:
return p.parseBracketExpr
case tokenStar:
// do nothing - the recursive predicate is enough
return p.parseMatchExpr
}
case tokenDot:
// nested parse for '.'
tok := p.getToken()
switch tok.typ {
case tokenKey:
p.query.appendPath(newMatchKeyFn(tok.val))
return p.parseMatchExpr
case tokenStar:
p.query.appendPath(&matchAnyFn{})
return p.parseMatchExpr
}
case tokenLeftBracket:
return p.parseBracketExpr
case tokenEOF:
return nil // allow EOF at this stage
}
return p.parseError(tok, "expected match expression")
}
func (p *queryParser) parseBracketExpr() queryParserStateFn {
if p.lookahead(tokenInteger, tokenColon) {
return p.parseSliceExpr
}
if p.peek().typ == tokenColon {
return p.parseSliceExpr
}
return p.parseUnionExpr
}
func (p *queryParser) parseUnionExpr() queryParserStateFn {
var tok *token
// this state can be traversed after some sub-expressions
// so be careful when setting up state in the parser
if p.union == nil {
p.union = []pathFn{}
}
loop: // labeled loop for easy breaking
for {
if len(p.union) > 0 {
// parse delimiter or terminator
tok = p.getToken()
switch tok.typ {
case tokenComma:
// do nothing
case tokenRightBracket:
break loop
default:
return p.parseError(tok, "expected ',' or ']', not '%s'", tok.val)
}
}
// parse sub expression
tok = p.getToken()
switch tok.typ {
case tokenInteger:
p.union = append(p.union, newMatchIndexFn(tok.Int()))
case tokenKey:
p.union = append(p.union, newMatchKeyFn(tok.val))
case tokenString:
p.union = append(p.union, newMatchKeyFn(tok.val))
case tokenQuestion:
return p.parseFilterExpr
default:
return p.parseError(tok, "expected union sub expression, not '%s', %d", tok.val, len(p.union))
}
}
// if there is only one sub-expression, use that instead
if len(p.union) == 1 {
p.query.appendPath(p.union[0])
} else {
p.query.appendPath(&matchUnionFn{p.union})
}
p.union = nil // clear out state
return p.parseMatchExpr
}
func (p *queryParser) parseSliceExpr() queryParserStateFn {
// init slice to grab all elements
start, end, step := 0, maxInt, 1
// parse optional start
tok := p.getToken()
if tok.typ == tokenInteger {
start = tok.Int()
tok = p.getToken()
}
if tok.typ != tokenColon {
return p.parseError(tok, "expected ':'")
}
// parse optional end
tok = p.getToken()
if tok.typ == tokenInteger {
end = tok.Int()
tok = p.getToken()
}
if tok.typ == tokenRightBracket {
p.query.appendPath(newMatchSliceFn(start, end, step))
return p.parseMatchExpr
}
if tok.typ != tokenColon {
return p.parseError(tok, "expected ']' or ':'")
}
// parse optional step
tok = p.getToken()
if tok.typ == tokenInteger {
step = tok.Int()
if step < 0 {
return p.parseError(tok, "step must be a positive value")
}
tok = p.getToken()
}
if tok.typ != tokenRightBracket {
return p.parseError(tok, "expected ']'")
}
p.query.appendPath(newMatchSliceFn(start, end, step))
return p.parseMatchExpr
}
func (p *queryParser) parseFilterExpr() queryParserStateFn {
tok := p.getToken()
if tok.typ != tokenLeftParen {
return p.parseError(tok, "expected left-parenthesis for filter expression")
}
tok = p.getToken()
if tok.typ != tokenKey && tok.typ != tokenString {
return p.parseError(tok, "expected key or string for filter funciton name")
}
name := tok.val
tok = p.getToken()
if tok.typ != tokenRightParen {
return p.parseError(tok, "expected right-parenthesis for filter expression")
}
p.union = append(p.union, newMatchFilterFn(name, tok.Position))
return p.parseUnionExpr
}
func parseQuery(flow chan token) (*Query, error) {
parser := &queryParser{
flow: flow,
tokensBuffer: []token{},
query: newQuery(),
}
parser.run()
return parser.query, parser.err
}
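
For context, a minimal sketch of how a query handled by this parser is typically issued through the TomlTree.Query method that appears further down in this diff. It assumes the pelletier/go-toml revision pinned here (Load returning a *TomlTree) and that QueryResult exposes a Values() accessor; it is illustrative only, not part of the vendored code.

package main

import (
    "fmt"

    "github.com/pelletier/go-toml"
)

func main() {
    tree, err := toml.Load(`
[servers]
  [servers.alpha]
  ip = "10.0.0.1"
  [servers.beta]
  ip = "10.0.0.2"
`)
    if err != nil {
        panic(err)
    }

    // '$' anchors at the root, '.' descends into a key, and '..' matches
    // recursively: exactly the token sequence parseStart/parseMatchExpr accept.
    result, err := tree.Query("$.servers..ip")
    if err != nil {
        panic(err)
    }
    for _, v := range result.Values() {
        fmt.Println(v) // 10.0.0.1 and 10.0.0.2 (order may vary)
    }
}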

View File

@ -135,6 +135,5 @@ func isDigit(r rune) bool {
func isHexDigit(r rune) bool {
return isDigit(r) ||
(r >= 'a' && r <= 'f') ||
(r >= 'A' && r <= 'F')
r == 'A' || r == 'B' || r == 'C' || r == 'D' || r == 'E' || r == 'F'
}

View File

@ -4,7 +4,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"runtime"
"strings"
@ -12,42 +11,31 @@ import (
type tomlValue struct {
value interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list
comment string
commented bool
position Position
}
// Tree is the result of the parsing of a TOML file.
type Tree struct {
values map[string]interface{} // string -> *tomlValue, *Tree, []*Tree
comment string
commented bool
// TomlTree is the result of the parsing of a TOML file.
type TomlTree struct {
values map[string]interface{} // string -> *tomlValue, *TomlTree, []*TomlTree
position Position
}
func newTree() *Tree {
return &Tree{
func newTomlTree() *TomlTree {
return &TomlTree{
values: make(map[string]interface{}),
position: Position{},
}
}
// TreeFromMap initializes a new Tree object using the given map.
func TreeFromMap(m map[string]interface{}) (*Tree, error) {
result, err := toTree(m)
if err != nil {
return nil, err
// TreeFromMap initializes a new TomlTree object using the given map.
func TreeFromMap(m map[string]interface{}) *TomlTree {
return &TomlTree{
values: m,
}
return result.(*Tree), nil
}
// Position returns the position of the tree.
func (t *Tree) Position() Position {
return t.position
}
// Has returns a boolean indicating if the given key exists.
func (t *Tree) Has(key string) bool {
func (t *TomlTree) Has(key string) bool {
if key == "" {
return false
}
@ -55,26 +43,25 @@ func (t *Tree) Has(key string) bool {
}
// HasPath returns true if the given path of keys exists, false otherwise.
func (t *Tree) HasPath(keys []string) bool {
func (t *TomlTree) HasPath(keys []string) bool {
return t.GetPath(keys) != nil
}
// Keys returns the keys of the toplevel tree (does not recurse).
func (t *Tree) Keys() []string {
keys := make([]string, len(t.values))
i := 0
// Keys returns the keys of the toplevel tree.
// Warning: this is a costly operation.
func (t *TomlTree) Keys() []string {
var keys []string
for k := range t.values {
keys[i] = k
i++
keys = append(keys, k)
}
return keys
}
// Get the value at key in the Tree.
// Get the value at key in the TomlTree.
// Key is a dot-separated path (e.g. a.b.c).
// Returns nil if the path does not exist in the tree.
// If keys is of length zero, the current tree is returned.
func (t *Tree) Get(key string) interface{} {
func (t *TomlTree) Get(key string) interface{} {
if key == "" {
return t
}
@ -87,7 +74,7 @@ func (t *Tree) Get(key string) interface{} {
// GetPath returns the element in the tree indicated by 'keys'.
// If keys is of length zero, the current tree is returned.
func (t *Tree) GetPath(keys []string) interface{} {
func (t *TomlTree) GetPath(keys []string) interface{} {
if len(keys) == 0 {
return t
}
@ -98,9 +85,9 @@ func (t *Tree) GetPath(keys []string) interface{} {
return nil
}
switch node := value.(type) {
case *Tree:
case *TomlTree:
subtree = node
case []*Tree:
case []*TomlTree:
// go to most recent element
if len(node) == 0 {
return nil
@ -120,7 +107,7 @@ func (t *Tree) GetPath(keys []string) interface{} {
}
// GetPosition returns the position of the given key.
func (t *Tree) GetPosition(key string) Position {
func (t *TomlTree) GetPosition(key string) Position {
if key == "" {
return t.position
}
@ -129,7 +116,7 @@ func (t *Tree) GetPosition(key string) Position {
// GetPositionPath returns the element in the tree indicated by 'keys'.
// If keys is of length zero, the current tree is returned.
func (t *Tree) GetPositionPath(keys []string) Position {
func (t *TomlTree) GetPositionPath(keys []string) Position {
if len(keys) == 0 {
return t.position
}
@ -140,9 +127,9 @@ func (t *Tree) GetPositionPath(keys []string) Position {
return Position{0, 0}
}
switch node := value.(type) {
case *Tree:
case *TomlTree:
subtree = node
case []*Tree:
case []*TomlTree:
// go to most recent element
if len(node) == 0 {
return Position{0, 0}
@ -156,9 +143,9 @@ func (t *Tree) GetPositionPath(keys []string) Position {
switch node := subtree.values[keys[len(keys)-1]].(type) {
case *tomlValue:
return node.position
case *Tree:
case *TomlTree:
return node.position
case []*Tree:
case []*TomlTree:
// go to most recent element
if len(node) == 0 {
return Position{0, 0}
@ -170,7 +157,7 @@ func (t *Tree) GetPositionPath(keys []string) Position {
}
// GetDefault works like Get but with a default value
func (t *Tree) GetDefault(key string, def interface{}) interface{} {
func (t *TomlTree) GetDefault(key string, def interface{}) interface{} {
val := t.Get(key)
if val == nil {
return def
@ -180,30 +167,30 @@ func (t *Tree) GetDefault(key string, def interface{}) interface{} {
// Set an element in the tree.
// Key is a dot-separated path (e.g. a.b.c).
// Creates all necessary intermediate trees, if needed.
func (t *Tree) Set(key string, comment string, commented bool, value interface{}) {
t.SetPath(strings.Split(key, "."), comment, commented, value)
// Creates all necessary intermediates trees, if needed.
func (t *TomlTree) Set(key string, value interface{}) {
t.SetPath(strings.Split(key, "."), value)
}
// SetPath sets an element in the tree.
// Keys is an array of path elements (e.g. {"a","b","c"}).
// Creates all necessary intermediate trees, if needed.
func (t *Tree) SetPath(keys []string, comment string, commented bool, value interface{}) {
// Creates all necessary intermediates trees, if needed.
func (t *TomlTree) SetPath(keys []string, value interface{}) {
subtree := t
for _, intermediateKey := range keys[:len(keys)-1] {
nextTree, exists := subtree.values[intermediateKey]
if !exists {
nextTree = newTree()
nextTree = newTomlTree()
subtree.values[intermediateKey] = nextTree // add new element here
}
switch node := nextTree.(type) {
case *Tree:
case *TomlTree:
subtree = node
case []*Tree:
case []*TomlTree:
// go to most recent element
if len(node) == 0 {
// create element if it does not exist
subtree.values[intermediateKey] = append(node, newTree())
subtree.values[intermediateKey] = append(node, newTomlTree())
}
subtree = node[len(node)-1]
}
@ -212,18 +199,14 @@ func (t *Tree) SetPath(keys []string, comment string, commented bool, value inte
var toInsert interface{}
switch value.(type) {
case *Tree:
tt := value.(*Tree)
tt.comment = comment
case *TomlTree:
toInsert = value
case []*Tree:
case []*TomlTree:
toInsert = value
case *tomlValue:
tt := value.(*tomlValue)
tt.comment = comment
toInsert = tt
toInsert = value
default:
toInsert = &tomlValue{value: value, comment: comment, commented: commented}
toInsert = &tomlValue{value: value}
}
subtree.values[keys[len(keys)-1]] = toInsert
@ -236,21 +219,21 @@ func (t *Tree) SetPath(keys []string, comment string, commented bool, value inte
// and tree[a][b][c]
//
// Returns nil on success, error object on failure
func (t *Tree) createSubTree(keys []string, pos Position) error {
func (t *TomlTree) createSubTree(keys []string, pos Position) error {
subtree := t
for _, intermediateKey := range keys {
nextTree, exists := subtree.values[intermediateKey]
if !exists {
tree := newTree()
tree := newTomlTree()
tree.position = pos
subtree.values[intermediateKey] = tree
nextTree = tree
}
switch node := nextTree.(type) {
case []*Tree:
case []*TomlTree:
subtree = node[len(node)-1]
case *Tree:
case *TomlTree:
subtree = node
default:
return fmt.Errorf("unknown type for path %s (%s): %T (%#v)",
@ -260,8 +243,17 @@ func (t *Tree) createSubTree(keys []string, pos Position) error {
return nil
}
// LoadBytes creates a Tree from a []byte.
func LoadBytes(b []byte) (tree *Tree, err error) {
// Query compiles and executes a query on a tree and returns the query result.
func (t *TomlTree) Query(query string) (*QueryResult, error) {
q, err := CompileQuery(query)
if err != nil {
return nil, err
}
return q.Execute(t), nil
}
// LoadReader creates a TomlTree from any io.Reader.
func LoadReader(reader io.Reader) (tree *TomlTree, err error) {
defer func() {
if r := recover(); r != nil {
if _, ok := r.(runtime.Error); ok {
@ -270,27 +262,17 @@ func LoadBytes(b []byte) (tree *Tree, err error) {
err = errors.New(r.(string))
}
}()
tree = parseToml(lexToml(b))
tree = parseToml(lexToml(reader))
return
}
// LoadReader creates a Tree from any io.Reader.
func LoadReader(reader io.Reader) (tree *Tree, err error) {
inputBytes, err := ioutil.ReadAll(reader)
if err != nil {
return
}
tree, err = LoadBytes(inputBytes)
return
// Load creates a TomlTree from a string.
func Load(content string) (tree *TomlTree, err error) {
return LoadReader(strings.NewReader(content))
}
// Load creates a Tree from a string.
func Load(content string) (tree *Tree, err error) {
return LoadBytes([]byte(content))
}
// LoadFile creates a Tree from a file.
func LoadFile(path string) (tree *Tree, err error) {
// LoadFile creates a TomlTree from a file.
func LoadFile(path string) (tree *TomlTree, err error) {
file, err := os.Open(path)
if err != nil {
return nil, err
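
The dot-separated key API documented in this file (Get, GetDefault, Has, Set) is easiest to see in use. A short sketch against the TomlTree revision pinned by this update; note that in this revision Set takes only a key and a value, with no comment arguments.

package main

import (
    "fmt"

    "github.com/pelletier/go-toml"
)

func main() {
    tree, err := toml.Load(`[database]
server = "192.168.1.1"
enabled = true
`)
    if err != nil {
        panic(err)
    }

    // Keys are dot-separated paths; Get returns nil when the path is absent.
    fmt.Println(tree.Get("database.server"))                    // 192.168.1.1
    fmt.Println(tree.Get("database.missing") == nil)            // true
    fmt.Println(tree.GetDefault("database.port", int64(5432)))  // 5432

    // Set creates intermediate tables on demand, matching SetPath above.
    tree.Set("database.connection_max", int64(5000))
    fmt.Println(tree.Has("database.connection_max")) // true
}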

View File

@ -1,142 +0,0 @@
package toml
import (
"fmt"
"reflect"
"time"
)
var kindToType = [reflect.String + 1]reflect.Type{
reflect.Bool: reflect.TypeOf(true),
reflect.String: reflect.TypeOf(""),
reflect.Float32: reflect.TypeOf(float64(1)),
reflect.Float64: reflect.TypeOf(float64(1)),
reflect.Int: reflect.TypeOf(int64(1)),
reflect.Int8: reflect.TypeOf(int64(1)),
reflect.Int16: reflect.TypeOf(int64(1)),
reflect.Int32: reflect.TypeOf(int64(1)),
reflect.Int64: reflect.TypeOf(int64(1)),
reflect.Uint: reflect.TypeOf(uint64(1)),
reflect.Uint8: reflect.TypeOf(uint64(1)),
reflect.Uint16: reflect.TypeOf(uint64(1)),
reflect.Uint32: reflect.TypeOf(uint64(1)),
reflect.Uint64: reflect.TypeOf(uint64(1)),
}
// typeFor returns a reflect.Type for a reflect.Kind, or nil if none is found.
// supported values:
// string, bool, int64, uint64, float64, time.Time, int, int8, int16, int32, uint, uint8, uint16, uint32, float32
func typeFor(k reflect.Kind) reflect.Type {
if k > 0 && int(k) < len(kindToType) {
return kindToType[k]
}
return nil
}
func simpleValueCoercion(object interface{}) (interface{}, error) {
switch original := object.(type) {
case string, bool, int64, uint64, float64, time.Time:
return original, nil
case int:
return int64(original), nil
case int8:
return int64(original), nil
case int16:
return int64(original), nil
case int32:
return int64(original), nil
case uint:
return uint64(original), nil
case uint8:
return uint64(original), nil
case uint16:
return uint64(original), nil
case uint32:
return uint64(original), nil
case float32:
return float64(original), nil
case fmt.Stringer:
return original.String(), nil
default:
return nil, fmt.Errorf("cannot convert type %T to Tree", object)
}
}
func sliceToTree(object interface{}) (interface{}, error) {
// arrays are a bit tricky, since they can represent either a
// collection of simple values, which is represented by one
// *tomlValue, or an array of tables, which is represented by an
// array of *Tree.
// holding the assumption that this function is called from toTree only when value.Kind() is Array or Slice
value := reflect.ValueOf(object)
insideType := value.Type().Elem()
length := value.Len()
if length > 0 {
insideType = reflect.ValueOf(value.Index(0).Interface()).Type()
}
if insideType.Kind() == reflect.Map {
// this is considered as an array of tables
tablesArray := make([]*Tree, 0, length)
for i := 0; i < length; i++ {
table := value.Index(i)
tree, err := toTree(table.Interface())
if err != nil {
return nil, err
}
tablesArray = append(tablesArray, tree.(*Tree))
}
return tablesArray, nil
}
sliceType := typeFor(insideType.Kind())
if sliceType == nil {
sliceType = insideType
}
arrayValue := reflect.MakeSlice(reflect.SliceOf(sliceType), 0, length)
for i := 0; i < length; i++ {
val := value.Index(i).Interface()
simpleValue, err := simpleValueCoercion(val)
if err != nil {
return nil, err
}
arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue))
}
return &tomlValue{value: arrayValue.Interface(), position: Position{}}, nil
}
func toTree(object interface{}) (interface{}, error) {
value := reflect.ValueOf(object)
if value.Kind() == reflect.Map {
values := map[string]interface{}{}
keys := value.MapKeys()
for _, key := range keys {
if key.Kind() != reflect.String {
if _, ok := key.Interface().(string); !ok {
return nil, fmt.Errorf("map key needs to be a string, not %T (%v)", key.Interface(), key.Kind())
}
}
v := value.MapIndex(key)
newValue, err := toTree(v.Interface())
if err != nil {
return nil, err
}
values[key.String()] = newValue
}
return &Tree{values: values, position: Position{}}, nil
}
if value.Kind() == reflect.Array || value.Kind() == reflect.Slice {
return sliceToTree(object)
}
simpleValue, err := simpleValueCoercion(object)
if err != nil {
return nil, err
}
return &tomlValue{value: simpleValue, position: Position{}}, nil
}

View File

@ -4,8 +4,6 @@ import (
"bytes"
"fmt"
"io"
"math"
"reflect"
"sort"
"strconv"
"strings"
@ -14,34 +12,33 @@ import (
// encodes a string to a TOML-compliant string value
func encodeTomlString(value string) string {
var b bytes.Buffer
result := ""
for _, rr := range value {
switch rr {
case '\b':
b.WriteString(`\b`)
result += "\\b"
case '\t':
b.WriteString(`\t`)
result += "\\t"
case '\n':
b.WriteString(`\n`)
result += "\\n"
case '\f':
b.WriteString(`\f`)
result += "\\f"
case '\r':
b.WriteString(`\r`)
result += "\\r"
case '"':
b.WriteString(`\"`)
result += "\\\""
case '\\':
b.WriteString(`\\`)
result += "\\\\"
default:
intRr := uint16(rr)
if intRr < 0x001F {
b.WriteString(fmt.Sprintf("\\u%0.4X", intRr))
result += fmt.Sprintf("\\u%0.4X", intRr)
} else {
b.WriteRune(rr)
result += string(rr)
}
}
}
return b.String()
return result
}
func tomlValueStringRepresentation(v interface{}) (string, error) {
@ -51,17 +48,9 @@ func tomlValueStringRepresentation(v interface{}) (string, error) {
case int64:
return strconv.FormatInt(value, 10), nil
case float64:
// Ensure a round float does contain a decimal point. Otherwise feeding
// the output back to the parser would convert to an integer.
if math.Trunc(value) == value {
return strconv.FormatFloat(value, 'f', 1, 32), nil
}
return strconv.FormatFloat(value, 'f', -1, 32), nil
case string:
return "\"" + encodeTomlString(value) + "\"", nil
case []byte:
b, _ := v.([]byte)
return tomlValueStringRepresentation(string(b))
case bool:
if value {
return "true", nil
@ -71,14 +60,9 @@ func tomlValueStringRepresentation(v interface{}) (string, error) {
return value.Format(time.RFC3339), nil
case nil:
return "", nil
}
rv := reflect.ValueOf(v)
if rv.Kind() == reflect.Slice {
case []interface{}:
values := []string{}
for i := 0; i < rv.Len(); i++ {
item := rv.Index(i).Interface()
for _, item := range value {
itemRepr, err := tomlValueStringRepresentation(item)
if err != nil {
return "", err
@ -86,18 +70,19 @@ func tomlValueStringRepresentation(v interface{}) (string, error) {
values = append(values, itemRepr)
}
return "[" + strings.Join(values, ",") + "]", nil
default:
return "", fmt.Errorf("unsupported value type %T: %v", value, value)
}
return "", fmt.Errorf("unsupported value type %T: %v", v, v)
}
func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64) (int64, error) {
func (t *TomlTree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64) (int64, error) {
simpleValuesKeys := make([]string, 0)
complexValuesKeys := make([]string, 0)
for k := range t.values {
v := t.values[k]
switch v.(type) {
case *Tree, []*Tree:
case *TomlTree, []*TomlTree:
complexValuesKeys = append(complexValuesKeys, k)
default:
simpleValuesKeys = append(simpleValuesKeys, k)
@ -110,7 +95,7 @@ func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64) (
for _, k := range simpleValuesKeys {
v, ok := t.values[k].(*tomlValue)
if !ok {
return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k])
return bytesCount, fmt.Errorf("invalid key type at %s: %T", k, t.values[k])
}
repr, err := tomlValueStringRepresentation(v.value)
@ -118,24 +103,8 @@ func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64) (
return bytesCount, err
}
if v.comment != "" {
comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1)
start := "# "
if strings.HasPrefix(comment, "#") {
start = ""
}
writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment, "\n")
bytesCount += int64(writtenBytesCountComment)
if errc != nil {
return bytesCount, errc
}
}
var commented string
if v.commented {
commented = "# "
}
writtenBytesCount, err := writeStrings(w, indent, commented, k, " = ", repr, "\n")
kvRepr := fmt.Sprintf("%s%s = %s\n", indent, k, repr)
writtenBytesCount, err := w.Write([]byte(kvRepr))
bytesCount += int64(writtenBytesCount)
if err != nil {
return bytesCount, err
@ -149,31 +118,12 @@ func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64) (
if keyspace != "" {
combinedKey = keyspace + "." + combinedKey
}
var commented string
if t.commented {
commented = "# "
}
switch node := v.(type) {
// node has to be of those two types given how keys are sorted above
case *Tree:
tv, ok := t.values[k].(*Tree)
if !ok {
return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k])
}
if tv.comment != "" {
comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1)
start := "# "
if strings.HasPrefix(comment, "#") {
start = ""
}
writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment)
bytesCount += int64(writtenBytesCountComment)
if errc != nil {
return bytesCount, errc
}
}
writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n")
case *TomlTree:
tableName := fmt.Sprintf("\n%s[%s]\n", indent, combinedKey)
writtenBytesCount, err := w.Write([]byte(tableName))
bytesCount += int64(writtenBytesCount)
if err != nil {
return bytesCount, err
@ -182,9 +132,11 @@ func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64) (
if err != nil {
return bytesCount, err
}
case []*Tree:
case []*TomlTree:
for _, subTree := range node {
writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n")
if len(subTree.values) > 0 {
tableArrayName := fmt.Sprintf("\n%s[[%s]]\n", indent, combinedKey)
writtenBytesCount, err := w.Write([]byte(tableArrayName))
bytesCount += int64(writtenBytesCount)
if err != nil {
return bytesCount, err
@ -197,32 +149,21 @@ func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64) (
}
}
}
}
return bytesCount, nil
}
func writeStrings(w io.Writer, s ...string) (int, error) {
var n int
for i := range s {
b, err := io.WriteString(w, s[i])
n += b
if err != nil {
return n, err
}
}
return n, nil
}
// WriteTo encodes the Tree as Toml and writes it to the writer w.
// WriteTo encodes the TomlTree as Toml and writes it to the writer w.
// Returns the number of bytes written in case of success, or an error if anything happened.
func (t *Tree) WriteTo(w io.Writer) (int64, error) {
func (t *TomlTree) WriteTo(w io.Writer) (int64, error) {
return t.writeTo(w, "", "", 0)
}
// ToTomlString generates a human-readable representation of the current tree.
// Output spans multiple lines, and is suitable for ingest by a TOML parser.
// If the conversion cannot be performed, ToTomlString returns a non-nil error.
func (t *Tree) ToTomlString() (string, error) {
func (t *TomlTree) ToTomlString() (string, error) {
var buf bytes.Buffer
_, err := t.WriteTo(&buf)
if err != nil {
@ -233,35 +174,36 @@ func (t *Tree) ToTomlString() (string, error) {
// String generates a human-readable representation of the current tree.
// Alias of ToString. Present to implement the fmt.Stringer interface.
func (t *Tree) String() string {
func (t *TomlTree) String() string {
result, _ := t.ToTomlString()
return result
}
// ToMap recursively generates a representation of the tree using Go built-in structures.
// The following types are used:
//
// * bool
// * float64
// * int64
// * string
// * uint64
// * int64
// * bool
// * string
// * time.Time
// * map[string]interface{} (where interface{} is any of this list)
// * []interface{} (where interface{} is any of this list)
func (t *Tree) ToMap() map[string]interface{} {
func (t *TomlTree) ToMap() map[string]interface{} {
result := map[string]interface{}{}
for k, v := range t.values {
switch node := v.(type) {
case []*Tree:
case []*TomlTree:
var array []interface{}
for _, item := range node {
array = append(array, item.ToMap())
}
result[k] = array
case *Tree:
case *TomlTree:
result[k] = node.ToMap()
case map[string]interface{}:
sub := TreeFromMap(node)
result[k] = sub.ToMap()
case *tomlValue:
result[k] = node.value
}
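
To make the serialization path above concrete, a short round-trip sketch: ToMap flattens a tree into the plain Go types listed in its doc comment, and ToTomlString drives the writeTo logic shown in this file. Illustrative only, assuming the pinned TomlTree revision.

package main

import (
    "fmt"

    "github.com/pelletier/go-toml"
)

func main() {
    tree, err := toml.Load(`title = "example"

[owner]
name = "kompose"
`)
    if err != nil {
        panic(err)
    }

    // Sub-tables come back as map[string]interface{}, per the ToMap type list.
    owner := tree.ToMap()["owner"].(map[string]interface{})
    fmt.Println(owner["name"]) // kompose

    // ToTomlString serializes the tree back out through WriteTo/writeTo.
    out, err := tree.ToTomlString()
    if err != nil {
        panic(err)
    }
    fmt.Print(out)
}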

View File

@ -79,14 +79,6 @@ func (f Frame) Format(s fmt.State, verb rune) {
// StackTrace is stack of Frames from innermost (newest) to outermost (oldest).
type StackTrace []Frame
// Format formats the stack of Frames according to the fmt.Formatter interface.
//
// %s lists source files for each Frame in the stack
// %v lists the source file and line number for each Frame in the stack
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
// %+v Prints filename, function, and line number for each Frame in the stack.
func (st StackTrace) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
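
The verb handling trimmed in this hunk is what distinguishes %v from %+v on a github.com/pkg/errors error: %+v walks the recorded StackTrace frame by frame. A small sketch, relying on the usual errors.New behaviour of capturing a stack at creation time.

package main

import (
    "fmt"

    "github.com/pkg/errors"
)

func main() {
    err := errors.New("connection refused")

    // %v prints only the message; %+v also prints file, function and line for
    // every frame via the Format methods shown above.
    fmt.Printf("%v\n", err)
    fmt.Printf("%+v\n", err)
}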

View File

@ -52,7 +52,7 @@ func validateBasePathName(name string) error {
// On Windows a common mistake would be to provide an absolute OS path
// We could strip out the base part, but that would not be very portable.
if filepath.IsAbs(name) {
return &os.PathError{Op: "realPath", Path: name, Err: errors.New("got a real OS path instead of a virtual")}
return &os.PathError{"realPath", name, errors.New("got a real OS path instead of a virtual")}
}
return nil
@ -60,14 +60,14 @@ func validateBasePathName(name string) error {
func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) {
if name, err = b.RealPath(name); err != nil {
return &os.PathError{Op: "chtimes", Path: name, Err: err}
return &os.PathError{"chtimes", name, err}
}
return b.source.Chtimes(name, atime, mtime)
}
func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) {
if name, err = b.RealPath(name); err != nil {
return &os.PathError{Op: "chmod", Path: name, Err: err}
return &os.PathError{"chmod", name, err}
}
return b.source.Chmod(name, mode)
}
@ -78,66 +78,66 @@ func (b *BasePathFs) Name() string {
func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) {
if name, err = b.RealPath(name); err != nil {
return nil, &os.PathError{Op: "stat", Path: name, Err: err}
return nil, &os.PathError{"stat", name, err}
}
return b.source.Stat(name)
}
func (b *BasePathFs) Rename(oldname, newname string) (err error) {
if oldname, err = b.RealPath(oldname); err != nil {
return &os.PathError{Op: "rename", Path: oldname, Err: err}
return &os.PathError{"rename", oldname, err}
}
if newname, err = b.RealPath(newname); err != nil {
return &os.PathError{Op: "rename", Path: newname, Err: err}
return &os.PathError{"rename", newname, err}
}
return b.source.Rename(oldname, newname)
}
func (b *BasePathFs) RemoveAll(name string) (err error) {
if name, err = b.RealPath(name); err != nil {
return &os.PathError{Op: "remove_all", Path: name, Err: err}
return &os.PathError{"remove_all", name, err}
}
return b.source.RemoveAll(name)
}
func (b *BasePathFs) Remove(name string) (err error) {
if name, err = b.RealPath(name); err != nil {
return &os.PathError{Op: "remove", Path: name, Err: err}
return &os.PathError{"remove", name, err}
}
return b.source.Remove(name)
}
func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) {
if name, err = b.RealPath(name); err != nil {
return nil, &os.PathError{Op: "openfile", Path: name, Err: err}
return nil, &os.PathError{"openfile", name, err}
}
return b.source.OpenFile(name, flag, mode)
}
func (b *BasePathFs) Open(name string) (f File, err error) {
if name, err = b.RealPath(name); err != nil {
return nil, &os.PathError{Op: "open", Path: name, Err: err}
return nil, &os.PathError{"open", name, err}
}
return b.source.Open(name)
}
func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) {
if name, err = b.RealPath(name); err != nil {
return &os.PathError{Op: "mkdir", Path: name, Err: err}
return &os.PathError{"mkdir", name, err}
}
return b.source.Mkdir(name, mode)
}
func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) {
if name, err = b.RealPath(name); err != nil {
return &os.PathError{Op: "mkdir", Path: name, Err: err}
return &os.PathError{"mkdir", name, err}
}
return b.source.MkdirAll(name, mode)
}
func (b *BasePathFs) Create(name string) (f File, err error) {
if name, err = b.RealPath(name); err != nil {
return nil, &os.PathError{Op: "create", Path: name, Err: err}
return nil, &os.PathError{"create", name, err}
}
return b.source.Create(name)
}

View File

@ -64,10 +64,15 @@ func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileIn
return cacheHit, lfi, nil
}
if err == syscall.ENOENT || os.IsNotExist(err) {
if err == syscall.ENOENT {
return cacheMiss, nil, nil
}
var ok bool
if err, ok = err.(*os.PathError); ok {
if err == os.ErrNotExist {
return cacheMiss, nil, nil
}
}
return cacheMiss, nil, err
}

View File

@ -1,110 +0,0 @@
// Copyright © 2014 Steve Francia <spf@spf13.com>.
// Copyright 2009 The Go Authors. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"path/filepath"
"sort"
"strings"
)
// Glob returns the names of all files matching pattern or nil
// if there is no matching file. The syntax of patterns is the same
// as in Match. The pattern may describe hierarchical names such as
// /usr/*/bin/ed (assuming the Separator is '/').
//
// Glob ignores file system errors such as I/O errors reading directories.
// The only possible returned error is ErrBadPattern, when pattern
// is malformed.
//
// This was adapted from (http://golang.org/pkg/path/filepath) and uses several
// built-ins from that package.
func Glob(fs Fs, pattern string) (matches []string, err error) {
if !hasMeta(pattern) {
// afero does not support Lstat directly.
if _, err = lstatIfOs(fs, pattern); err != nil {
return nil, nil
}
return []string{pattern}, nil
}
dir, file := filepath.Split(pattern)
switch dir {
case "":
dir = "."
case string(filepath.Separator):
// nothing
default:
dir = dir[0 : len(dir)-1] // chop off trailing separator
}
if !hasMeta(dir) {
return glob(fs, dir, file, nil)
}
var m []string
m, err = Glob(fs, dir)
if err != nil {
return
}
for _, d := range m {
matches, err = glob(fs, d, file, matches)
if err != nil {
return
}
}
return
}
// glob searches for files matching pattern in the directory dir
// and appends them to matches. If the directory cannot be
// opened, it returns the existing matches. New matches are
// added in lexicographical order.
func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) {
m = matches
fi, err := fs.Stat(dir)
if err != nil {
return
}
if !fi.IsDir() {
return
}
d, err := fs.Open(dir)
if err != nil {
return
}
defer d.Close()
names, _ := d.Readdirnames(-1)
sort.Strings(names)
for _, n := range names {
matched, err := filepath.Match(pattern, n)
if err != nil {
return m, err
}
if matched {
m = append(m, filepath.Join(dir, n))
}
}
return
}
// hasMeta reports whether path contains any of the magic characters
// recognized by Match.
func hasMeta(path string) bool {
// TODO(niemeyer): Should other magic characters be added here?
return strings.IndexAny(path, "*?[") >= 0
}
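
Since this hunk deletes afero's Glob helper from the vendored tree, a brief sketch of what it did: filepath.Glob generalized over any afero.Fs. The example assumes an afero revision that still ships Glob, along with the NewMemMapFs and WriteFile helpers.

package main

import (
    "fmt"

    "github.com/spf13/afero"
)

func main() {
    // A throwaway in-memory filesystem keeps the example self-contained.
    fs := afero.NewMemMapFs()
    _ = afero.WriteFile(fs, "/etc/app/config.yaml", []byte("a: 1"), 0644)
    _ = afero.WriteFile(fs, "/etc/app/notes.txt", []byte("b"), 0644)

    // Glob behaves like path/filepath.Glob but walks the given afero.Fs.
    matches, err := afero.Glob(fs, "/etc/app/*.yaml")
    if err != nil {
        panic(err)
    }
    fmt.Println(matches) // [/etc/app/config.yaml]
}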

View File

@ -74,24 +74,14 @@ func CreateDir(name string) *FileData {
}
func ChangeFileName(f *FileData, newname string) {
f.Lock()
f.name = newname
f.Unlock()
}
func SetMode(f *FileData, mode os.FileMode) {
f.Lock()
f.mode = mode
f.Unlock()
}
func SetModTime(f *FileData, mtime time.Time) {
f.Lock()
setModTime(f, mtime)
f.Unlock()
}
func setModTime(f *FileData, mtime time.Time) {
f.modtime = mtime
}
@ -112,7 +102,7 @@ func (f *File) Close() error {
f.fileData.Lock()
f.closed = true
if !f.readOnly {
setModTime(f.fileData, time.Now())
SetModTime(f.fileData, time.Now())
}
f.fileData.Unlock()
return nil
@ -196,7 +186,7 @@ func (f *File) Truncate(size int64) error {
return ErrFileClosed
}
if f.readOnly {
return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")}
return &os.PathError{"truncate", f.fileData.name, errors.New("file handle is read only")}
}
if size < 0 {
return ErrOutOfRange
@ -207,7 +197,7 @@ func (f *File) Truncate(size int64) error {
} else {
f.fileData.data = f.fileData.data[0:size]
}
setModTime(f.fileData, time.Now())
SetModTime(f.fileData, time.Now())
return nil
}
@ -228,7 +218,7 @@ func (f *File) Seek(offset int64, whence int) (int64, error) {
func (f *File) Write(b []byte) (n int, err error) {
if f.readOnly {
return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")}
return 0, &os.PathError{"write", f.fileData.name, errors.New("file handle is read only")}
}
n = len(b)
cur := atomic.LoadInt64(&f.at)
@ -246,7 +236,7 @@ func (f *File) Write(b []byte) (n int, err error) {
f.fileData.data = append(f.fileData.data[:cur], b...)
f.fileData.data = append(f.fileData.data, tail...)
}
setModTime(f.fileData, time.Now())
SetModTime(f.fileData, time.Now())
atomic.StoreInt64(&f.at, int64(len(f.fileData.data)))
return
@ -271,33 +261,17 @@ type FileInfo struct {
// Implements os.FileInfo
func (s *FileInfo) Name() string {
s.Lock()
_, name := filepath.Split(s.name)
s.Unlock()
return name
}
func (s *FileInfo) Mode() os.FileMode {
s.Lock()
defer s.Unlock()
return s.mode
}
func (s *FileInfo) ModTime() time.Time {
s.Lock()
defer s.Unlock()
return s.modtime
}
func (s *FileInfo) IsDir() bool {
s.Lock()
defer s.Unlock()
return s.dir
}
func (s *FileInfo) Mode() os.FileMode { return s.mode }
func (s *FileInfo) ModTime() time.Time { return s.modtime }
func (s *FileInfo) IsDir() bool { return s.dir }
func (s *FileInfo) Sys() interface{} { return nil }
func (s *FileInfo) Size() int64 {
if s.IsDir() {
return int64(42)
}
s.Lock()
defer s.Unlock()
return int64(len(s.data))
}

View File

@ -45,7 +45,7 @@ func (m *MemMapFs) getData() map[string]*mem.FileData {
return m.data
}
func (*MemMapFs) Name() string { return "MemMapFS" }
func (MemMapFs) Name() string { return "MemMapFS" }
func (m *MemMapFs) Create(name string) (File, error) {
name = normalizePath(name)
@ -66,10 +66,7 @@ func (m *MemMapFs) unRegisterWithParent(fileName string) error {
if parent == nil {
log.Panic("parent of ", f.Name(), " is nil")
}
parent.Lock()
mem.RemoveFromMemDir(parent, f)
parent.Unlock()
return nil
}
@ -102,10 +99,8 @@ func (m *MemMapFs) registerWithParent(f *mem.FileData) {
}
}
parent.Lock()
mem.InitializeDir(parent)
mem.AddToMemDir(parent, f)
parent.Unlock()
}
func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error {
@ -113,7 +108,7 @@ func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error {
x, ok := m.getData()[name]
if ok {
// Only return ErrFileExists if it's a file, not a directory.
i := mem.FileInfo{FileData: x}
i := mem.FileInfo{x}
if !i.IsDir() {
return ErrFileExists
}
@ -132,17 +127,15 @@ func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error {
_, ok := m.getData()[name]
m.mu.RUnlock()
if ok {
return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists}
}
return &os.PathError{"mkdir", name, ErrFileExists}
} else {
m.mu.Lock()
item := mem.CreateDir(name)
m.getData()[name] = item
m.registerWithParent(item)
m.mu.Unlock()
m.Chmod(name, perm|os.ModeDir)
m.Chmod(name, perm)
}
return nil
}
@ -151,9 +144,10 @@ func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error {
if err != nil {
if err.(*os.PathError).Err == ErrFileExists {
return nil
}
} else {
return err
}
}
return nil
}
@ -194,7 +188,7 @@ func (m *MemMapFs) open(name string) (*mem.FileData, error) {
f, ok := m.getData()[name]
m.mu.RUnlock()
if !ok {
return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound}
return nil, &os.PathError{"open", name, ErrFileNotFound}
}
return f, nil
}
@ -251,11 +245,11 @@ func (m *MemMapFs) Remove(name string) error {
if _, ok := m.getData()[name]; ok {
err := m.unRegisterWithParent(name)
if err != nil {
return &os.PathError{Op: "remove", Path: name, Err: err}
return &os.PathError{"remove", name, err}
}
delete(m.getData(), name)
} else {
return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist}
return &os.PathError{"remove", name, os.ErrNotExist}
}
return nil
}
@ -303,7 +297,7 @@ func (m *MemMapFs) Rename(oldname, newname string) error {
m.mu.Unlock()
m.mu.RLock()
} else {
return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound}
return &os.PathError{"rename", oldname, ErrFileNotFound}
}
return nil
}
@ -319,12 +313,9 @@ func (m *MemMapFs) Stat(name string) (os.FileInfo, error) {
func (m *MemMapFs) Chmod(name string, mode os.FileMode) error {
name = normalizePath(name)
m.mu.RLock()
f, ok := m.getData()[name]
m.mu.RUnlock()
if !ok {
return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound}
return &os.PathError{"chmod", name, ErrFileNotFound}
}
m.mu.Lock()
@ -336,12 +327,9 @@ func (m *MemMapFs) Chmod(name string, mode os.FileMode) error {
func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
name = normalizePath(name)
m.mu.RLock()
f, ok := m.getData()[name]
m.mu.RUnlock()
if !ok {
return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound}
return &os.PathError{"chtimes", name, ErrFileNotFound}
}
m.mu.Lock()
@ -353,7 +341,7 @@ func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error
func (m *MemMapFs) List() {
for _, x := range m.data {
y := mem.FileInfo{FileData: x}
y := mem.FileInfo{x}
fmt.Println(x.Name(), y.Size())
}
}

vendor/github.com/spf13/afero/memradix.go (generated, vendored, new file: 14 lines)
View File

@ -0,0 +1,14 @@
// Copyright © 2014 Steve Francia <spf@spf13.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero

vendor/github.com/spf13/cast/cast.go (generated, vendored, 76-line diff)
View File

@ -3,157 +3,81 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// Package cast provides easy and safe casting in Go.
package cast
import "time"
// ToBool casts an interface to a bool type.
func ToBool(i interface{}) bool {
v, _ := ToBoolE(i)
return v
}
// ToTime casts an interface to a time.Time type.
func ToTime(i interface{}) time.Time {
v, _ := ToTimeE(i)
return v
}
// ToDuration casts an interface to a time.Duration type.
func ToDuration(i interface{}) time.Duration {
v, _ := ToDurationE(i)
return v
}
// ToFloat64 casts an interface to a float64 type.
func ToFloat64(i interface{}) float64 {
v, _ := ToFloat64E(i)
return v
}
// ToFloat32 casts an interface to a float32 type.
func ToFloat32(i interface{}) float32 {
v, _ := ToFloat32E(i)
return v
}
// ToInt64 casts an interface to an int64 type.
func ToInt64(i interface{}) int64 {
v, _ := ToInt64E(i)
return v
}
// ToInt32 casts an interface to an int32 type.
func ToInt32(i interface{}) int32 {
v, _ := ToInt32E(i)
return v
}
// ToInt16 casts an interface to an int16 type.
func ToInt16(i interface{}) int16 {
v, _ := ToInt16E(i)
return v
}
// ToInt8 casts an interface to an int8 type.
func ToInt8(i interface{}) int8 {
v, _ := ToInt8E(i)
return v
}
// ToInt casts an interface to an int type.
func ToInt(i interface{}) int {
v, _ := ToIntE(i)
return v
}
// ToUint casts an interface to a uint type.
func ToUint(i interface{}) uint {
v, _ := ToUintE(i)
return v
}
// ToUint64 casts an interface to a uint64 type.
func ToUint64(i interface{}) uint64 {
v, _ := ToUint64E(i)
return v
}
// ToUint32 casts an interface to a uint32 type.
func ToUint32(i interface{}) uint32 {
v, _ := ToUint32E(i)
return v
}
// ToUint16 casts an interface to a uint16 type.
func ToUint16(i interface{}) uint16 {
v, _ := ToUint16E(i)
return v
}
// ToUint8 casts an interface to a uint8 type.
func ToUint8(i interface{}) uint8 {
v, _ := ToUint8E(i)
return v
}
// ToString casts an interface to a string type.
func ToString(i interface{}) string {
v, _ := ToStringE(i)
return v
}
// ToStringMapString casts an interface to a map[string]string type.
func ToStringMapString(i interface{}) map[string]string {
v, _ := ToStringMapStringE(i)
return v
}
// ToStringMapStringSlice casts an interface to a map[string][]string type.
func ToStringMapStringSlice(i interface{}) map[string][]string {
v, _ := ToStringMapStringSliceE(i)
return v
}
// ToStringMapBool casts an interface to a map[string]bool type.
func ToStringMapBool(i interface{}) map[string]bool {
v, _ := ToStringMapBoolE(i)
return v
}
// ToStringMap casts an interface to a map[string]interface{} type.
func ToStringMap(i interface{}) map[string]interface{} {
v, _ := ToStringMapE(i)
return v
}
// ToSlice casts an interface to a []interface{} type.
func ToSlice(i interface{}) []interface{} {
v, _ := ToSliceE(i)
return v
}
// ToBoolSlice casts an interface to a []bool type.
func ToBoolSlice(i interface{}) []bool {
v, _ := ToBoolSliceE(i)
return v
}
// ToStringSlice casts an interface to a []string type.
func ToStringSlice(i interface{}) []string {
v, _ := ToStringSliceE(i)
return v
}
// ToIntSlice casts an interface to a []int type.
func ToIntSlice(i interface{}) []int {
v, _ := ToIntSliceE(i)
return v
}
// ToDurationSlice casts an interface to a []time.Duration type.
func ToDurationSlice(i interface{}) []time.Duration {
v, _ := ToDurationSliceE(i)
return v
}
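
The pattern in this file is uniform: every ToX helper delegates to a ToXE counterpart (defined in caste.go, whose diff is suppressed below) and discards the error, so a failed conversion yields the type's zero value. A quick usage sketch:

package main

import (
    "fmt"

    "github.com/spf13/cast"
)

func main() {
    fmt.Println(cast.ToInt("8"))       // 8
    fmt.Println(cast.ToInt("not-int")) // 0, error swallowed by the ToX wrapper
    fmt.Println(cast.ToBool("true"))   // true
    fmt.Println(cast.ToString(42))     // 42

    // The E variants surface the failure instead.
    if _, err := cast.ToDurationE("soon"); err != nil {
        fmt.Println("conversion failed:", err)
    }
}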

vendor/github.com/spf13/cast/caste.go (generated, vendored, 779-line diff)

File diff suppressed because it is too large.

View File

@ -1,98 +0,0 @@
package cobra
import (
"fmt"
)
type PositionalArgs func(cmd *Command, args []string) error
// Legacy arg validation has the following behaviour:
// - root commands with no subcommands can take arbitrary arguments
// - root commands with subcommands will do subcommand validity checking
// - subcommands will always accept arbitrary arguments
func legacyArgs(cmd *Command, args []string) error {
// no subcommand, always take args
if !cmd.HasSubCommands() {
return nil
}
// root command with subcommands, do subcommand checking
if !cmd.HasParent() && len(args) > 0 {
return fmt.Errorf("unknown command %q for %q%s", args[0], cmd.CommandPath(), cmd.findSuggestions(args[0]))
}
return nil
}
// NoArgs returns an error if any args are included
func NoArgs(cmd *Command, args []string) error {
if len(args) > 0 {
return fmt.Errorf("unknown command %q for %q", args[0], cmd.CommandPath())
}
return nil
}
// OnlyValidArgs returns an error if any args are not in the list of ValidArgs
func OnlyValidArgs(cmd *Command, args []string) error {
if len(cmd.ValidArgs) > 0 {
for _, v := range args {
if !stringInSlice(v, cmd.ValidArgs) {
return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0]))
}
}
}
return nil
}
func stringInSlice(a string, list []string) bool {
for _, b := range list {
if b == a {
return true
}
}
return false
}
// ArbitraryArgs never returns an error
func ArbitraryArgs(cmd *Command, args []string) error {
return nil
}
// MinimumNArgs returns an error if there are not at least N args
func MinimumNArgs(n int) PositionalArgs {
return func(cmd *Command, args []string) error {
if len(args) < n {
return fmt.Errorf("requires at least %d arg(s), only received %d", n, len(args))
}
return nil
}
}
// MaximumNArgs returns an error if there are more than N args
func MaximumNArgs(n int) PositionalArgs {
return func(cmd *Command, args []string) error {
if len(args) > n {
return fmt.Errorf("accepts at most %d arg(s), received %d", n, len(args))
}
return nil
}
}
// ExactArgs returns an error if there are not exactly n args
func ExactArgs(n int) PositionalArgs {
return func(cmd *Command, args []string) error {
if len(args) != n {
return fmt.Errorf("accepts %d arg(s), received %d", n, len(args))
}
return nil
}
}
// RangeArgs returns an error if the number of args is not within the expected range
func RangeArgs(min int, max int) PositionalArgs {
return func(cmd *Command, args []string) error {
if len(args) < min || len(args) > max {
return fmt.Errorf("accepts between %d and %d arg(s), received %d", min, max, len(args))
}
return nil
}
}
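
These validators are wired into a command through its Args field. The sketch below assumes a cobra revision that still ships args.go (the pin in this update removes the file), and the command name is made up for illustration.

package main

import (
    "fmt"

    "github.com/spf13/cobra"
)

func main() {
    cmd := &cobra.Command{
        Use:  "greet NAME",
        Args: cobra.ExactArgs(1), // validator from the args.go shown above
        RunE: func(cmd *cobra.Command, args []string) error {
            fmt.Println("hello,", args[0])
            return nil
        },
        SilenceUsage:  true,
        SilenceErrors: true,
    }

    cmd.SetArgs([]string{}) // no positional args supplied
    if err := cmd.Execute(); err != nil {
        fmt.Println(err) // accepts 1 arg(s), received 0
    }
}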

View File

@ -1,7 +1,6 @@
package cobra
import (
"bytes"
"fmt"
"io"
"os"
@ -11,7 +10,6 @@ import (
"github.com/spf13/pflag"
)
// Annotations for Bash completion.
const (
BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extensions"
BashCompCustom = "cobra_annotation_bash_completion_custom"
@ -19,9 +17,12 @@ const (
BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir"
)
func writePreamble(buf *bytes.Buffer, name string) {
buf.WriteString(fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name))
buf.WriteString(`
func preamble(out io.Writer, name string) error {
_, err := fmt.Fprintf(out, "# bash completion for %-36s -*- shell-script -*-\n", name)
if err != nil {
return err
}
_, err = fmt.Fprint(out, `
__debug()
{
if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then
@ -86,8 +87,8 @@ __handle_reply()
local index flag
flag="${cur%%=*}"
__index_of_word "${flag}" "${flags_with_completion[@]}"
COMPREPLY=()
if [[ ${index} -ge 0 ]]; then
COMPREPLY=()
PREFIX=""
cur="${cur#*=}"
${flags_completion[${index}]}
@ -132,10 +133,7 @@ __handle_reply()
declare -F __custom_func >/dev/null && __custom_func
fi
# available in bash-completion >= 2, not always present on macOS
if declare -F __ltrim_colon_completions >/dev/null; then
__ltrim_colon_completions "$cur"
fi
}
# The arguments should be in the form "ext1|ext2|extn"
@ -226,7 +224,7 @@ __handle_command()
fi
c=$((c+1))
__debug "${FUNCNAME[0]}: looking for ${next_command}"
declare -F "$next_command" >/dev/null && $next_command
declare -F $next_command >/dev/null && $next_command
}
__handle_word()
@ -249,12 +247,16 @@ __handle_word()
}
`)
return err
}
func writePostscript(buf *bytes.Buffer, name string) {
func postscript(w io.Writer, name string) error {
name = strings.Replace(name, ":", "__", -1)
buf.WriteString(fmt.Sprintf("__start_%s()\n", name))
buf.WriteString(fmt.Sprintf(`{
_, err := fmt.Fprintf(w, "__start_%s()\n", name)
if err != nil {
return err
}
_, err = fmt.Fprintf(w, `{
local cur prev words cword
declare -A flaghash 2>/dev/null || :
if declare -F _init_completion >/dev/null 2>&1; then
@ -278,132 +280,197 @@ func writePostscript(buf *bytes.Buffer, name string) {
__handle_word
}
`, name))
buf.WriteString(fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then
`, name)
if err != nil {
return err
}
_, err = fmt.Fprintf(w, `if [[ $(type -t compopt) = "builtin" ]]; then
complete -o default -F __start_%s %s
else
complete -o default -o nospace -F __start_%s %s
fi
`, name, name, name, name))
buf.WriteString("# ex: ts=4 sw=4 et filetype=sh\n")
`, name, name, name, name)
if err != nil {
return err
}
_, err = fmt.Fprintf(w, "# ex: ts=4 sw=4 et filetype=sh\n")
return err
}
func writeCommands(buf *bytes.Buffer, cmd *Command) {
buf.WriteString(" commands=()\n")
func writeCommands(cmd *Command, w io.Writer) error {
if _, err := fmt.Fprintf(w, " commands=()\n"); err != nil {
return err
}
for _, c := range cmd.Commands() {
if !c.IsAvailableCommand() || c == cmd.helpCommand {
continue
}
buf.WriteString(fmt.Sprintf(" commands+=(%q)\n", c.Name()))
if _, err := fmt.Fprintf(w, " commands+=(%q)\n", c.Name()); err != nil {
return err
}
buf.WriteString("\n")
}
_, err := fmt.Fprintf(w, "\n")
return err
}
func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]string) {
func writeFlagHandler(name string, annotations map[string][]string, w io.Writer) error {
for key, value := range annotations {
switch key {
case BashCompFilenameExt:
buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
var ext string
if len(value) > 0 {
ext = "__handle_filename_extension_flag " + strings.Join(value, "|")
} else {
ext = "_filedir"
_, err := fmt.Fprintf(w, " flags_with_completion+=(%q)\n", name)
if err != nil {
return err
}
if len(value) > 0 {
ext := "__handle_filename_extension_flag " + strings.Join(value, "|")
_, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext)
} else {
ext := "_filedir"
_, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext)
}
if err != nil {
return err
}
buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext))
case BashCompCustom:
buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
_, err := fmt.Fprintf(w, " flags_with_completion+=(%q)\n", name)
if err != nil {
return err
}
if len(value) > 0 {
handlers := strings.Join(value, "; ")
buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", handlers))
_, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", handlers)
} else {
buf.WriteString(" flags_completion+=(:)\n")
_, err = fmt.Fprintf(w, " flags_completion+=(:)\n")
}
if err != nil {
return err
}
case BashCompSubdirsInDir:
buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
_, err := fmt.Fprintf(w, " flags_with_completion+=(%q)\n", name)
var ext string
if len(value) == 1 {
ext = "__handle_subdirs_in_dir_flag " + value[0]
ext := "__handle_subdirs_in_dir_flag " + value[0]
_, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext)
} else {
ext = "_filedir -d"
ext := "_filedir -d"
_, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext)
}
buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext))
if err != nil {
return err
}
}
}
return nil
}
func writeShortFlag(buf *bytes.Buffer, flag *pflag.Flag) {
func writeShortFlag(flag *pflag.Flag, w io.Writer) error {
b := (len(flag.NoOptDefVal) > 0)
name := flag.Shorthand
format := " "
if len(flag.NoOptDefVal) == 0 {
if !b {
format += "two_word_"
}
format += "flags+=(\"-%s\")\n"
buf.WriteString(fmt.Sprintf(format, name))
writeFlagHandler(buf, "-"+name, flag.Annotations)
if _, err := fmt.Fprintf(w, format, name); err != nil {
return err
}
return writeFlagHandler("-"+name, flag.Annotations, w)
}
func writeFlag(buf *bytes.Buffer, flag *pflag.Flag) {
func writeFlag(flag *pflag.Flag, w io.Writer) error {
b := (len(flag.NoOptDefVal) > 0)
name := flag.Name
format := " flags+=(\"--%s"
if len(flag.NoOptDefVal) == 0 {
if !b {
format += "="
}
format += "\")\n"
buf.WriteString(fmt.Sprintf(format, name))
writeFlagHandler(buf, "--"+name, flag.Annotations)
if _, err := fmt.Fprintf(w, format, name); err != nil {
return err
}
return writeFlagHandler("--"+name, flag.Annotations, w)
}
func writeLocalNonPersistentFlag(buf *bytes.Buffer, flag *pflag.Flag) {
func writeLocalNonPersistentFlag(flag *pflag.Flag, w io.Writer) error {
b := (len(flag.NoOptDefVal) > 0)
name := flag.Name
format := " local_nonpersistent_flags+=(\"--%s"
if len(flag.NoOptDefVal) == 0 {
if !b {
format += "="
}
format += "\")\n"
buf.WriteString(fmt.Sprintf(format, name))
_, err := fmt.Fprintf(w, format, name)
return err
}
func writeFlags(buf *bytes.Buffer, cmd *Command) {
buf.WriteString(` flags=()
func writeFlags(cmd *Command, w io.Writer) error {
_, err := fmt.Fprintf(w, ` flags=()
two_word_flags=()
local_nonpersistent_flags=()
flags_with_completion=()
flags_completion=()
`)
if err != nil {
return err
}
localNonPersistentFlags := cmd.LocalNonPersistentFlags()
var visitErr error
cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
if nonCompletableFlag(flag) {
return
}
writeFlag(buf, flag)
if err := writeFlag(flag, w); err != nil {
visitErr = err
return
}
if len(flag.Shorthand) > 0 {
writeShortFlag(buf, flag)
if err := writeShortFlag(flag, w); err != nil {
visitErr = err
return
}
}
if localNonPersistentFlags.Lookup(flag.Name) != nil {
writeLocalNonPersistentFlag(buf, flag)
if err := writeLocalNonPersistentFlag(flag, w); err != nil {
visitErr = err
return
}
}
})
if visitErr != nil {
return visitErr
}
cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
if nonCompletableFlag(flag) {
return
}
writeFlag(buf, flag)
if err := writeFlag(flag, w); err != nil {
visitErr = err
return
}
if len(flag.Shorthand) > 0 {
writeShortFlag(buf, flag)
if err := writeShortFlag(flag, w); err != nil {
visitErr = err
return
}
}
})
if visitErr != nil {
return visitErr
}
buf.WriteString("\n")
_, err = fmt.Fprintf(w, "\n")
return err
}
func writeRequiredFlag(buf *bytes.Buffer, cmd *Command) {
buf.WriteString(" must_have_one_flag=()\n")
func writeRequiredFlag(cmd *Command, w io.Writer) error {
if _, err := fmt.Fprintf(w, " must_have_one_flag=()\n"); err != nil {
return err
}
flags := cmd.NonInheritedFlags()
var visitErr error
flags.VisitAll(func(flag *pflag.Flag) {
if nonCompletableFlag(flag) {
return
@ -412,93 +479,130 @@ func writeRequiredFlag(buf *bytes.Buffer, cmd *Command) {
switch key {
case BashCompOneRequiredFlag:
format := " must_have_one_flag+=(\"--%s"
if flag.Value.Type() != "bool" {
b := (flag.Value.Type() == "bool")
if !b {
format += "="
}
format += "\")\n"
buf.WriteString(fmt.Sprintf(format, flag.Name))
if _, err := fmt.Fprintf(w, format, flag.Name); err != nil {
visitErr = err
return
}
if len(flag.Shorthand) > 0 {
buf.WriteString(fmt.Sprintf(" must_have_one_flag+=(\"-%s\")\n", flag.Shorthand))
if _, err := fmt.Fprintf(w, " must_have_one_flag+=(\"-%s\")\n", flag.Shorthand); err != nil {
visitErr = err
return
}
}
}
}
})
return visitErr
}
func writeRequiredNouns(buf *bytes.Buffer, cmd *Command) {
buf.WriteString(" must_have_one_noun=()\n")
func writeRequiredNouns(cmd *Command, w io.Writer) error {
if _, err := fmt.Fprintf(w, " must_have_one_noun=()\n"); err != nil {
return err
}
sort.Sort(sort.StringSlice(cmd.ValidArgs))
for _, value := range cmd.ValidArgs {
buf.WriteString(fmt.Sprintf(" must_have_one_noun+=(%q)\n", value))
if _, err := fmt.Fprintf(w, " must_have_one_noun+=(%q)\n", value); err != nil {
return err
}
}
return nil
}
func writeArgAliases(buf *bytes.Buffer, cmd *Command) {
buf.WriteString(" noun_aliases=()\n")
func writeArgAliases(cmd *Command, w io.Writer) error {
if _, err := fmt.Fprintf(w, " noun_aliases=()\n"); err != nil {
return err
}
sort.Sort(sort.StringSlice(cmd.ArgAliases))
for _, value := range cmd.ArgAliases {
buf.WriteString(fmt.Sprintf(" noun_aliases+=(%q)\n", value))
if _, err := fmt.Fprintf(w, " noun_aliases+=(%q)\n", value); err != nil {
return err
}
}
return nil
}
func gen(buf *bytes.Buffer, cmd *Command) {
func gen(cmd *Command, w io.Writer) error {
for _, c := range cmd.Commands() {
if !c.IsAvailableCommand() || c == cmd.helpCommand {
continue
}
gen(buf, c)
if err := gen(c, w); err != nil {
return err
}
}
commandName := cmd.CommandPath()
commandName = strings.Replace(commandName, " ", "_", -1)
commandName = strings.Replace(commandName, ":", "__", -1)
buf.WriteString(fmt.Sprintf("_%s()\n{\n", commandName))
buf.WriteString(fmt.Sprintf(" last_command=%q\n", commandName))
writeCommands(buf, cmd)
writeFlags(buf, cmd)
writeRequiredFlag(buf, cmd)
writeRequiredNouns(buf, cmd)
writeArgAliases(buf, cmd)
buf.WriteString("}\n\n")
if _, err := fmt.Fprintf(w, "_%s()\n{\n", commandName); err != nil {
return err
}
if _, err := fmt.Fprintf(w, " last_command=%q\n", commandName); err != nil {
return err
}
if err := writeCommands(cmd, w); err != nil {
return err
}
if err := writeFlags(cmd, w); err != nil {
return err
}
if err := writeRequiredFlag(cmd, w); err != nil {
return err
}
if err := writeRequiredNouns(cmd, w); err != nil {
return err
}
if err := writeArgAliases(cmd, w); err != nil {
return err
}
if _, err := fmt.Fprintf(w, "}\n\n"); err != nil {
return err
}
return nil
}
// GenBashCompletion generates bash completion file and writes to the passed writer.
func (c *Command) GenBashCompletion(w io.Writer) error {
buf := new(bytes.Buffer)
writePreamble(buf, c.Name())
if len(c.BashCompletionFunction) > 0 {
buf.WriteString(c.BashCompletionFunction + "\n")
}
gen(buf, c)
writePostscript(buf, c.Name())
_, err := buf.WriteTo(w)
func (cmd *Command) GenBashCompletion(w io.Writer) error {
if err := preamble(w, cmd.Name()); err != nil {
return err
}
if len(cmd.BashCompletionFunction) > 0 {
if _, err := fmt.Fprintf(w, "%s\n", cmd.BashCompletionFunction); err != nil {
return err
}
}
if err := gen(cmd, w); err != nil {
return err
}
return postscript(w, cmd.Name())
}
func nonCompletableFlag(flag *pflag.Flag) bool {
return flag.Hidden || len(flag.Deprecated) > 0
}
// GenBashCompletionFile generates bash completion file.
func (c *Command) GenBashCompletionFile(filename string) error {
func (cmd *Command) GenBashCompletionFile(filename string) error {
outFile, err := os.Create(filename)
if err != nil {
return err
}
defer outFile.Close()
return c.GenBashCompletion(outFile)
return cmd.GenBashCompletion(outFile)
}
// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag, if it exists.
func (c *Command) MarkFlagRequired(name string) error {
return MarkFlagRequired(c.Flags(), name)
func (cmd *Command) MarkFlagRequired(name string) error {
return MarkFlagRequired(cmd.Flags(), name)
}
// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag, if it exists.
func (c *Command) MarkPersistentFlagRequired(name string) error {
return MarkFlagRequired(c.PersistentFlags(), name)
func (cmd *Command) MarkPersistentFlagRequired(name string) error {
return MarkFlagRequired(cmd.PersistentFlags(), name)
}
// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag in the flag set, if it exists.
@ -508,20 +612,20 @@ func MarkFlagRequired(flags *pflag.FlagSet, name string) error {
// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag, if it exists.
// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided.
func (c *Command) MarkFlagFilename(name string, extensions ...string) error {
return MarkFlagFilename(c.Flags(), name, extensions...)
func (cmd *Command) MarkFlagFilename(name string, extensions ...string) error {
return MarkFlagFilename(cmd.Flags(), name, extensions...)
}
// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists.
// Generated bash autocompletion will call the bash function f for the flag.
func (c *Command) MarkFlagCustom(name string, f string) error {
return MarkFlagCustom(c.Flags(), name, f)
func (cmd *Command) MarkFlagCustom(name string, f string) error {
return MarkFlagCustom(cmd.Flags(), name, f)
}
// MarkPersistentFlagFilename adds the BashCompFilenameExt annotation to the named persistent flag, if it exists.
// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided.
func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error {
return MarkFlagFilename(c.PersistentFlags(), name, extensions...)
func (cmd *Command) MarkPersistentFlagFilename(name string, extensions ...string) error {
return MarkFlagFilename(cmd.PersistentFlags(), name, extensions...)
}
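// Editor's note: illustrative sketch of the filename/custom completion
// annotations documented above; "__demo_get_services" is an assumed bash
// function name that would have to be supplied via BashCompletionFunction.
package main

import (
    "log"

    "github.com/spf13/cobra"
)

func main() {
    cmd := &cobra.Command{Use: "convert", Run: func(cmd *cobra.Command, args []string) {}}
    cmd.Flags().StringSlice("file", nil, "compose files")
    cmd.Flags().String("service", "", "service name")
    cmd.PersistentFlags().String("kubeconfig", "", "path to kubeconfig")

    // Complete --file with *.yml / *.yaml filenames only.
    if err := cmd.MarkFlagFilename("file", "yml", "yaml"); err != nil {
        log.Fatal(err)
    }
    // Delegate completion of --service to a custom bash function.
    if err := cmd.MarkFlagCustom("service", "__demo_get_services"); err != nil {
        log.Fatal(err)
    }
    // Persistent flags get the same treatment via the persistent variant.
    if err := cmd.MarkPersistentFlagFilename("kubeconfig"); err != nil {
        log.Fatal(err)
    }
}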
// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag in the flag set, if it exists.

View File

@ -29,7 +29,6 @@ import (
var templateFuncs = template.FuncMap{
"trim": strings.TrimSpace,
"trimRightSpace": trimRightSpace,
"trimTrailingWhitespaces": trimRightSpace,
"appendIfNotPresent": appendIfNotPresent,
"rpad": rpad,
"gt": Gt,
@ -38,8 +37,7 @@ var templateFuncs = template.FuncMap{
var initializers []func()
// EnablePrefixMatching allows to set automatic prefix matching. Automatic prefix matching can be a dangerous thing
// to automatically enable in CLI tools.
// Automatic prefix matching can be a dangerous thing to automatically enable in CLI tools.
// Set this to true to enable it.
var EnablePrefixMatching = false
@ -47,22 +45,13 @@ var EnablePrefixMatching = false
// To disable sorting, set it to false.
var EnableCommandSorting = true
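// Editor's note: a small sketch of the two package-level toggles above; the
// subcommand names are invented. Prefix matching is off by default precisely
// because of the ambiguity risk the comment mentions.
package main

import (
    "github.com/spf13/cobra"
)

func main() {
    // Let "demo conv" resolve to "demo convert" (use with care), and keep
    // subcommands in the order they were added instead of sorting them.
    cobra.EnablePrefixMatching = true
    cobra.EnableCommandSorting = false

    rootCmd := &cobra.Command{Use: "demo"}
    rootCmd.AddCommand(
        &cobra.Command{Use: "convert", Run: func(cmd *cobra.Command, args []string) {}},
        &cobra.Command{Use: "up", Run: func(cmd *cobra.Command, args []string) {}},
    )
    rootCmd.Execute()
}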
// MousetrapHelpText enables an information splash screen on Windows
// if the CLI is started from explorer.exe.
// To disable the mousetrap, just set this variable to blank string ("").
// Works only on Microsoft Windows.
var MousetrapHelpText string = `This is a command line tool.
You need to open cmd.exe and run it from there.
`
// AddTemplateFunc adds a template function that's available to Usage and Help
// template generation.
func AddTemplateFunc(name string, tmplFunc interface{}) {
templateFuncs[name] = tmplFunc
}
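// Editor's note: sketch of registering template helpers and referencing one
// from a usage template; the "yell" and "boxed" names are invented for the
// example.
package main

import (
    "strings"
    "text/template"

    "github.com/spf13/cobra"
)

func main() {
    // Register a single helper...
    cobra.AddTemplateFunc("yell", strings.ToUpper)
    // ...or several at once.
    cobra.AddTemplateFuncs(template.FuncMap{
        "boxed": func(s string) string { return "** " + s + " **" },
    })

    rootCmd := &cobra.Command{Use: "demo", Short: "template helper demo"}
    // Registered helpers become available to Usage and Help templates.
    rootCmd.SetUsageTemplate(`{{yell "usage:"}} {{.UseLine}}
`)
    _ = rootCmd.Usage()
}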
// AddTemplateFuncs adds multiple template functions that are available to Usage and
// AddTemplateFuncs adds multiple template functions available to Usage and
// Help template generation.
func AddTemplateFuncs(tmplFuncs template.FuncMap) {
for k, v := range tmplFuncs {
@ -75,8 +64,6 @@ func OnInitialize(y ...func()) {
initializers = append(initializers, y...)
}
// FIXME Gt is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
// Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans,
// Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as
// ints and then compared.
@ -107,8 +94,6 @@ func Gt(a interface{}, b interface{}) bool {
return left > right
}
// FIXME Eq is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
// Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic.
func Eq(a interface{}, b interface{}) bool {
av := reflect.ValueOf(a)
@ -129,8 +114,6 @@ func trimRightSpace(s string) string {
return strings.TrimRightFunc(s, unicode.IsSpace)
}
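// Editor's note: a tiny sketch of the exported Gt/Eq helpers described above.
// Both are flagged FIXME upstream and kept only for backwards compatibility,
// so new code should not rely on them.
package main

import (
    "fmt"

    "github.com/spf13/cobra"
)

func main() {
    fmt.Println(cobra.Gt(3, 2))                  // true: ints compared directly
    fmt.Println(cobra.Gt("10", "9"))             // true: strings are parsed as ints first
    fmt.Println(cobra.Gt([]int{1, 2}, []int{3})) // true: slices compare by length
    fmt.Println(cobra.Eq("a", "a"))              // true: string equality
    fmt.Println(cobra.Eq(2, 3))                  // false: int equality
}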
// FIXME appendIfNotPresent is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s.
func appendIfNotPresent(s, stringToAppend string) string {
if strings.Contains(s, stringToAppend) {

File diff suppressed because it is too large

View File

@ -11,8 +11,14 @@ import (
var preExecHookFn = preExecHook
// enables an information splash screen on Windows if the CLI is started from explorer.exe.
var MousetrapHelpText string = `This is a command line tool
You need to open cmd.exe and run it from there.
`
func preExecHook(c *Command) {
if MousetrapHelpText != "" && mousetrap.StartedByExplorer() {
if mousetrap.StartedByExplorer() {
c.Print(MousetrapHelpText)
time.Sleep(5 * time.Second)
os.Exit(1)

Some files were not shown because too many files have changed in this diff