forked from LaconicNetwork/kompose
bump(github.com/docker/libcompose) v0.3.0
This commit is contained in:
parent
223b4f37fa
commit
e341adce0e
123
Godeps/Godeps.json
generated
123
Godeps/Godeps.json
generated
@ -33,10 +33,6 @@
|
|||||||
"Comment": "v3.0.1",
|
"Comment": "v3.0.1",
|
||||||
"Rev": "31b736133b98f26d5e078ec9eb591666edfd091f"
|
"Rev": "31b736133b98f26d5e078ec9eb591666edfd091f"
|
||||||
},
|
},
|
||||||
{
|
|
||||||
"ImportPath": "github.com/cloudfoundry-incubator/candiedyaml",
|
|
||||||
"Rev": "99c3df83b51532e3615f851d8c2dbb638f5313bf"
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/coreos/go-oidc/http",
|
"ImportPath": "github.com/coreos/go-oidc/http",
|
||||||
"Rev": "5cf2aa52da8c574d3aa4458f471ad6ae2240fe6b"
|
"Rev": "5cf2aa52da8c574d3aa4458f471ad6ae2240fe6b"
|
||||||
@ -59,28 +55,28 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/coreos/go-systemd/journal",
|
"ImportPath": "github.com/coreos/go-systemd/journal",
|
||||||
"Comment": "v11-2-gfa8411d",
|
"Comment": "v8-2-g4484981",
|
||||||
"Rev": "fa8411dcbcbad22b8542b0433914ef68b123f989"
|
"Rev": "4484981625c1a6a2ecb40a390fcb6a9bcfee76e3"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/coreos/pkg/capnslog",
|
"ImportPath": "github.com/coreos/pkg/capnslog",
|
||||||
"Comment": "v2-12-g3ac0863",
|
"Comment": "v2",
|
||||||
"Rev": "3ac0863d7acf3bc44daf49afef8919af12f704ef"
|
"Rev": "7f080b6c11ac2d2347c3cd7521e810207ea1a041"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/coreos/pkg/health",
|
"ImportPath": "github.com/coreos/pkg/health",
|
||||||
"Comment": "v2-12-g3ac0863",
|
"Comment": "v2",
|
||||||
"Rev": "3ac0863d7acf3bc44daf49afef8919af12f704ef"
|
"Rev": "7f080b6c11ac2d2347c3cd7521e810207ea1a041"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/coreos/pkg/httputil",
|
"ImportPath": "github.com/coreos/pkg/httputil",
|
||||||
"Comment": "v2-12-g3ac0863",
|
"Comment": "v2",
|
||||||
"Rev": "3ac0863d7acf3bc44daf49afef8919af12f704ef"
|
"Rev": "7f080b6c11ac2d2347c3cd7521e810207ea1a041"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/coreos/pkg/timeutil",
|
"ImportPath": "github.com/coreos/pkg/timeutil",
|
||||||
"Comment": "v2-12-g3ac0863",
|
"Comment": "v2",
|
||||||
"Rev": "3ac0863d7acf3bc44daf49afef8919af12f704ef"
|
"Rev": "7f080b6c11ac2d2347c3cd7521e810207ea1a041"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/davecgh/go-spew/spew",
|
"ImportPath": "github.com/davecgh/go-spew/spew",
|
||||||
@ -93,77 +89,77 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/docker/distribution",
|
"ImportPath": "github.com/docker/distribution",
|
||||||
"Comment": "docs-v2.4.1-2016-06-28-6-g4e17ab5",
|
"Comment": "v2.5.0-rc.1-22-g4e17ab5",
|
||||||
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/docker/distribution/context",
|
"ImportPath": "github.com/docker/distribution/context",
|
||||||
"Comment": "docs-v2.4.1-2016-06-28-6-g4e17ab5",
|
"Comment": "v2.5.0-rc.1-22-g4e17ab5",
|
||||||
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/docker/distribution/digest",
|
"ImportPath": "github.com/docker/distribution/digest",
|
||||||
"Comment": "docs-v2.4.1-2016-06-28-6-g4e17ab5",
|
"Comment": "v2.5.0-rc.1-22-g4e17ab5",
|
||||||
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/docker/distribution/manifest",
|
"ImportPath": "github.com/docker/distribution/manifest",
|
||||||
"Comment": "docs-v2.4.1-2016-06-28-6-g4e17ab5",
|
"Comment": "v2.5.0-rc.1-22-g4e17ab5",
|
||||||
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/docker/distribution/manifest/schema1",
|
"ImportPath": "github.com/docker/distribution/manifest/schema1",
|
||||||
"Comment": "docs-v2.4.1-2016-06-28-6-g4e17ab5",
|
"Comment": "v2.5.0-rc.1-22-g4e17ab5",
|
||||||
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/docker/distribution/manifest/schema2",
|
"ImportPath": "github.com/docker/distribution/manifest/schema2",
|
||||||
"Comment": "docs-v2.4.1-2016-06-28-6-g4e17ab5",
|
"Comment": "v2.5.0-rc.1-22-g4e17ab5",
|
||||||
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/docker/distribution/reference",
|
"ImportPath": "github.com/docker/distribution/reference",
|
||||||
"Comment": "docs-v2.4.1-2016-06-28-6-g4e17ab5",
|
"Comment": "v2.5.0-rc.1-22-g4e17ab5",
|
||||||
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/docker/distribution/registry/api/errcode",
|
"ImportPath": "github.com/docker/distribution/registry/api/errcode",
|
||||||
"Comment": "docs-v2.4.1-2016-06-28-6-g4e17ab5",
|
"Comment": "v2.5.0-rc.1-22-g4e17ab5",
|
||||||
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/docker/distribution/registry/api/v2",
|
"ImportPath": "github.com/docker/distribution/registry/api/v2",
|
||||||
"Comment": "docs-v2.4.1-2016-06-28-6-g4e17ab5",
|
"Comment": "v2.5.0-rc.1-22-g4e17ab5",
|
||||||
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/docker/distribution/registry/client",
|
"ImportPath": "github.com/docker/distribution/registry/client",
|
||||||
"Comment": "docs-v2.4.1-2016-06-28-6-g4e17ab5",
|
"Comment": "v2.5.0-rc.1-22-g4e17ab5",
|
||||||
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/docker/distribution/registry/client/auth",
|
"ImportPath": "github.com/docker/distribution/registry/client/auth",
|
||||||
"Comment": "docs-v2.4.1-2016-06-28-6-g4e17ab5",
|
"Comment": "v2.5.0-rc.1-22-g4e17ab5",
|
||||||
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/docker/distribution/registry/client/transport",
|
"ImportPath": "github.com/docker/distribution/registry/client/transport",
|
||||||
"Comment": "docs-v2.4.1-2016-06-28-6-g4e17ab5",
|
"Comment": "v2.5.0-rc.1-22-g4e17ab5",
|
||||||
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/docker/distribution/registry/storage/cache",
|
"ImportPath": "github.com/docker/distribution/registry/storage/cache",
|
||||||
"Comment": "docs-v2.4.1-2016-06-28-6-g4e17ab5",
|
"Comment": "v2.5.0-rc.1-22-g4e17ab5",
|
||||||
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/docker/distribution/registry/storage/cache/memory",
|
"ImportPath": "github.com/docker/distribution/registry/storage/cache/memory",
|
||||||
"Comment": "docs-v2.4.1-2016-06-28-6-g4e17ab5",
|
"Comment": "v2.5.0-rc.1-22-g4e17ab5",
|
||||||
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/docker/distribution/uuid",
|
"ImportPath": "github.com/docker/distribution/uuid",
|
||||||
"Comment": "docs-v2.4.1-2016-06-28-6-g4e17ab5",
|
"Comment": "v2.5.0-rc.1-22-g4e17ab5",
|
||||||
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@ -483,73 +479,78 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/docker/libcompose/config",
|
"ImportPath": "github.com/docker/libcompose/config",
|
||||||
"Comment": "v0.2.0-198-gced6fdd",
|
"Comment": "v0.3.0",
|
||||||
"Rev": "ced6fddc87aaf1bbf3c01df4530b6069508b8602"
|
"Rev": "c10fa1d7ef4e0fe05b2bc9ca7444ea421b1df236"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/docker/libcompose/docker",
|
"ImportPath": "github.com/docker/libcompose/docker",
|
||||||
"Comment": "v0.2.0-198-gced6fdd",
|
"Comment": "v0.3.0",
|
||||||
"Rev": "ced6fddc87aaf1bbf3c01df4530b6069508b8602"
|
"Rev": "c10fa1d7ef4e0fe05b2bc9ca7444ea421b1df236"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/docker/libcompose/docker/builder",
|
"ImportPath": "github.com/docker/libcompose/docker/builder",
|
||||||
"Comment": "v0.2.0-198-gced6fdd",
|
"Comment": "v0.3.0",
|
||||||
"Rev": "ced6fddc87aaf1bbf3c01df4530b6069508b8602"
|
"Rev": "c10fa1d7ef4e0fe05b2bc9ca7444ea421b1df236"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/docker/libcompose/docker/client",
|
"ImportPath": "github.com/docker/libcompose/docker/client",
|
||||||
"Comment": "v0.2.0-198-gced6fdd",
|
"Comment": "v0.3.0",
|
||||||
"Rev": "ced6fddc87aaf1bbf3c01df4530b6069508b8602"
|
"Rev": "c10fa1d7ef4e0fe05b2bc9ca7444ea421b1df236"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/docker/libcompose/docker/network",
|
"ImportPath": "github.com/docker/libcompose/docker/network",
|
||||||
"Comment": "v0.2.0-198-gced6fdd",
|
"Comment": "v0.3.0",
|
||||||
"Rev": "ced6fddc87aaf1bbf3c01df4530b6069508b8602"
|
"Rev": "c10fa1d7ef4e0fe05b2bc9ca7444ea421b1df236"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/docker/libcompose/docker/volume",
|
||||||
|
"Comment": "v0.3.0",
|
||||||
|
"Rev": "c10fa1d7ef4e0fe05b2bc9ca7444ea421b1df236"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/docker/libcompose/labels",
|
"ImportPath": "github.com/docker/libcompose/labels",
|
||||||
"Comment": "v0.2.0-198-gced6fdd",
|
"Comment": "v0.3.0",
|
||||||
"Rev": "ced6fddc87aaf1bbf3c01df4530b6069508b8602"
|
"Rev": "c10fa1d7ef4e0fe05b2bc9ca7444ea421b1df236"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/docker/libcompose/logger",
|
"ImportPath": "github.com/docker/libcompose/logger",
|
||||||
"Comment": "v0.2.0-198-gced6fdd",
|
"Comment": "v0.3.0",
|
||||||
"Rev": "ced6fddc87aaf1bbf3c01df4530b6069508b8602"
|
"Rev": "c10fa1d7ef4e0fe05b2bc9ca7444ea421b1df236"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/docker/libcompose/lookup",
|
"ImportPath": "github.com/docker/libcompose/lookup",
|
||||||
"Comment": "v0.2.0-198-gced6fdd",
|
"Comment": "v0.3.0",
|
||||||
"Rev": "ced6fddc87aaf1bbf3c01df4530b6069508b8602"
|
"Rev": "c10fa1d7ef4e0fe05b2bc9ca7444ea421b1df236"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/docker/libcompose/project",
|
"ImportPath": "github.com/docker/libcompose/project",
|
||||||
"Comment": "v0.2.0-198-gced6fdd",
|
"Comment": "v0.3.0",
|
||||||
"Rev": "ced6fddc87aaf1bbf3c01df4530b6069508b8602"
|
"Rev": "c10fa1d7ef4e0fe05b2bc9ca7444ea421b1df236"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/docker/libcompose/project/events",
|
"ImportPath": "github.com/docker/libcompose/project/events",
|
||||||
"Comment": "v0.2.0-198-gced6fdd",
|
"Comment": "v0.3.0",
|
||||||
"Rev": "ced6fddc87aaf1bbf3c01df4530b6069508b8602"
|
"Rev": "c10fa1d7ef4e0fe05b2bc9ca7444ea421b1df236"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/docker/libcompose/project/options",
|
"ImportPath": "github.com/docker/libcompose/project/options",
|
||||||
"Comment": "v0.2.0-198-gced6fdd",
|
"Comment": "v0.3.0",
|
||||||
"Rev": "ced6fddc87aaf1bbf3c01df4530b6069508b8602"
|
"Rev": "c10fa1d7ef4e0fe05b2bc9ca7444ea421b1df236"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/docker/libcompose/utils",
|
"ImportPath": "github.com/docker/libcompose/utils",
|
||||||
"Comment": "v0.2.0-198-gced6fdd",
|
"Comment": "v0.3.0",
|
||||||
"Rev": "ced6fddc87aaf1bbf3c01df4530b6069508b8602"
|
"Rev": "c10fa1d7ef4e0fe05b2bc9ca7444ea421b1df236"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/docker/libcompose/version",
|
"ImportPath": "github.com/docker/libcompose/version",
|
||||||
"Comment": "v0.2.0-198-gced6fdd",
|
"Comment": "v0.3.0",
|
||||||
"Rev": "ced6fddc87aaf1bbf3c01df4530b6069508b8602"
|
"Rev": "c10fa1d7ef4e0fe05b2bc9ca7444ea421b1df236"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/docker/libcompose/yaml",
|
"ImportPath": "github.com/docker/libcompose/yaml",
|
||||||
"Comment": "v0.2.0-198-gced6fdd",
|
"Comment": "v0.3.0",
|
||||||
"Rev": "ced6fddc87aaf1bbf3c01df4530b6069508b8602"
|
"Rev": "c10fa1d7ef4e0fe05b2bc9ca7444ea421b1df236"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/docker/libtrust",
|
"ImportPath": "github.com/docker/libtrust",
|
||||||
@ -834,19 +835,19 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/prometheus/common/expfmt",
|
"ImportPath": "github.com/prometheus/common/expfmt",
|
||||||
"Rev": "ebdfc6da46522d58825777cf1f90490a5b1ef1d8"
|
"Rev": "a6ab08426bb262e2d190097751f5cfd1cfdfd17d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg",
|
"ImportPath": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg",
|
||||||
"Rev": "ebdfc6da46522d58825777cf1f90490a5b1ef1d8"
|
"Rev": "a6ab08426bb262e2d190097751f5cfd1cfdfd17d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/prometheus/common/model",
|
"ImportPath": "github.com/prometheus/common/model",
|
||||||
"Rev": "ebdfc6da46522d58825777cf1f90490a5b1ef1d8"
|
"Rev": "a6ab08426bb262e2d190097751f5cfd1cfdfd17d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/prometheus/procfs",
|
"ImportPath": "github.com/prometheus/procfs",
|
||||||
"Rev": "abf152e5f3e97f2fafac028d2cc06c1feb87ffa5"
|
"Rev": "490cc6eb5fa45bf8a8b7b73c8bc82a8160e8531d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/spf13/cobra",
|
"ImportPath": "github.com/spf13/cobra",
|
||||||
|
|||||||
1
vendor/github.com/cloudfoundry-incubator/candiedyaml/.gitignore
generated
vendored
1
vendor/github.com/cloudfoundry-incubator/candiedyaml/.gitignore
generated
vendored
@ -1 +0,0 @@
|
|||||||
*.coverprofile
|
|
||||||
12
vendor/github.com/cloudfoundry-incubator/candiedyaml/.travis.yml
generated
vendored
12
vendor/github.com/cloudfoundry-incubator/candiedyaml/.travis.yml
generated
vendored
@ -1,12 +0,0 @@
|
|||||||
language: go
|
|
||||||
|
|
||||||
go:
|
|
||||||
- 1.4.1
|
|
||||||
|
|
||||||
install:
|
|
||||||
- go get -t -v ./...
|
|
||||||
- go install github.com/onsi/ginkgo/ginkgo
|
|
||||||
|
|
||||||
script:
|
|
||||||
- export PATH=$HOME/gopath/bin:$PATH
|
|
||||||
- ginkgo -r -failOnPending -randomizeAllSpecs -race
|
|
||||||
176
vendor/github.com/cloudfoundry-incubator/candiedyaml/LICENSE
generated
vendored
176
vendor/github.com/cloudfoundry-incubator/candiedyaml/LICENSE
generated
vendored
@ -1,176 +0,0 @@
|
|||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
59
vendor/github.com/cloudfoundry-incubator/candiedyaml/README.md
generated
vendored
59
vendor/github.com/cloudfoundry-incubator/candiedyaml/README.md
generated
vendored
@ -1,59 +0,0 @@
|
|||||||
[](https://travis-ci.org/cloudfoundry-incubator/candiedyaml)
|
|
||||||
[](https://godoc.org/github.com/cloudfoundry-incubator/candiedyaml)
|
|
||||||
|
|
||||||
|
|
||||||
candiedyaml
|
|
||||||
===========
|
|
||||||
|
|
||||||
YAML for Go
|
|
||||||
|
|
||||||
A YAML 1.1 parser with support for YAML 1.2 features
|
|
||||||
|
|
||||||
Usage
|
|
||||||
-----
|
|
||||||
|
|
||||||
```go
|
|
||||||
package myApp
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/cloudfoundry-incubator/candiedyaml"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
file, err := os.Open("path/to/some/file.yml")
|
|
||||||
if err != nil {
|
|
||||||
println("File does not exist:", err.Error())
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
|
|
||||||
document := new(interface{})
|
|
||||||
decoder := candiedyaml.NewDecoder(file)
|
|
||||||
err = decoder.Decode(document)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
println("Failed to decode document:", err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
println("parsed yml into interface:", fmt.Sprintf("%#v", document))
|
|
||||||
|
|
||||||
fileToWrite, err := os.Create("path/to/some/new/file.yml")
|
|
||||||
if err != nil {
|
|
||||||
println("Failed to open file for writing:", err.Error())
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
defer fileToWrite.Close()
|
|
||||||
|
|
||||||
encoder := candiedyaml.NewEncoder(fileToWrite)
|
|
||||||
err = encoder.Encode(document)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
println("Failed to encode document:", err.Error())
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
```
|
|
||||||
834
vendor/github.com/cloudfoundry-incubator/candiedyaml/api.go
generated
vendored
834
vendor/github.com/cloudfoundry-incubator/candiedyaml/api.go
generated
vendored
@ -1,834 +0,0 @@
|
|||||||
/*
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package candiedyaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create a new parser object.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_parser_initialize(parser *yaml_parser_t) bool {
|
|
||||||
*parser = yaml_parser_t{
|
|
||||||
raw_buffer: make([]byte, 0, INPUT_RAW_BUFFER_SIZE),
|
|
||||||
buffer: make([]byte, 0, INPUT_BUFFER_SIZE),
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Destroy a parser object.
|
|
||||||
*/
|
|
||||||
func yaml_parser_delete(parser *yaml_parser_t) {
|
|
||||||
*parser = yaml_parser_t{}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* String read handler.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (int, error) {
|
|
||||||
if parser.input_pos == len(parser.input) {
|
|
||||||
return 0, io.EOF
|
|
||||||
}
|
|
||||||
|
|
||||||
n := copy(buffer, parser.input[parser.input_pos:])
|
|
||||||
parser.input_pos += n
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* File read handler.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (int, error) {
|
|
||||||
return parser.input_reader.Read(buffer)
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set a string input.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
|
|
||||||
if parser.read_handler != nil {
|
|
||||||
panic("input already set")
|
|
||||||
}
|
|
||||||
|
|
||||||
parser.read_handler = yaml_string_read_handler
|
|
||||||
|
|
||||||
parser.input = input
|
|
||||||
parser.input_pos = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set a reader input
|
|
||||||
*/
|
|
||||||
func yaml_parser_set_input_reader(parser *yaml_parser_t, reader io.Reader) {
|
|
||||||
if parser.read_handler != nil {
|
|
||||||
panic("input already set")
|
|
||||||
}
|
|
||||||
|
|
||||||
parser.read_handler = yaml_file_read_handler
|
|
||||||
parser.input_reader = reader
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set a generic input.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_parser_set_input(parser *yaml_parser_t, handler yaml_read_handler_t) {
|
|
||||||
if parser.read_handler != nil {
|
|
||||||
panic("input already set")
|
|
||||||
}
|
|
||||||
|
|
||||||
parser.read_handler = handler
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set the source encoding.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
|
|
||||||
if parser.encoding != yaml_ANY_ENCODING {
|
|
||||||
panic("encoding already set")
|
|
||||||
}
|
|
||||||
|
|
||||||
parser.encoding = encoding
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create a new emitter object.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_emitter_initialize(emitter *yaml_emitter_t) {
|
|
||||||
*emitter = yaml_emitter_t{
|
|
||||||
buffer: make([]byte, OUTPUT_BUFFER_SIZE),
|
|
||||||
raw_buffer: make([]byte, 0, OUTPUT_RAW_BUFFER_SIZE),
|
|
||||||
states: make([]yaml_emitter_state_t, 0, INITIAL_STACK_SIZE),
|
|
||||||
events: make([]yaml_event_t, 0, INITIAL_QUEUE_SIZE),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func yaml_emitter_delete(emitter *yaml_emitter_t) {
|
|
||||||
*emitter = yaml_emitter_t{}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* String write handler.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
|
|
||||||
*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* File write handler.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
|
|
||||||
_, err := emitter.output_writer.Write(buffer)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set a string output.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_emitter_set_output_string(emitter *yaml_emitter_t, buffer *[]byte) {
|
|
||||||
if emitter.write_handler != nil {
|
|
||||||
panic("output already set")
|
|
||||||
}
|
|
||||||
|
|
||||||
emitter.write_handler = yaml_string_write_handler
|
|
||||||
emitter.output_buffer = buffer
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set a file output.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
|
|
||||||
if emitter.write_handler != nil {
|
|
||||||
panic("output already set")
|
|
||||||
}
|
|
||||||
|
|
||||||
emitter.write_handler = yaml_writer_write_handler
|
|
||||||
emitter.output_writer = w
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set a generic output handler.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_emitter_set_output(emitter *yaml_emitter_t, handler yaml_write_handler_t) {
|
|
||||||
if emitter.write_handler != nil {
|
|
||||||
panic("output already set")
|
|
||||||
}
|
|
||||||
|
|
||||||
emitter.write_handler = handler
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set the output encoding.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
|
|
||||||
if emitter.encoding != yaml_ANY_ENCODING {
|
|
||||||
panic("encoding already set")
|
|
||||||
}
|
|
||||||
|
|
||||||
emitter.encoding = encoding
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set the canonical output style.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
|
|
||||||
emitter.canonical = canonical
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set the indentation increment.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
|
|
||||||
if indent < 2 || indent > 9 {
|
|
||||||
indent = 2
|
|
||||||
}
|
|
||||||
emitter.best_indent = indent
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set the preferred line width.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
|
|
||||||
if width < 0 {
|
|
||||||
width = -1
|
|
||||||
}
|
|
||||||
emitter.best_width = width
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set if unescaped non-ASCII characters are allowed.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
|
|
||||||
emitter.unicode = unicode
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set the preferred line break character.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
|
|
||||||
emitter.line_break = line_break
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Destroy a token object.
|
|
||||||
*/
|
|
||||||
|
|
||||||
// yaml_DECLARE(void)
|
|
||||||
// yaml_token_delete(yaml_token_t *token)
|
|
||||||
// {
|
|
||||||
// assert(token); /* Non-NULL token object expected. */
|
|
||||||
//
|
|
||||||
// switch (token.type)
|
|
||||||
// {
|
|
||||||
// case yaml_TAG_DIRECTIVE_TOKEN:
|
|
||||||
// yaml_free(token.data.tag_directive.handle);
|
|
||||||
// yaml_free(token.data.tag_directive.prefix);
|
|
||||||
// break;
|
|
||||||
//
|
|
||||||
// case yaml_ALIAS_TOKEN:
|
|
||||||
// yaml_free(token.data.alias.value);
|
|
||||||
// break;
|
|
||||||
//
|
|
||||||
// case yaml_ANCHOR_TOKEN:
|
|
||||||
// yaml_free(token.data.anchor.value);
|
|
||||||
// break;
|
|
||||||
//
|
|
||||||
// case yaml_TAG_TOKEN:
|
|
||||||
// yaml_free(token.data.tag.handle);
|
|
||||||
// yaml_free(token.data.tag.suffix);
|
|
||||||
// break;
|
|
||||||
//
|
|
||||||
// case yaml_SCALAR_TOKEN:
|
|
||||||
// yaml_free(token.data.scalar.value);
|
|
||||||
// break;
|
|
||||||
//
|
|
||||||
// default:
|
|
||||||
// break;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// memset(token, 0, sizeof(yaml_token_t));
|
|
||||||
// }
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Check if a string is a valid UTF-8 sequence.
|
|
||||||
*
|
|
||||||
* Check 'reader.c' for more details on UTF-8 encoding.
|
|
||||||
*/
|
|
||||||
|
|
||||||
// static int
|
|
||||||
// yaml_check_utf8(yaml_char_t *start, size_t length)
|
|
||||||
// {
|
|
||||||
// yaml_char_t *end = start+length;
|
|
||||||
// yaml_char_t *pointer = start;
|
|
||||||
//
|
|
||||||
// while (pointer < end) {
|
|
||||||
// unsigned char octet;
|
|
||||||
// unsigned int width;
|
|
||||||
// unsigned int value;
|
|
||||||
// size_t k;
|
|
||||||
//
|
|
||||||
// octet = pointer[0];
|
|
||||||
// width = (octet & 0x80) == 0x00 ? 1 :
|
|
||||||
// (octet & 0xE0) == 0xC0 ? 2 :
|
|
||||||
// (octet & 0xF0) == 0xE0 ? 3 :
|
|
||||||
// (octet & 0xF8) == 0xF0 ? 4 : 0;
|
|
||||||
// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
|
|
||||||
// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
|
|
||||||
// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
|
|
||||||
// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
|
|
||||||
// if (!width) return 0;
|
|
||||||
// if (pointer+width > end) return 0;
|
|
||||||
// for (k = 1; k < width; k ++) {
|
|
||||||
// octet = pointer[k];
|
|
||||||
// if ((octet & 0xC0) != 0x80) return 0;
|
|
||||||
// value = (value << 6) + (octet & 0x3F);
|
|
||||||
// }
|
|
||||||
// if (!((width == 1) ||
|
|
||||||
// (width == 2 && value >= 0x80) ||
|
|
||||||
// (width == 3 && value >= 0x800) ||
|
|
||||||
// (width == 4 && value >= 0x10000))) return 0;
|
|
||||||
//
|
|
||||||
// pointer += width;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// return 1;
|
|
||||||
// }
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create STREAM-START.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
|
|
||||||
*event = yaml_event_t{
|
|
||||||
event_type: yaml_STREAM_START_EVENT,
|
|
||||||
encoding: encoding,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create STREAM-END.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_stream_end_event_initialize(event *yaml_event_t) {
|
|
||||||
*event = yaml_event_t{
|
|
||||||
event_type: yaml_STREAM_END_EVENT,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create DOCUMENT-START.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_document_start_event_initialize(event *yaml_event_t,
|
|
||||||
version_directive *yaml_version_directive_t,
|
|
||||||
tag_directives []yaml_tag_directive_t,
|
|
||||||
implicit bool) {
|
|
||||||
*event = yaml_event_t{
|
|
||||||
event_type: yaml_DOCUMENT_START_EVENT,
|
|
||||||
version_directive: version_directive,
|
|
||||||
tag_directives: tag_directives,
|
|
||||||
implicit: implicit,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create DOCUMENT-END.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
|
|
||||||
*event = yaml_event_t{
|
|
||||||
event_type: yaml_DOCUMENT_END_EVENT,
|
|
||||||
implicit: implicit,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create ALIAS.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) {
|
|
||||||
*event = yaml_event_t{
|
|
||||||
event_type: yaml_ALIAS_EVENT,
|
|
||||||
anchor: anchor,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create SCALAR.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_scalar_event_initialize(event *yaml_event_t,
|
|
||||||
anchor []byte, tag []byte,
|
|
||||||
value []byte,
|
|
||||||
plain_implicit bool, quoted_implicit bool,
|
|
||||||
style yaml_scalar_style_t) {
|
|
||||||
|
|
||||||
*event = yaml_event_t{
|
|
||||||
event_type: yaml_SCALAR_EVENT,
|
|
||||||
anchor: anchor,
|
|
||||||
tag: tag,
|
|
||||||
value: value,
|
|
||||||
implicit: plain_implicit,
|
|
||||||
quoted_implicit: quoted_implicit,
|
|
||||||
style: yaml_style_t(style),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create SEQUENCE-START.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_sequence_start_event_initialize(event *yaml_event_t,
|
|
||||||
anchor []byte, tag []byte, implicit bool, style yaml_sequence_style_t) {
|
|
||||||
*event = yaml_event_t{
|
|
||||||
event_type: yaml_SEQUENCE_START_EVENT,
|
|
||||||
anchor: anchor,
|
|
||||||
tag: tag,
|
|
||||||
implicit: implicit,
|
|
||||||
style: yaml_style_t(style),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create SEQUENCE-END.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_sequence_end_event_initialize(event *yaml_event_t) {
|
|
||||||
*event = yaml_event_t{
|
|
||||||
event_type: yaml_SEQUENCE_END_EVENT,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create MAPPING-START.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_mapping_start_event_initialize(event *yaml_event_t,
|
|
||||||
anchor []byte, tag []byte, implicit bool, style yaml_mapping_style_t) {
|
|
||||||
*event = yaml_event_t{
|
|
||||||
event_type: yaml_MAPPING_START_EVENT,
|
|
||||||
anchor: anchor,
|
|
||||||
tag: tag,
|
|
||||||
implicit: implicit,
|
|
||||||
style: yaml_style_t(style),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create MAPPING-END.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_mapping_end_event_initialize(event *yaml_event_t) {
|
|
||||||
*event = yaml_event_t{
|
|
||||||
event_type: yaml_MAPPING_END_EVENT,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Destroy an event object.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_event_delete(event *yaml_event_t) {
|
|
||||||
*event = yaml_event_t{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// /*
|
|
||||||
// * Create a document object.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// func yaml_document_initialize(document *yaml_document_t,
|
|
||||||
// version_directive *yaml_version_directive_t,
|
|
||||||
// tag_directives []yaml_tag_directive_t,
|
|
||||||
// start_implicit, end_implicit bool) bool {
|
|
||||||
//
|
|
||||||
//
|
|
||||||
// {
|
|
||||||
// struct {
|
|
||||||
// YAML_error_type_t error;
|
|
||||||
// } context;
|
|
||||||
// struct {
|
|
||||||
// yaml_node_t *start;
|
|
||||||
// yaml_node_t *end;
|
|
||||||
// yaml_node_t *top;
|
|
||||||
// } nodes = { NULL, NULL, NULL };
|
|
||||||
// yaml_version_directive_t *version_directive_copy = NULL;
|
|
||||||
// struct {
|
|
||||||
// yaml_tag_directive_t *start;
|
|
||||||
// yaml_tag_directive_t *end;
|
|
||||||
// yaml_tag_directive_t *top;
|
|
||||||
// } tag_directives_copy = { NULL, NULL, NULL };
|
|
||||||
// yaml_tag_directive_t value = { NULL, NULL };
|
|
||||||
// YAML_mark_t mark = { 0, 0, 0 };
|
|
||||||
//
|
|
||||||
// assert(document); /* Non-NULL document object is expected. */
|
|
||||||
// assert((tag_directives_start && tag_directives_end) ||
|
|
||||||
// (tag_directives_start == tag_directives_end));
|
|
||||||
// /* Valid tag directives are expected. */
|
|
||||||
//
|
|
||||||
// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error;
|
|
||||||
//
|
|
||||||
// if (version_directive) {
|
|
||||||
// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t));
|
|
||||||
// if (!version_directive_copy) goto error;
|
|
||||||
// version_directive_copy.major = version_directive.major;
|
|
||||||
// version_directive_copy.minor = version_directive.minor;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// if (tag_directives_start != tag_directives_end) {
|
|
||||||
// yaml_tag_directive_t *tag_directive;
|
|
||||||
// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
|
|
||||||
// goto error;
|
|
||||||
// for (tag_directive = tag_directives_start;
|
|
||||||
// tag_directive != tag_directives_end; tag_directive ++) {
|
|
||||||
// assert(tag_directive.handle);
|
|
||||||
// assert(tag_directive.prefix);
|
|
||||||
// if (!yaml_check_utf8(tag_directive.handle,
|
|
||||||
// strlen((char *)tag_directive.handle)))
|
|
||||||
// goto error;
|
|
||||||
// if (!yaml_check_utf8(tag_directive.prefix,
|
|
||||||
// strlen((char *)tag_directive.prefix)))
|
|
||||||
// goto error;
|
|
||||||
// value.handle = yaml_strdup(tag_directive.handle);
|
|
||||||
// value.prefix = yaml_strdup(tag_directive.prefix);
|
|
||||||
// if (!value.handle || !value.prefix) goto error;
|
|
||||||
// if (!PUSH(&context, tag_directives_copy, value))
|
|
||||||
// goto error;
|
|
||||||
// value.handle = NULL;
|
|
||||||
// value.prefix = NULL;
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
|
|
||||||
// tag_directives_copy.start, tag_directives_copy.top,
|
|
||||||
// start_implicit, end_implicit, mark, mark);
|
|
||||||
//
|
|
||||||
// return 1;
|
|
||||||
//
|
|
||||||
// error:
|
|
||||||
// STACK_DEL(&context, nodes);
|
|
||||||
// yaml_free(version_directive_copy);
|
|
||||||
// while (!STACK_EMPTY(&context, tag_directives_copy)) {
|
|
||||||
// yaml_tag_directive_t value = POP(&context, tag_directives_copy);
|
|
||||||
// yaml_free(value.handle);
|
|
||||||
// yaml_free(value.prefix);
|
|
||||||
// }
|
|
||||||
// STACK_DEL(&context, tag_directives_copy);
|
|
||||||
// yaml_free(value.handle);
|
|
||||||
// yaml_free(value.prefix);
|
|
||||||
//
|
|
||||||
// return 0;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Destroy a document object.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(void)
|
|
||||||
// yaml_document_delete(document *yaml_document_t)
|
|
||||||
// {
|
|
||||||
// struct {
|
|
||||||
// YAML_error_type_t error;
|
|
||||||
// } context;
|
|
||||||
// yaml_tag_directive_t *tag_directive;
|
|
||||||
//
|
|
||||||
// context.error = yaml_NO_ERROR; /* Eliminate a compliler warning. */
|
|
||||||
//
|
|
||||||
// assert(document); /* Non-NULL document object is expected. */
|
|
||||||
//
|
|
||||||
// while (!STACK_EMPTY(&context, document.nodes)) {
|
|
||||||
// yaml_node_t node = POP(&context, document.nodes);
|
|
||||||
// yaml_free(node.tag);
|
|
||||||
// switch (node.type) {
|
|
||||||
// case yaml_SCALAR_NODE:
|
|
||||||
// yaml_free(node.data.scalar.value);
|
|
||||||
// break;
|
|
||||||
// case yaml_SEQUENCE_NODE:
|
|
||||||
// STACK_DEL(&context, node.data.sequence.items);
|
|
||||||
// break;
|
|
||||||
// case yaml_MAPPING_NODE:
|
|
||||||
// STACK_DEL(&context, node.data.mapping.pairs);
|
|
||||||
// break;
|
|
||||||
// default:
|
|
||||||
// assert(0); /* Should not happen. */
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
// STACK_DEL(&context, document.nodes);
|
|
||||||
//
|
|
||||||
// yaml_free(document.version_directive);
|
|
||||||
// for (tag_directive = document.tag_directives.start;
|
|
||||||
// tag_directive != document.tag_directives.end;
|
|
||||||
// tag_directive++) {
|
|
||||||
// yaml_free(tag_directive.handle);
|
|
||||||
// yaml_free(tag_directive.prefix);
|
|
||||||
// }
|
|
||||||
// yaml_free(document.tag_directives.start);
|
|
||||||
//
|
|
||||||
// memset(document, 0, sizeof(yaml_document_t));
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// /**
|
|
||||||
// * Get a document node.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(yaml_node_t *)
|
|
||||||
// yaml_document_get_node(document *yaml_document_t, int index)
|
|
||||||
// {
|
|
||||||
// assert(document); /* Non-NULL document object is expected. */
|
|
||||||
//
|
|
||||||
// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
|
|
||||||
// return document.nodes.start + index - 1;
|
|
||||||
// }
|
|
||||||
// return NULL;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// /**
|
|
||||||
// * Get the root object.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(yaml_node_t *)
|
|
||||||
// yaml_document_get_root_node(document *yaml_document_t)
|
|
||||||
// {
|
|
||||||
// assert(document); /* Non-NULL document object is expected. */
|
|
||||||
//
|
|
||||||
// if (document.nodes.top != document.nodes.start) {
|
|
||||||
// return document.nodes.start;
|
|
||||||
// }
|
|
||||||
// return NULL;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Add a scalar node to a document.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_document_add_scalar(document *yaml_document_t,
|
|
||||||
// yaml_char_t *tag, yaml_char_t *value, int length,
|
|
||||||
// yaml_scalar_style_t style)
|
|
||||||
// {
|
|
||||||
// struct {
|
|
||||||
// YAML_error_type_t error;
|
|
||||||
// } context;
|
|
||||||
// YAML_mark_t mark = { 0, 0, 0 };
|
|
||||||
// yaml_char_t *tag_copy = NULL;
|
|
||||||
// yaml_char_t *value_copy = NULL;
|
|
||||||
// yaml_node_t node;
|
|
||||||
//
|
|
||||||
// assert(document); /* Non-NULL document object is expected. */
|
|
||||||
// assert(value); /* Non-NULL value is expected. */
|
|
||||||
//
|
|
||||||
// if (!tag) {
|
|
||||||
// tag = (yaml_char_t *)yaml_DEFAULT_SCALAR_TAG;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error;
|
|
||||||
// tag_copy = yaml_strdup(tag);
|
|
||||||
// if (!tag_copy) goto error;
|
|
||||||
//
|
|
||||||
// if (length < 0) {
|
|
||||||
// length = strlen((char *)value);
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// if (!yaml_check_utf8(value, length)) goto error;
|
|
||||||
// value_copy = yaml_malloc(length+1);
|
|
||||||
// if (!value_copy) goto error;
|
|
||||||
// memcpy(value_copy, value, length);
|
|
||||||
// value_copy[length] = '\0';
|
|
||||||
//
|
|
||||||
// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark);
|
|
||||||
// if (!PUSH(&context, document.nodes, node)) goto error;
|
|
||||||
//
|
|
||||||
// return document.nodes.top - document.nodes.start;
|
|
||||||
//
|
|
||||||
// error:
|
|
||||||
// yaml_free(tag_copy);
|
|
||||||
// yaml_free(value_copy);
|
|
||||||
//
|
|
||||||
// return 0;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Add a sequence node to a document.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_document_add_sequence(document *yaml_document_t,
|
|
||||||
// yaml_char_t *tag, yaml_sequence_style_t style)
|
|
||||||
// {
|
|
||||||
// struct {
|
|
||||||
// YAML_error_type_t error;
|
|
||||||
// } context;
|
|
||||||
// YAML_mark_t mark = { 0, 0, 0 };
|
|
||||||
// yaml_char_t *tag_copy = NULL;
|
|
||||||
// struct {
|
|
||||||
// yaml_node_item_t *start;
|
|
||||||
// yaml_node_item_t *end;
|
|
||||||
// yaml_node_item_t *top;
|
|
||||||
// } items = { NULL, NULL, NULL };
|
|
||||||
// yaml_node_t node;
|
|
||||||
//
|
|
||||||
// assert(document); /* Non-NULL document object is expected. */
|
|
||||||
//
|
|
||||||
// if (!tag) {
|
|
||||||
// tag = (yaml_char_t *)yaml_DEFAULT_SEQUENCE_TAG;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error;
|
|
||||||
// tag_copy = yaml_strdup(tag);
|
|
||||||
// if (!tag_copy) goto error;
|
|
||||||
//
|
|
||||||
// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error;
|
|
||||||
//
|
|
||||||
// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
|
|
||||||
// style, mark, mark);
|
|
||||||
// if (!PUSH(&context, document.nodes, node)) goto error;
|
|
||||||
//
|
|
||||||
// return document.nodes.top - document.nodes.start;
|
|
||||||
//
|
|
||||||
// error:
|
|
||||||
// STACK_DEL(&context, items);
|
|
||||||
// yaml_free(tag_copy);
|
|
||||||
//
|
|
||||||
// return 0;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Add a mapping node to a document.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_document_add_mapping(document *yaml_document_t,
|
|
||||||
// yaml_char_t *tag, yaml_mapping_style_t style)
|
|
||||||
// {
|
|
||||||
// struct {
|
|
||||||
// YAML_error_type_t error;
|
|
||||||
// } context;
|
|
||||||
// YAML_mark_t mark = { 0, 0, 0 };
|
|
||||||
// yaml_char_t *tag_copy = NULL;
|
|
||||||
// struct {
|
|
||||||
// yaml_node_pair_t *start;
|
|
||||||
// yaml_node_pair_t *end;
|
|
||||||
// yaml_node_pair_t *top;
|
|
||||||
// } pairs = { NULL, NULL, NULL };
|
|
||||||
// yaml_node_t node;
|
|
||||||
//
|
|
||||||
// assert(document); /* Non-NULL document object is expected. */
|
|
||||||
//
|
|
||||||
// if (!tag) {
|
|
||||||
// tag = (yaml_char_t *)yaml_DEFAULT_MAPPING_TAG;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error;
|
|
||||||
// tag_copy = yaml_strdup(tag);
|
|
||||||
// if (!tag_copy) goto error;
|
|
||||||
//
|
|
||||||
// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error;
|
|
||||||
//
|
|
||||||
// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
|
|
||||||
// style, mark, mark);
|
|
||||||
// if (!PUSH(&context, document.nodes, node)) goto error;
|
|
||||||
//
|
|
||||||
// return document.nodes.top - document.nodes.start;
|
|
||||||
//
|
|
||||||
// error:
|
|
||||||
// STACK_DEL(&context, pairs);
|
|
||||||
// yaml_free(tag_copy);
|
|
||||||
//
|
|
||||||
// return 0;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Append an item to a sequence node.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_document_append_sequence_item(document *yaml_document_t,
|
|
||||||
// int sequence, int item)
|
|
||||||
// {
|
|
||||||
// struct {
|
|
||||||
// YAML_error_type_t error;
|
|
||||||
// } context;
|
|
||||||
//
|
|
||||||
// assert(document); /* Non-NULL document is required. */
|
|
||||||
// assert(sequence > 0
|
|
||||||
// && document.nodes.start + sequence <= document.nodes.top);
|
|
||||||
// /* Valid sequence id is required. */
|
|
||||||
// assert(document.nodes.start[sequence-1].type == yaml_SEQUENCE_NODE);
|
|
||||||
// /* A sequence node is required. */
|
|
||||||
// assert(item > 0 && document.nodes.start + item <= document.nodes.top);
|
|
||||||
// /* Valid item id is required. */
|
|
||||||
//
|
|
||||||
// if (!PUSH(&context,
|
|
||||||
// document.nodes.start[sequence-1].data.sequence.items, item))
|
|
||||||
// return 0;
|
|
||||||
//
|
|
||||||
// return 1;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Append a pair of a key and a value to a mapping node.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_document_append_mapping_pair(document *yaml_document_t,
|
|
||||||
// int mapping, int key, int value)
|
|
||||||
// {
|
|
||||||
// struct {
|
|
||||||
// YAML_error_type_t error;
|
|
||||||
// } context;
|
|
||||||
//
|
|
||||||
// yaml_node_pair_t pair;
|
|
||||||
//
|
|
||||||
// assert(document); /* Non-NULL document is required. */
|
|
||||||
// assert(mapping > 0
|
|
||||||
// && document.nodes.start + mapping <= document.nodes.top);
|
|
||||||
// /* Valid mapping id is required. */
|
|
||||||
// assert(document.nodes.start[mapping-1].type == yaml_MAPPING_NODE);
|
|
||||||
// /* A mapping node is required. */
|
|
||||||
// assert(key > 0 && document.nodes.start + key <= document.nodes.top);
|
|
||||||
// /* Valid key id is required. */
|
|
||||||
// assert(value > 0 && document.nodes.start + value <= document.nodes.top);
|
|
||||||
// /* Valid value id is required. */
|
|
||||||
//
|
|
||||||
// pair.key = key;
|
|
||||||
// pair.value = value;
|
|
||||||
//
|
|
||||||
// if (!PUSH(&context,
|
|
||||||
// document.nodes.start[mapping-1].data.mapping.pairs, pair))
|
|
||||||
// return 0;
|
|
||||||
//
|
|
||||||
// return 1;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
622
vendor/github.com/cloudfoundry-incubator/candiedyaml/decode.go
generated
vendored
622
vendor/github.com/cloudfoundry-incubator/candiedyaml/decode.go
generated
vendored
@ -1,622 +0,0 @@
|
|||||||
/*
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package candiedyaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"reflect"
|
|
||||||
"runtime"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Unmarshaler interface {
|
|
||||||
UnmarshalYAML(tag string, value interface{}) error
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Number represents a JSON number literal.
|
|
||||||
type Number string
|
|
||||||
|
|
||||||
// String returns the literal text of the number.
|
|
||||||
func (n Number) String() string { return string(n) }
|
|
||||||
|
|
||||||
// Float64 returns the number as a float64.
|
|
||||||
func (n Number) Float64() (float64, error) {
|
|
||||||
return strconv.ParseFloat(string(n), 64)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Int64 returns the number as an int64.
|
|
||||||
func (n Number) Int64() (int64, error) {
|
|
||||||
return strconv.ParseInt(string(n), 10, 64)
|
|
||||||
}
|
|
||||||
|
|
||||||
type Decoder struct {
|
|
||||||
parser yaml_parser_t
|
|
||||||
event yaml_event_t
|
|
||||||
replay_events []yaml_event_t
|
|
||||||
useNumber bool
|
|
||||||
|
|
||||||
anchors map[string][]yaml_event_t
|
|
||||||
tracking_anchors [][]yaml_event_t
|
|
||||||
}
|
|
||||||
|
|
||||||
type ParserError struct {
|
|
||||||
ErrorType YAML_error_type_t
|
|
||||||
Context string
|
|
||||||
ContextMark YAML_mark_t
|
|
||||||
Problem string
|
|
||||||
ProblemMark YAML_mark_t
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *ParserError) Error() string {
|
|
||||||
return fmt.Sprintf("yaml: [%s] %s at line %d, column %d", e.Context, e.Problem, e.ProblemMark.line+1, e.ProblemMark.column+1)
|
|
||||||
}
|
|
||||||
|
|
||||||
type UnexpectedEventError struct {
|
|
||||||
Value string
|
|
||||||
EventType yaml_event_type_t
|
|
||||||
At YAML_mark_t
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *UnexpectedEventError) Error() string {
|
|
||||||
return fmt.Sprintf("yaml: Unexpect event [%d]: '%s' at line %d, column %d", e.EventType, e.Value, e.At.line+1, e.At.column+1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func recovery(err *error) {
|
|
||||||
if r := recover(); r != nil {
|
|
||||||
if _, ok := r.(runtime.Error); ok {
|
|
||||||
panic(r)
|
|
||||||
}
|
|
||||||
|
|
||||||
var tmpError error
|
|
||||||
switch r := r.(type) {
|
|
||||||
case error:
|
|
||||||
tmpError = r
|
|
||||||
case string:
|
|
||||||
tmpError = errors.New(r)
|
|
||||||
default:
|
|
||||||
tmpError = errors.New("Unknown panic: " + reflect.ValueOf(r).String())
|
|
||||||
}
|
|
||||||
|
|
||||||
*err = tmpError
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func Unmarshal(data []byte, v interface{}) error {
|
|
||||||
d := NewDecoder(bytes.NewBuffer(data))
|
|
||||||
return d.Decode(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewDecoder(r io.Reader) *Decoder {
|
|
||||||
d := &Decoder{
|
|
||||||
anchors: make(map[string][]yaml_event_t),
|
|
||||||
tracking_anchors: make([][]yaml_event_t, 1),
|
|
||||||
}
|
|
||||||
yaml_parser_initialize(&d.parser)
|
|
||||||
yaml_parser_set_input_reader(&d.parser, r)
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Decoder) Decode(v interface{}) (err error) {
|
|
||||||
defer recovery(&err)
|
|
||||||
|
|
||||||
rv := reflect.ValueOf(v)
|
|
||||||
if rv.Kind() != reflect.Ptr || rv.IsNil() {
|
|
||||||
return fmt.Errorf("Expected a pointer or nil but was a %s at %s", rv.String(), d.event.start_mark)
|
|
||||||
}
|
|
||||||
|
|
||||||
if d.event.event_type == yaml_NO_EVENT {
|
|
||||||
d.nextEvent()
|
|
||||||
|
|
||||||
if d.event.event_type != yaml_STREAM_START_EVENT {
|
|
||||||
return errors.New("Invalid stream")
|
|
||||||
}
|
|
||||||
|
|
||||||
d.nextEvent()
|
|
||||||
}
|
|
||||||
|
|
||||||
d.document(rv)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Decoder) UseNumber() { d.useNumber = true }
|
|
||||||
|
|
||||||
func (d *Decoder) error(err error) {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Decoder) nextEvent() {
|
|
||||||
if d.event.event_type == yaml_STREAM_END_EVENT {
|
|
||||||
d.error(errors.New("The stream is closed"))
|
|
||||||
}
|
|
||||||
|
|
||||||
if d.replay_events != nil {
|
|
||||||
d.event = d.replay_events[0]
|
|
||||||
if len(d.replay_events) == 1 {
|
|
||||||
d.replay_events = nil
|
|
||||||
} else {
|
|
||||||
d.replay_events = d.replay_events[1:]
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if !yaml_parser_parse(&d.parser, &d.event) {
|
|
||||||
yaml_event_delete(&d.event)
|
|
||||||
|
|
||||||
d.error(&ParserError{
|
|
||||||
ErrorType: d.parser.error,
|
|
||||||
Context: d.parser.context,
|
|
||||||
ContextMark: d.parser.context_mark,
|
|
||||||
Problem: d.parser.problem,
|
|
||||||
ProblemMark: d.parser.problem_mark,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
last := len(d.tracking_anchors)
|
|
||||||
// skip aliases when tracking an anchor
|
|
||||||
if last > 0 && d.event.event_type != yaml_ALIAS_EVENT {
|
|
||||||
d.tracking_anchors[last-1] = append(d.tracking_anchors[last-1], d.event)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Decoder) document(rv reflect.Value) {
|
|
||||||
if d.event.event_type != yaml_DOCUMENT_START_EVENT {
|
|
||||||
d.error(fmt.Errorf("Expected document start at %s", d.event.start_mark))
|
|
||||||
}
|
|
||||||
|
|
||||||
d.nextEvent()
|
|
||||||
d.parse(rv)
|
|
||||||
|
|
||||||
if d.event.event_type != yaml_DOCUMENT_END_EVENT {
|
|
||||||
d.error(fmt.Errorf("Expected document end at %s", d.event.start_mark))
|
|
||||||
}
|
|
||||||
|
|
||||||
d.nextEvent()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Decoder) parse(rv reflect.Value) {
|
|
||||||
if !rv.IsValid() {
|
|
||||||
// skip ahead since we cannot store
|
|
||||||
d.valueInterface()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
anchor := string(d.event.anchor)
|
|
||||||
switch d.event.event_type {
|
|
||||||
case yaml_SEQUENCE_START_EVENT:
|
|
||||||
d.begin_anchor(anchor)
|
|
||||||
d.sequence(rv)
|
|
||||||
d.end_anchor(anchor)
|
|
||||||
case yaml_MAPPING_START_EVENT:
|
|
||||||
d.begin_anchor(anchor)
|
|
||||||
d.mapping(rv)
|
|
||||||
d.end_anchor(anchor)
|
|
||||||
case yaml_SCALAR_EVENT:
|
|
||||||
d.begin_anchor(anchor)
|
|
||||||
d.scalar(rv)
|
|
||||||
d.end_anchor(anchor)
|
|
||||||
case yaml_ALIAS_EVENT:
|
|
||||||
d.alias(rv)
|
|
||||||
case yaml_DOCUMENT_END_EVENT:
|
|
||||||
default:
|
|
||||||
d.error(&UnexpectedEventError{
|
|
||||||
Value: string(d.event.value),
|
|
||||||
EventType: d.event.event_type,
|
|
||||||
At: d.event.start_mark,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Decoder) begin_anchor(anchor string) {
|
|
||||||
if anchor != "" {
|
|
||||||
events := []yaml_event_t{d.event}
|
|
||||||
d.tracking_anchors = append(d.tracking_anchors, events)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// end_anchor finishes recording the innermost tracked anchor and files
// its captured event list under the anchor name in d.anchors. The slice
// surgery below is order-sensitive; see the inline comments.
func (d *Decoder) end_anchor(anchor string) {
    if anchor != "" {
        // Pop the event list that begin_anchor pushed.
        events := d.tracking_anchors[len(d.tracking_anchors)-1]
        d.tracking_anchors = d.tracking_anchors[0 : len(d.tracking_anchors)-1]
        // remove the anchor, replaying events shouldn't have anchors
        events[0].anchor = nil
        // we went one too many, remove the extra event
        events = events[:len(events)-1]
        // if nested, append to all the other anchors
        for i, e := range d.tracking_anchors {
            d.tracking_anchors[i] = append(e, events...)
        }
        d.anchors[anchor] = events
    }
}
|
|
||||||
|
|
||||||
// indirect walks down v, allocating pointers as needed, until it gets
// to a non-pointer. If it encounters an Unmarshaler, indirect stops and
// returns it together with a scratch value for the unmarshaler to fill.
// If decodingNull is true, indirect stops at the last pointer so it can
// be set to nil. (This mirrors encoding/json's decode indirection.)
func (d *Decoder) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, reflect.Value) {
    // If v is a named type and is addressable,
    // start with its address, so that if the type has pointer methods,
    // we find them.
    if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
        v = v.Addr()
    }
    for {
        // Load value from interface, but only if the result will be
        // usefully addressable.
        if v.Kind() == reflect.Interface && !v.IsNil() {
            e := v.Elem()
            if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
                v = e
                continue
            }
        }

        if v.Kind() != reflect.Ptr {
            break
        }

        if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
            break
        }

        // Allocate through nil pointers so decoding can proceed.
        if v.IsNil() {
            v.Set(reflect.New(v.Type().Elem()))
        }

        if v.Type().NumMethod() > 0 {
            if u, ok := v.Interface().(Unmarshaler); ok {
                // Hand back a fresh interface{} for the unmarshaler to
                // receive the decoded intermediate value.
                var temp interface{}
                return u, reflect.ValueOf(&temp)
            }
        }

        v = v.Elem()
    }

    return nil, v
}
|
|
||||||
|
|
||||||
// sequence decodes a YAML sequence into v, which must resolve (after
// indirection) to an array, a slice, or an empty interface. Slices are
// grown geometrically; fixed arrays absorb what fits, skip the rest,
// and have their unused tail zeroed. Unmarshaler targets are invoked
// via defer once the intermediate value is fully decoded.
func (d *Decoder) sequence(v reflect.Value) {
    if d.event.event_type != yaml_SEQUENCE_START_EVENT {
        d.error(fmt.Errorf("Expected sequence start at %s", d.event.start_mark))
    }

    u, pv := d.indirect(v, false)
    if u != nil {
        // Decode into the scratch value first, then hand it to the
        // custom unmarshaler when this function returns.
        defer func() {
            if err := u.UnmarshalYAML(yaml_SEQ_TAG, pv.Interface()); err != nil {
                d.error(err)
            }
        }()
        _, pv = d.indirect(pv, false)
    }

    v = pv

    // Check type of target.
    switch v.Kind() {
    case reflect.Interface:
        if v.NumMethod() == 0 {
            // Decoding into nil interface? Switch to non-reflect code.
            v.Set(reflect.ValueOf(d.sequenceInterface()))
            return
        }
        // Otherwise it's invalid.
        fallthrough
    default:
        d.error(fmt.Errorf("Expected an array, slice or interface{} but was a %s at %s", v, d.event.start_mark))
    case reflect.Array:
    case reflect.Slice:
        break
    }

    d.nextEvent()

    i := 0
done:
    for {
        switch d.event.event_type {
        case yaml_SEQUENCE_END_EVENT, yaml_DOCUMENT_END_EVENT:
            break done
        }

        // Get element of array, growing if necessary.
        if v.Kind() == reflect.Slice {
            // Grow slice if necessary
            if i >= v.Cap() {
                newcap := v.Cap() + v.Cap()/2
                if newcap < 4 {
                    newcap = 4
                }
                newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
                reflect.Copy(newv, v)
                v.Set(newv)
            }
            if i >= v.Len() {
                v.SetLen(i + 1)
            }
        }

        if i < v.Len() {
            // Decode into element.
            d.parse(v.Index(i))
        } else {
            // Ran out of fixed array: skip.
            d.parse(reflect.Value{})
        }
        i++
    }

    if i < v.Len() {
        if v.Kind() == reflect.Array {
            // Array. Zero the rest.
            z := reflect.Zero(v.Type().Elem())
            for ; i < v.Len(); i++ {
                v.Index(i).Set(z)
            }
        } else {
            v.SetLen(i)
        }
    }
    // An empty source sequence still produces a non-nil empty slice.
    if i == 0 && v.Kind() == reflect.Slice {
        v.Set(reflect.MakeSlice(v.Type(), 0, 0))
    }

    // DOCUMENT-END is left for the caller to consume.
    if d.event.event_type != yaml_DOCUMENT_END_EVENT {
        d.nextEvent()
    }
}
|
|
||||||
|
|
||||||
// mapping decodes a YAML mapping into v, which must resolve (after
// indirection) to a struct, a map, or an empty interface. Struct
// targets are delegated to mappingStruct; nil maps are allocated.
// Unmarshaler targets are invoked via defer once decoding finishes.
func (d *Decoder) mapping(v reflect.Value) {
    u, pv := d.indirect(v, false)
    if u != nil {
        defer func() {
            if err := u.UnmarshalYAML(yaml_MAP_TAG, pv.Interface()); err != nil {
                d.error(err)
            }
        }()
        _, pv = d.indirect(pv, false)
    }
    v = pv

    // Decoding into nil interface? Switch to non-reflect code.
    if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
        v.Set(reflect.ValueOf(d.mappingInterface()))
        return
    }

    // Check type of target: struct or map[X]Y
    switch v.Kind() {
    case reflect.Struct:
        d.mappingStruct(v)
        return
    case reflect.Map:
    default:
        d.error(fmt.Errorf("Expected a struct or map but was a %s at %s ", v, d.event.start_mark))
    }

    mapt := v.Type()
    if v.IsNil() {
        v.Set(reflect.MakeMap(mapt))
    }

    d.nextEvent()

    keyt := mapt.Key()
    mapElemt := mapt.Elem()

    // mapElem is reused across iterations to avoid reallocating.
    var mapElem reflect.Value
done:
    for {
        switch d.event.event_type {
        case yaml_MAPPING_END_EVENT:
            break done
        case yaml_DOCUMENT_END_EVENT:
            return
        }

        key := reflect.New(keyt)
        d.parse(key.Elem())

        if !mapElem.IsValid() {
            mapElem = reflect.New(mapElemt).Elem()
        } else {
            mapElem.Set(reflect.Zero(mapElemt))
        }

        d.parse(mapElem)

        v.SetMapIndex(key.Elem(), mapElem)
    }

    // Consume the MAPPING-END event.
    d.nextEvent()
}
|
|
||||||
|
|
||||||
// mappingStruct decodes a YAML mapping into the fields of struct v.
// Field resolution uses the cached field list for the struct type:
// an exact name match wins; otherwise the first case-insensitive match
// is used. Unknown keys are parsed into an invalid value (i.e. skipped).
func (d *Decoder) mappingStruct(v reflect.Value) {

    structt := v.Type()
    fields := cachedTypeFields(structt)

    d.nextEvent()

done:
    for {
        switch d.event.event_type {
        case yaml_MAPPING_END_EVENT:
            break done
        case yaml_DOCUMENT_END_EVENT:
            return
        }

        key := ""
        d.parse(reflect.ValueOf(&key))

        // Figure out field corresponding to key.
        var subv reflect.Value

        var f *field
        for i := range fields {
            ff := &fields[i]
            if ff.name == key {
                // Exact match takes precedence; stop searching.
                f = ff
                break
            }

            // Remember the first case-insensitive match as a fallback.
            if f == nil && strings.EqualFold(ff.name, key) {
                f = ff
            }
        }

        if f != nil {
            subv = v
            // Walk the (possibly embedded) field index path, allocating
            // intermediate nil pointers along the way.
            for _, i := range f.index {
                if subv.Kind() == reflect.Ptr {
                    if subv.IsNil() {
                        subv.Set(reflect.New(subv.Type().Elem()))
                    }
                    subv = subv.Elem()
                }
                subv = subv.Field(i)
            }
        }
        d.parse(subv)
    }

    // Consume the MAPPING-END event.
    d.nextEvent()
}
|
|
||||||
|
|
||||||
// scalar decodes a YAML scalar event into v. The scalar's tag and Go
// value are determined by resolve; null-like scalars (per null_values)
// make indirect stop at the last pointer so it can be set to nil.
// For Unmarshaler targets, the resolved tag is passed to UnmarshalYAML
// via the deferred call (tag is assigned before the defer runs).
func (d *Decoder) scalar(v reflect.Value) {
    val := string(d.event.value)
    wantptr := null_values[val]

    u, pv := d.indirect(v, wantptr)

    var tag string
    if u != nil {
        defer func() {
            if err := u.UnmarshalYAML(tag, pv.Interface()); err != nil {
                d.error(err)
            }
        }()

        _, pv = d.indirect(pv, wantptr)
    }
    v = pv

    var err error
    tag, err = resolve(d.event, v, d.useNumber)
    if err != nil {
        d.error(err)
    }

    d.nextEvent()
}
|
|
||||||
|
|
||||||
func (d *Decoder) alias(rv reflect.Value) {
|
|
||||||
val, ok := d.anchors[string(d.event.anchor)]
|
|
||||||
if !ok {
|
|
||||||
d.error(fmt.Errorf("missing anchor: '%s' at %s", d.event.anchor, d.event.start_mark))
|
|
||||||
}
|
|
||||||
|
|
||||||
d.replay_events = val
|
|
||||||
d.nextEvent()
|
|
||||||
d.parse(rv)
|
|
||||||
}
|
|
||||||
|
|
||||||
// valueInterface decodes the current node into a plain Go value
// (interface{}) without caller-supplied type information: sequences
// become []interface{}, mappings map[interface{}]interface{}, scalars
// whatever resolveInterface produces. Anchors are tracked so aliases
// inside untyped data still resolve.
func (d *Decoder) valueInterface() interface{} {
    var v interface{}

    anchor := string(d.event.anchor)
    switch d.event.event_type {
    case yaml_SEQUENCE_START_EVENT:
        d.begin_anchor(anchor)
        v = d.sequenceInterface()
    case yaml_MAPPING_START_EVENT:
        d.begin_anchor(anchor)
        v = d.mappingInterface()
    case yaml_SCALAR_EVENT:
        d.begin_anchor(anchor)
        v = d.scalarInterface()
    case yaml_ALIAS_EVENT:
        // Aliases replay into v directly and do not open an anchor of
        // their own, so return without the end_anchor below.
        rv := reflect.ValueOf(&v)
        d.alias(rv)
        return v
    case yaml_DOCUMENT_END_EVENT:
        d.error(&UnexpectedEventError{
            Value:     string(d.event.value),
            EventType: d.event.event_type,
            At:        d.event.start_mark,
        })

    }
    d.end_anchor(anchor)

    return v
}
|
|
||||||
|
|
||||||
func (d *Decoder) scalarInterface() interface{} {
|
|
||||||
_, v := resolveInterface(d.event, d.useNumber)
|
|
||||||
|
|
||||||
d.nextEvent()
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// sequenceInterface is like sequence but returns []interface{}.
// It assumes the current event is SEQUENCE-START and consumes through
// the matching SEQUENCE-END (DOCUMENT-END is left for the caller).
func (d *Decoder) sequenceInterface() []interface{} {
    var v = make([]interface{}, 0)

    d.nextEvent()

done:
    for {
        switch d.event.event_type {
        case yaml_SEQUENCE_END_EVENT, yaml_DOCUMENT_END_EVENT:
            break done
        }

        v = append(v, d.valueInterface())
    }

    if d.event.event_type != yaml_DOCUMENT_END_EVENT {
        d.nextEvent()
    }

    return v
}
|
|
||||||
|
|
||||||
// mappingInterface is like mapping but returns map[interface{}]interface{}.
// It assumes the current event is MAPPING-START and consumes through the
// matching MAPPING-END (DOCUMENT-END is left for the caller).
func (d *Decoder) mappingInterface() map[interface{}]interface{} {
    m := make(map[interface{}]interface{})

    d.nextEvent()

done:
    for {
        switch d.event.event_type {
        case yaml_MAPPING_END_EVENT, yaml_DOCUMENT_END_EVENT:
            break done
        }

        key := d.valueInterface()

        // Read value.
        m[key] = d.valueInterface()
    }

    if d.event.event_type != yaml_DOCUMENT_END_EVENT {
        d.nextEvent()
    }

    return m
}
|
|
||||||
2072
vendor/github.com/cloudfoundry-incubator/candiedyaml/emitter.go
generated
vendored
2072
vendor/github.com/cloudfoundry-incubator/candiedyaml/emitter.go
generated
vendored
File diff suppressed because it is too large
Load Diff
395
vendor/github.com/cloudfoundry-incubator/candiedyaml/encode.go
generated
vendored
395
vendor/github.com/cloudfoundry-incubator/candiedyaml/encode.go
generated
vendored
@ -1,395 +0,0 @@
|
|||||||
/*
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package candiedyaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/base64"
|
|
||||||
"io"
|
|
||||||
"math"
|
|
||||||
"reflect"
|
|
||||||
"regexp"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
    // Reflection singletons used by the encoder's type dispatch.
    timeTimeType  = reflect.TypeOf(time.Time{})
    marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
    numberType    = reflect.TypeOf(Number(""))

    // nonPrintable matches any rune outside YAML's printable set;
    // strings containing such runes are emitted base64-encoded.
    nonPrintable = regexp.MustCompile("[^\t\n\r\u0020-\u007E\u0085\u00A0-\uD7FF\uE000-\uFFFD]")
    // multiline matches YAML line-break characters, which force
    // literal block style for plain strings.
    multiline = regexp.MustCompile("\n|\u0085|\u2028|\u2029")

    // shortTags maps full YAML tag URIs to their canonical short form.
    shortTags = map[string]string{
        yaml_NULL_TAG:      "!!null",
        yaml_BOOL_TAG:      "!!bool",
        yaml_STR_TAG:       "!!str",
        yaml_INT_TAG:       "!!int",
        yaml_FLOAT_TAG:     "!!float",
        yaml_TIMESTAMP_TAG: "!!timestamp",
        yaml_SEQ_TAG:       "!!seq",
        yaml_MAP_TAG:       "!!map",
        yaml_BINARY_TAG:    "!!binary",
    }
)
|
|
||||||
|
|
||||||
// Marshaler lets a type customize its YAML serialization: it returns
// the tag to emit plus a replacement value that is marshaled in the
// receiver's place.
type Marshaler interface {
    MarshalYAML() (tag string, value interface{}, err error)
}

// An Encoder writes YAML documents to an output stream.
type Encoder struct {
    w       io.Writer
    emitter yaml_emitter_t // low-level event emitter state
    event   yaml_event_t   // scratch event reused between emits
    flow    bool           // request flow style for the next collection
    err     error
}
|
|
||||||
|
|
||||||
func Marshal(v interface{}) ([]byte, error) {
|
|
||||||
b := bytes.Buffer{}
|
|
||||||
e := NewEncoder(&b)
|
|
||||||
err := e.Encode(v)
|
|
||||||
return b.Bytes(), err
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewEncoder returns a new encoder that writes to w. It eagerly emits
// the STREAM-START and DOCUMENT-START events, so the stream is ready
// for the first Encode call.
func NewEncoder(w io.Writer) *Encoder {
    e := &Encoder{w: w}
    yaml_emitter_initialize(&e.emitter)
    yaml_emitter_set_output_writer(&e.emitter, e.w)
    yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
    e.emit()
    yaml_document_start_event_initialize(&e.event, nil, nil, true)
    e.emit()

    return e
}
|
|
||||||
|
|
||||||
// Encode writes the YAML encoding of v to the stream, then emits the
// DOCUMENT-END and STREAM-END events. Panics raised while marshaling
// are converted to an error by the deferred recovery.
func (e *Encoder) Encode(v interface{}) (err error) {
    defer recovery(&err)

    // A previously recorded error makes the encoder unusable.
    if e.err != nil {
        return e.err
    }

    e.marshal("", reflect.ValueOf(v), true)

    yaml_document_end_event_initialize(&e.event, true)
    e.emit()
    e.emitter.open_ended = false
    yaml_stream_end_event_initialize(&e.event)
    e.emit()

    return nil
}
|
|
||||||
|
|
||||||
func (e *Encoder) emit() {
|
|
||||||
if !yaml_emitter_emit(&e.emitter, &e.event) {
|
|
||||||
panic("bad emit")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// marshal emits v with the given tag, dispatching on v's kind.
// Custom Marshaler implementations take precedence: first on v's own
// type, then — when allowAddr is true — on *T for a non-pointer T
// (allowAddr is cleared by emitMarshaler's fallback to avoid an
// infinite recursion). Unsupported kinds panic; Encode's recovery
// converts the panic into an error.
func (e *Encoder) marshal(tag string, v reflect.Value, allowAddr bool) {
    vt := v.Type()

    if vt.Implements(marshalerType) {
        e.emitMarshaler(tag, v)
        return
    }

    if vt.Kind() != reflect.Ptr && allowAddr {
        if reflect.PtrTo(vt).Implements(marshalerType) {
            e.emitAddrMarshaler(tag, v)
            return
        }
    }

    switch v.Kind() {
    case reflect.Interface:
        if v.IsNil() {
            e.emitNil()
        } else {
            e.marshal(tag, v.Elem(), allowAddr)
        }
    case reflect.Map:
        e.emitMap(tag, v)
    case reflect.Ptr:
        if v.IsNil() {
            e.emitNil()
        } else {
            e.marshal(tag, v.Elem(), true)
        }
    case reflect.Struct:
        e.emitStruct(tag, v)
    case reflect.Slice:
        e.emitSlice(tag, v)
    case reflect.String:
        e.emitString(tag, v)
    case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
        e.emitInt(tag, v)
    case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
        e.emitUint(tag, v)
    case reflect.Float32, reflect.Float64:
        e.emitFloat(tag, v)
    case reflect.Bool:
        e.emitBool(tag, v)
    default:
        panic("Can't marshal type yet: " + v.Type().String())
    }
}
|
|
||||||
|
|
||||||
// emitMap emits v (a map) as a YAML mapping, visiting keys in sorted
// order (via stringValues) so output is deterministic.
func (e *Encoder) emitMap(tag string, v reflect.Value) {
    e.mapping(tag, func() {
        var keys stringValues = v.MapKeys()
        sort.Sort(keys)
        for _, k := range keys {
            e.marshal("", k, true)
            e.marshal("", v.MapIndex(k), true)
        }
    })
}
|
|
||||||
|
|
||||||
// emitStruct emits v (a struct) as a YAML mapping of its exported,
// non-omitted fields. time.Time gets special-cased to a timestamp
// scalar. Field metadata (name, omitempty, flow) comes from the cached
// per-type field list.
func (e *Encoder) emitStruct(tag string, v reflect.Value) {
    if v.Type() == timeTimeType {
        e.emitTime(tag, v)
        return
    }

    fields := cachedTypeFields(v.Type())

    e.mapping(tag, func() {
        for _, f := range fields {
            fv := fieldByIndex(v, f.index)
            if !fv.IsValid() || f.omitEmpty && isEmptyValue(fv) {
                continue
            }

            e.marshal("", reflect.ValueOf(f.name), true)
            // The flow flag applies only to this field's value.
            e.flow = f.flow
            e.marshal("", fv, true)
        }
    })
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitTime(tag string, v reflect.Value) {
|
|
||||||
t := v.Interface().(time.Time)
|
|
||||||
bytes, _ := t.MarshalText()
|
|
||||||
e.emitScalar(string(bytes), "", tag, yaml_PLAIN_SCALAR_STYLE)
|
|
||||||
}
|
|
||||||
|
|
||||||
func isEmptyValue(v reflect.Value) bool {
|
|
||||||
switch v.Kind() {
|
|
||||||
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
|
|
||||||
return v.Len() == 0
|
|
||||||
case reflect.Bool:
|
|
||||||
return !v.Bool()
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
|
||||||
return v.Int() == 0
|
|
||||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
|
||||||
return v.Uint() == 0
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
return v.Float() == 0
|
|
||||||
case reflect.Interface, reflect.Ptr:
|
|
||||||
return v.IsNil()
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// mapping brackets f() with MAPPING-START / MAPPING-END events.
// An empty tag marks the mapping as implicitly tagged. The one-shot
// e.flow flag selects flow style and is consumed (reset) here.
func (e *Encoder) mapping(tag string, f func()) {
    implicit := tag == ""
    style := yaml_BLOCK_MAPPING_STYLE
    if e.flow {
        e.flow = false
        style = yaml_FLOW_MAPPING_STYLE
    }
    yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
    e.emit()

    f()

    yaml_mapping_end_event_initialize(&e.event)
    e.emit()
}
|
|
||||||
|
|
||||||
// emitSlice emits v (a slice) as a YAML sequence, except []byte which
// is emitted base64-encoded. The one-shot e.flow flag selects flow
// style and is consumed here.
func (e *Encoder) emitSlice(tag string, v reflect.Value) {
    if v.Type() == byteSliceType {
        e.emitBase64(tag, v)
        return
    }

    implicit := tag == ""
    style := yaml_BLOCK_SEQUENCE_STYLE
    if e.flow {
        e.flow = false
        style = yaml_FLOW_SEQUENCE_STYLE
    }
    yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
    e.emit()

    n := v.Len()
    for i := 0; i < n; i++ {
        e.marshal("", v.Index(i), true)
    }

    yaml_sequence_end_event_initialize(&e.event)
    e.emit()
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitBase64(tag string, v reflect.Value) {
|
|
||||||
if v.IsNil() {
|
|
||||||
e.emitNil()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
s := v.Bytes()
|
|
||||||
|
|
||||||
dst := make([]byte, base64.StdEncoding.EncodedLen(len(s)))
|
|
||||||
|
|
||||||
base64.StdEncoding.Encode(dst, s)
|
|
||||||
e.emitScalar(string(dst), "", yaml_BINARY_TAG, yaml_DOUBLE_QUOTED_SCALAR_STYLE)
|
|
||||||
}
|
|
||||||
|
|
||||||
// emitString emits v's string, choosing a scalar style that survives a
// round trip: non-printable content falls back to base64; Number values
// stay plain; a string that would resolve to a non-string type (e.g.
// "true", "123") is double-quoted to preserve its string-ness; strings
// containing line breaks use literal block style; everything else is
// plain.
func (e *Encoder) emitString(tag string, v reflect.Value) {
    var style yaml_scalar_style_t
    s := v.String()

    if nonPrintable.MatchString(s) {
        e.emitBase64(tag, v)
        return
    }

    if v.Type() == numberType {
        style = yaml_PLAIN_SCALAR_STYLE
    } else {
        // Probe how an untagged reader would resolve this text.
        event := yaml_event_t{
            implicit: true,
            value:    []byte(s),
        }

        rtag, _ := resolveInterface(event, false)
        if tag == "" && rtag != yaml_STR_TAG {
            style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
        } else if multiline.MatchString(s) {
            style = yaml_LITERAL_SCALAR_STYLE
        } else {
            style = yaml_PLAIN_SCALAR_STYLE
        }
    }

    e.emitScalar(s, "", tag, style)
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitBool(tag string, v reflect.Value) {
|
|
||||||
s := strconv.FormatBool(v.Bool())
|
|
||||||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitInt(tag string, v reflect.Value) {
|
|
||||||
s := strconv.FormatInt(v.Int(), 10)
|
|
||||||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitUint(tag string, v reflect.Value) {
|
|
||||||
s := strconv.FormatUint(v.Uint(), 10)
|
|
||||||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitFloat(tag string, v reflect.Value) {
|
|
||||||
f := v.Float()
|
|
||||||
|
|
||||||
var s string
|
|
||||||
switch {
|
|
||||||
case math.IsNaN(f):
|
|
||||||
s = ".nan"
|
|
||||||
case math.IsInf(f, 1):
|
|
||||||
s = "+.inf"
|
|
||||||
case math.IsInf(f, -1):
|
|
||||||
s = "-.inf"
|
|
||||||
default:
|
|
||||||
s = strconv.FormatFloat(f, 'g', -1, v.Type().Bits())
|
|
||||||
}
|
|
||||||
|
|
||||||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitNil() {
|
|
||||||
e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
|
|
||||||
}
|
|
||||||
|
|
||||||
// emitScalar emits one scalar event. An empty tag marks the scalar
// implicit; a non-empty tag forces plain style (NOTE(review): this
// overrides the caller's requested style whenever a tag is present —
// appears intentional so the explicit tag stays visible, but confirm
// against the emitter's quoting rules). Tags are shortened to their
// !!name form when known.
func (e *Encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
    implicit := tag == ""
    if !implicit {
        style = yaml_PLAIN_SCALAR_STYLE
    }

    stag := shortTags[tag]
    if stag == "" {
        stag = tag
    }

    yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(stag), []byte(value), implicit, implicit, style)
    e.emit()
}
|
|
||||||
|
|
||||||
// emitMarshaler emits v via its own Marshaler implementation: the
// returned (tag, value) pair is re-marshaled with allowAddr=false to
// avoid re-entering the marshaler. Nil receivers and nil replacement
// values are emitted as null.
func (e *Encoder) emitMarshaler(tag string, v reflect.Value) {
    if v.Kind() == reflect.Ptr && v.IsNil() {
        e.emitNil()
        return
    }

    m := v.Interface().(Marshaler)
    if m == nil {
        e.emitNil()
        return
    }
    t, val, err := m.MarshalYAML()
    if err != nil {
        // Panic is converted to an error by Encode's recovery.
        panic(err)
    }
    if val == nil {
        e.emitNil()
        return
    }

    e.marshal(t, reflect.ValueOf(val), false)
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitAddrMarshaler(tag string, v reflect.Value) {
|
|
||||||
if !v.CanAddr() {
|
|
||||||
e.marshal(tag, v, false)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
va := v.Addr()
|
|
||||||
if va.IsNil() {
|
|
||||||
e.emitNil()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
m := v.Interface().(Marshaler)
|
|
||||||
t, val, err := m.MarshalYAML()
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if val == nil {
|
|
||||||
e.emitNil()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
e.marshal(t, reflect.ValueOf(val), false)
|
|
||||||
}
|
|
||||||
19
vendor/github.com/cloudfoundry-incubator/candiedyaml/libyaml-LICENSE
generated
vendored
19
vendor/github.com/cloudfoundry-incubator/candiedyaml/libyaml-LICENSE
generated
vendored
@ -1,19 +0,0 @@
|
|||||||
Copyright (c) 2006 Kirill Simonov
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
|
||||||
this software and associated documentation files (the "Software"), to deal in
|
|
||||||
the Software without restriction, including without limitation the rights to
|
|
||||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
|
||||||
of the Software, and to permit persons to whom the Software is furnished to do
|
|
||||||
so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
|
||||||
copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
SOFTWARE.
|
|
||||||
1230
vendor/github.com/cloudfoundry-incubator/candiedyaml/parser.go
generated
vendored
1230
vendor/github.com/cloudfoundry-incubator/candiedyaml/parser.go
generated
vendored
File diff suppressed because it is too large
Load Diff
465
vendor/github.com/cloudfoundry-incubator/candiedyaml/reader.go
generated
vendored
465
vendor/github.com/cloudfoundry-incubator/candiedyaml/reader.go
generated
vendored
@ -1,465 +0,0 @@
|
|||||||
/*
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package candiedyaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
/*
 * yaml_parser_set_reader_error records a reader-level error (message,
 * byte offset, offending value) on the parser and returns false so
 * callers can propagate the failure directly.
 */

func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string,
    offset int, value int) bool {
    parser.error = yaml_READER_ERROR
    parser.problem = problem
    parser.problem_offset = offset
    parser.problem_value = value

    return false
}
|
|
||||||
|
|
||||||
/*
 * Byte order marks, used to detect the input stream's encoding.
 */
const (
    BOM_UTF8    = "\xef\xbb\xbf"
    BOM_UTF16LE = "\xff\xfe"
    BOM_UTF16BE = "\xfe\xff"
)
|
|
||||||
|
|
||||||
/*
 * Determine the input stream encoding by checking the BOM symbol. If no BOM is
 * found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure.
 */

func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
    /* Ensure that we had enough bytes in the raw buffer (a BOM is at
     * most 3 bytes). */
    for !parser.eof &&
        len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
        if !yaml_parser_update_raw_buffer(parser) {
            return false
        }
    }

    /* Determine the encoding: consume a recognized BOM and advance the
     * raw-buffer position and stream offset past it. */
    raw := parser.raw_buffer
    pos := parser.raw_buffer_pos
    remaining := len(raw) - pos
    if remaining >= 2 &&
        raw[pos] == BOM_UTF16LE[0] && raw[pos+1] == BOM_UTF16LE[1] {
        parser.encoding = yaml_UTF16LE_ENCODING
        parser.raw_buffer_pos += 2
        parser.offset += 2
    } else if remaining >= 2 &&
        raw[pos] == BOM_UTF16BE[0] && raw[pos+1] == BOM_UTF16BE[1] {
        parser.encoding = yaml_UTF16BE_ENCODING
        parser.raw_buffer_pos += 2
        parser.offset += 2
    } else if remaining >= 3 &&
        raw[pos] == BOM_UTF8[0] && raw[pos+1] == BOM_UTF8[1] && raw[pos+2] == BOM_UTF8[2] {
        parser.encoding = yaml_UTF8_ENCODING
        parser.raw_buffer_pos += 3
        parser.offset += 3
    } else {
        /* No BOM: default to UTF-8 without consuming anything. */
        parser.encoding = yaml_UTF8_ENCODING
    }

    return true
}
|
|
||||||
|
|
||||||
/*
 * Update the raw buffer: compact unread bytes to the front, then read
 * more input from the read handler into the spare capacity. io.EOF is
 * recorded on the parser (not an error); any other read error becomes
 * a reader error and returns false.
 */

func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
    size_read := 0

    /* Return if the raw buffer is full. */
    if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
        return true
    }

    /* Return on EOF. */

    if parser.eof {
        return true
    }

    /* Move the remaining bytes in the raw buffer to the beginning. */
    if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
        copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
    }
    parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
    parser.raw_buffer_pos = 0

    /* Call the read handler to fill the buffer. */
    size_read, err := parser.read_handler(parser,
        parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
    parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]

    if err == io.EOF {
        parser.eof = true
    } else if err != nil {
        return yaml_parser_set_reader_error(parser, "input error: "+err.Error(),
            parser.offset, -1)
    }

    return true
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Ensure that the buffer contains at least `length` characters.
|
|
||||||
* Return 1 on success, 0 on failure.
|
|
||||||
*
|
|
||||||
* The length is supposed to be significantly less that the buffer size.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/*
 * Ensure that parser.buffer contains at least `length` decoded characters.
 * Returns true on success, false on failure (a reader error is recorded).
 *
 * Characters are decoded from parser.raw_buffer (UTF-8 or UTF-16, chosen by
 * parser.encoding) and re-encoded into parser.buffer as UTF-8. `length` is
 * supposed to be significantly less than the buffer size.
 */
func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
	/* Read handler must be set. */
	if parser.read_handler == nil {
		panic("read handler must be set")
	}

	/* If the EOF flag is set and the raw buffer is empty, do nothing. */
	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
		return true
	}

	/* Return if the buffer already contains enough characters. */
	if parser.unread >= length {
		return true
	}

	/* Determine the input encoding if it is not known yet. */
	if parser.encoding == yaml_ANY_ENCODING {
		if !yaml_parser_determine_encoding(parser) {
			return false
		}
	}

	/* Move the unread characters to the beginning of the buffer. */
	buffer_end := len(parser.buffer)
	if 0 < parser.buffer_pos &&
		parser.buffer_pos < buffer_end {
		copy(parser.buffer, parser.buffer[parser.buffer_pos:])
		buffer_end -= parser.buffer_pos
		parser.buffer_pos = 0
	} else if parser.buffer_pos == buffer_end {
		/* Everything was consumed; start writing from the beginning. */
		buffer_end = 0
		parser.buffer_pos = 0
	}

	/* Expose the full capacity so decoded bytes can be written past buffer_end. */
	parser.buffer = parser.buffer[:cap(parser.buffer)]

	/* Fill the buffer until it has enough characters. */
	first := true
	for parser.unread < length {
		/* Fill the raw buffer if necessary (always refill after the first pass,
		 * since the inner loop exits only when raw bytes ran out or were all
		 * consumed). */
		if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
			if !yaml_parser_update_raw_buffer(parser) {
				/* Restore the logical length before failing. */
				parser.buffer = parser.buffer[:buffer_end]
				return false
			}
		}
		first = false

		/* Decode the raw buffer. */
		for parser.raw_buffer_pos != len(parser.raw_buffer) {
			var value rune /* decoded code point */
			var w int      /* width of the encoded character in raw bytes */

			raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
			incomplete := false

			/* Decode the next character. */
			switch parser.encoding {
			case yaml_UTF8_ENCODING:

				/*
				 * Decode a UTF-8 character per RFC 3629
				 * (http://www.ietf.org/rfc/rfc3629.txt):
				 *   1 byte : 0xxxxxxx                    (U+0000..U+007F)
				 *   2 bytes: 110xxxxx 10xxxxxx           (U+0080..U+07FF)
				 *   3 bytes: 1110xxxx 10xxxxxx 10xxxxxx  (U+0800..U+FFFF)
				 *   4 bytes: 11110xxx 3x10xxxxxx         (U+10000..U+10FFFF)
				 * The surrogate range 0xD800-0xDFFF is prohibited.
				 */

				/* Determine the length of the UTF-8 sequence. */
				octet := parser.raw_buffer[parser.raw_buffer_pos]
				w = width(octet)

				/* Check if the leading octet is valid. */
				if w == 0 {
					return yaml_parser_set_reader_error(parser,
						"invalid leading UTF-8 octet",
						parser.offset, int(octet))
				}

				/* Check if the raw buffer contains an incomplete character. */
				if w > raw_unread {
					if parser.eof {
						return yaml_parser_set_reader_error(parser,
							"incomplete UTF-8 octet sequence",
							parser.offset, -1)
					}
					incomplete = true
					break
				}

				/* Decode the leading octet. */
				switch {
				case octet&0x80 == 0x00:
					value = rune(octet & 0x7F)
				case octet&0xE0 == 0xC0:
					value = rune(octet & 0x1F)
				case octet&0xF0 == 0xE0:
					value = rune(octet & 0x0F)
				case octet&0xF8 == 0xF0:
					value = rune(octet & 0x07)
				default:
					value = 0
				}

				/* Check and decode the trailing octets. */
				for k := 1; k < w; k++ {
					octet = parser.raw_buffer[parser.raw_buffer_pos+k]

					/* Check if the octet is valid (must be a 10xxxxxx continuation). */
					if (octet & 0xC0) != 0x80 {
						return yaml_parser_set_reader_error(parser,
							"invalid trailing UTF-8 octet",
							parser.offset+k, int(octet))
					}

					/* Decode the octet. */
					value = (value << 6) + rune(octet&0x3F)
				}

				/* Reject overlong encodings: the value must need all w bytes. */
				switch {
				case w == 1:
				case w == 2 && value >= 0x80:
				case w == 3 && value >= 0x800:
				case w == 4 && value >= 0x10000:
				default:
					return yaml_parser_set_reader_error(parser,
						"invalid length of a UTF-8 sequence",
						parser.offset, -1)
				}

				/* Check the range of the value (no surrogates, <= U+10FFFF). */
				if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
					return yaml_parser_set_reader_error(parser,
						"invalid Unicode character",
						parser.offset, int(value))
				}
			case yaml_UTF16LE_ENCODING,
				yaml_UTF16BE_ENCODING:

				/* low/high select the byte order within each 16-bit unit. */
				var low, high int
				if parser.encoding == yaml_UTF16LE_ENCODING {
					low, high = 0, 1
				} else {
					high, low = 1, 0
				}

				/*
				 * Decode UTF-16 per RFC 2781
				 * (http://www.ietf.org/rfc/rfc2781.txt). Values above 0xFFFF
				 * are encoded as a surrogate pair:
				 *   high surrogate area 0xD800-0xDBFF (W1 = 110110yyyyyyyyyy)
				 *   low surrogate area  0xDC00-0xDFFF (W2 = 110111xxxxxxxxxx)
				 *   U = 0x10000 + (yyyyyyyyyy << 10 | xxxxxxxxxx)
				 */

				/* Check for incomplete UTF-16 character. */
				if raw_unread < 2 {
					if parser.eof {
						return yaml_parser_set_reader_error(parser,
							"incomplete UTF-16 character",
							parser.offset, -1)
					}
					incomplete = true
					break
				}

				/* Get the character. */
				value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
					(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)

				/* Check for unexpected low surrogate area. */
				if (value & 0xFC00) == 0xDC00 {
					return yaml_parser_set_reader_error(parser,
						"unexpected low surrogate area",
						parser.offset, int(value))
				}

				/* Check for a high surrogate area. */
				if (value & 0xFC00) == 0xD800 {

					w = 4

					/* Check for incomplete surrogate pair. */
					if raw_unread < 4 {
						if parser.eof {
							return yaml_parser_set_reader_error(parser,
								"incomplete UTF-16 surrogate pair",
								parser.offset, -1)
						}
						incomplete = true
						break
					}

					/* Get the next 16-bit unit. */
					value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
						(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)

					/* It must be a low surrogate. */
					if (value2 & 0xFC00) != 0xDC00 {
						return yaml_parser_set_reader_error(parser,
							"expected low surrogate area",
							parser.offset+2, int(value2))
					}

					/* Generate the value of the surrogate pair. */
					value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
				} else {
					w = 2
				}

				break

			default:
				panic("Impossible") /* Impossible. */
			}

			/* The raw buffer did not contain enough bytes to form a character:
			 * go back to the outer loop to refill it. */
			if incomplete {
				break
			}

			/*
			 * Check if the character is in the allowed range:
			 *   #x9 | #xA | #xD | [#x20-#x7E]               (8 bit)
			 *   | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD]    (16 bit)
			 *   | [#x10000-#x10FFFF]                        (32 bit)
			 */
			if !(value == 0x09 || value == 0x0A || value == 0x0D ||
				(value >= 0x20 && value <= 0x7E) ||
				(value == 0x85) || (value >= 0xA0 && value <= 0xD7FF) ||
				(value >= 0xE000 && value <= 0xFFFD) ||
				(value >= 0x10000 && value <= 0x10FFFF)) {
				return yaml_parser_set_reader_error(parser,
					"control characters are not allowed",
					parser.offset, int(value))
			}

			/* Move the raw pointers. */
			parser.raw_buffer_pos += w
			parser.offset += w

			/* Finally put the character into the buffer, re-encoded as UTF-8.
			 * NOTE: the same w (raw-encoding width) is used to advance
			 * buffer_end below; for UTF-16 input w is the raw width (2 or 4),
			 * which matches the original implementation's behavior. */

			/* 0000 0000-0000 007F . 0xxxxxxx */
			if value <= 0x7F {
				parser.buffer[buffer_end] = byte(value)
			} else if value <= 0x7FF {
				/* 0000 0080-0000 07FF . 110xxxxx 10xxxxxx */
				parser.buffer[buffer_end] = byte(0xC0 + (value >> 6))
				parser.buffer[buffer_end+1] = byte(0x80 + (value & 0x3F))
			} else if value <= 0xFFFF {
				/* 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx */
				parser.buffer[buffer_end] = byte(0xE0 + (value >> 12))
				parser.buffer[buffer_end+1] = byte(0x80 + ((value >> 6) & 0x3F))
				parser.buffer[buffer_end+2] = byte(0x80 + (value & 0x3F))
			} else {
				/* 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */
				parser.buffer[buffer_end] = byte(0xF0 + (value >> 18))
				parser.buffer[buffer_end+1] = byte(0x80 + ((value >> 12) & 0x3F))
				parser.buffer[buffer_end+2] = byte(0x80 + ((value >> 6) & 0x3F))
				parser.buffer[buffer_end+3] = byte(0x80 + (value & 0x3F))
			}

			buffer_end += w
			parser.unread++
		}

		/* On EOF, put NUL into the buffer and return. */
		if parser.eof {
			parser.buffer[buffer_end] = 0
			buffer_end++
			parser.buffer = parser.buffer[:buffer_end]
			parser.unread++
			return true
		}

	}

	/* Restore the logical length to cover only the decoded bytes. */
	parser.buffer = parser.buffer[:buffer_end]
	return true
}
|
|
||||||
449
vendor/github.com/cloudfoundry-incubator/candiedyaml/resolver.go
generated
vendored
449
vendor/github.com/cloudfoundry-incubator/candiedyaml/resolver.go
generated
vendored
@ -1,449 +0,0 @@
|
|||||||
/*
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package candiedyaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/base64"
|
|
||||||
"fmt"
|
|
||||||
"math"
|
|
||||||
"reflect"
|
|
||||||
"regexp"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// byteSliceType is used to recognize []byte destinations for !binary scalars.
var byteSliceType = reflect.TypeOf([]byte(nil))

// binary_tags lists the tags that mark a scalar as base64-encoded binary.
var binary_tags = [][]byte{[]byte("!binary"), []byte(yaml_BINARY_TAG)}

// bool_values / null_values map scalar spellings to their resolved meaning;
// both are populated in init().
var bool_values map[string]bool
var null_values map[string]bool

// First-byte classifiers used by resolveInterface to choose which typed
// resolution to attempt for an untagged scalar.
var signs = []byte{'-', '+'}
var nulls = []byte{'~', 'n', 'N'}
var bools = []byte{'t', 'T', 'f', 'F', 'y', 'Y', 'n', 'N', 'o', 'O'}

// Timestamp patterns (compiled in init()): full YAML timestamp and the
// plain yyyy-mm-dd date form.
var timestamp_regexp *regexp.Regexp
var ymd_regexp *regexp.Regexp
|
|
||||||
func init() {
|
|
||||||
bool_values = make(map[string]bool)
|
|
||||||
bool_values["y"] = true
|
|
||||||
bool_values["yes"] = true
|
|
||||||
bool_values["n"] = false
|
|
||||||
bool_values["no"] = false
|
|
||||||
bool_values["true"] = true
|
|
||||||
bool_values["false"] = false
|
|
||||||
bool_values["on"] = true
|
|
||||||
bool_values["off"] = false
|
|
||||||
|
|
||||||
null_values = make(map[string]bool)
|
|
||||||
null_values["~"] = true
|
|
||||||
null_values["null"] = true
|
|
||||||
null_values["Null"] = true
|
|
||||||
null_values["NULL"] = true
|
|
||||||
|
|
||||||
timestamp_regexp = regexp.MustCompile("^([0-9][0-9][0-9][0-9])-([0-9][0-9]?)-([0-9][0-9]?)(?:(?:[Tt]|[ \t]+)([0-9][0-9]?):([0-9][0-9]):([0-9][0-9])(?:\\.([0-9]*))?(?:[ \t]*(?:Z|([-+][0-9][0-9]?)(?::([0-9][0-9])?)?))?)?$")
|
|
||||||
ymd_regexp = regexp.MustCompile("^([0-9][0-9][0-9][0-9])-([0-9][0-9]?)-([0-9][0-9]?)$")
|
|
||||||
}
|
|
||||||
|
|
||||||
// resolve converts the scalar carried by event into the destination value v,
// dispatching on v's reflect.Kind. It returns the YAML tag describing how the
// scalar was interpreted, or an error when the scalar cannot be represented
// in v's type. When useNumber is set, numeric scalars destined for the
// Number string type are kept as their literal text.
func resolve(event yaml_event_t, v reflect.Value, useNumber bool, ) (string, error) {
	val := string(event.value)

	// Any recognized null spelling zeroes the destination regardless of kind.
	if null_values[val] {
		v.Set(reflect.Zero(v.Type()))
		return yaml_NULL_TAG, nil
	}

	switch v.Kind() {
	case reflect.String:
		// A Number destination is a string kind but must hold numeric text.
		if useNumber && v.Type() == numberType {
			tag, i := resolveInterface(event, useNumber)
			if n, ok := i.(Number); ok {
				v.Set(reflect.ValueOf(n))
				return tag, nil
			}
			return "", fmt.Errorf("Not a number: '%s' at %s", event.value, event.start_mark)
		}

		return resolve_string(val, v, event)
	case reflect.Bool:
		return resolve_bool(val, v, event)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return resolve_int(val, v, useNumber, event)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return resolve_uint(val, v, useNumber, event)
	case reflect.Float32, reflect.Float64:
		return resolve_float(val, v, useNumber, event)
	case reflect.Interface:
		// Untyped destination: guess the best representation from the text.
		_, i := resolveInterface(event, useNumber)
		if i != nil {
			v.Set(reflect.ValueOf(i))
		} else {
			v.Set(reflect.Zero(v.Type()))
		}

	case reflect.Struct:
		// The only struct destination handled here is time.Time.
		return resolve_time(val, v, event)
	case reflect.Slice:
		// Only []byte slices are supported; the scalar must be base64.
		if v.Type() != byteSliceType {
			return "", fmt.Errorf("Cannot resolve %s into %s at %s", val, v.String(), event.start_mark)
		}
		b, err := decode_binary(event.value, event)
		if err != nil {
			return "", err
		}

		v.Set(reflect.ValueOf(b))
	default:
		return "", fmt.Errorf("Unknown resolution for '%s' using %s at %s", val, v.String(), event.start_mark)
	}

	// Interface and Slice cases fall through here with a generic string tag.
	return yaml_STR_TAG, nil
}
|
|
||||||
|
|
||||||
func hasBinaryTag(event yaml_event_t) bool {
|
|
||||||
for _, tag := range binary_tags {
|
|
||||||
if bytes.Equal(event.tag, tag) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func decode_binary(value []byte, event yaml_event_t) ([]byte, error) {
|
|
||||||
b := make([]byte, base64.StdEncoding.DecodedLen(len(value)))
|
|
||||||
n, err := base64.StdEncoding.Decode(b, value)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Invalid base64 text: '%s' at %s", string(b), event.start_mark)
|
|
||||||
}
|
|
||||||
return b[:n], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func resolve_string(val string, v reflect.Value, event yaml_event_t) (string, error) {
|
|
||||||
if len(event.tag) > 0 {
|
|
||||||
if hasBinaryTag(event) {
|
|
||||||
b, err := decode_binary(event.value, event)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
val = string(b)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
v.SetString(val)
|
|
||||||
return yaml_STR_TAG, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func resolve_bool(val string, v reflect.Value, event yaml_event_t) (string, error) {
|
|
||||||
b, found := bool_values[strings.ToLower(val)]
|
|
||||||
if !found {
|
|
||||||
return "", fmt.Errorf("Invalid boolean: '%s' at %s", val, event.start_mark)
|
|
||||||
}
|
|
||||||
|
|
||||||
v.SetBool(b)
|
|
||||||
return yaml_BOOL_TAG, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func resolve_int(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) {
|
|
||||||
original := val
|
|
||||||
val = strings.Replace(val, "_", "", -1)
|
|
||||||
var value uint64
|
|
||||||
|
|
||||||
isNumberValue := v.Type() == numberType
|
|
||||||
|
|
||||||
sign := int64(1)
|
|
||||||
if val[0] == '-' {
|
|
||||||
sign = -1
|
|
||||||
val = val[1:]
|
|
||||||
} else if val[0] == '+' {
|
|
||||||
val = val[1:]
|
|
||||||
}
|
|
||||||
|
|
||||||
base := 0
|
|
||||||
if val == "0" {
|
|
||||||
if isNumberValue {
|
|
||||||
v.SetString("0")
|
|
||||||
} else {
|
|
||||||
v.Set(reflect.Zero(v.Type()))
|
|
||||||
}
|
|
||||||
|
|
||||||
return yaml_INT_TAG, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if strings.HasPrefix(val, "0o") {
|
|
||||||
base = 8
|
|
||||||
val = val[2:]
|
|
||||||
}
|
|
||||||
|
|
||||||
value, err := strconv.ParseUint(val, base, 64)
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark)
|
|
||||||
}
|
|
||||||
|
|
||||||
var val64 int64
|
|
||||||
if value <= math.MaxInt64 {
|
|
||||||
val64 = int64(value)
|
|
||||||
if sign == -1 {
|
|
||||||
val64 = -val64
|
|
||||||
}
|
|
||||||
} else if sign == -1 && value == uint64(math.MaxInt64)+1 {
|
|
||||||
val64 = math.MinInt64
|
|
||||||
} else {
|
|
||||||
return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark)
|
|
||||||
}
|
|
||||||
|
|
||||||
if isNumberValue {
|
|
||||||
v.SetString(strconv.FormatInt(val64, 10))
|
|
||||||
} else {
|
|
||||||
if v.OverflowInt(val64) {
|
|
||||||
return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark)
|
|
||||||
}
|
|
||||||
v.SetInt(val64)
|
|
||||||
}
|
|
||||||
|
|
||||||
return yaml_INT_TAG, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func resolve_uint(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) {
|
|
||||||
original := val
|
|
||||||
val = strings.Replace(val, "_", "", -1)
|
|
||||||
var value uint64
|
|
||||||
|
|
||||||
isNumberValue := v.Type() == numberType
|
|
||||||
|
|
||||||
if val[0] == '-' {
|
|
||||||
return "", fmt.Errorf("Unsigned int with negative value: '%s' at %s", original, event.start_mark)
|
|
||||||
}
|
|
||||||
|
|
||||||
if val[0] == '+' {
|
|
||||||
val = val[1:]
|
|
||||||
}
|
|
||||||
|
|
||||||
base := 0
|
|
||||||
if val == "0" {
|
|
||||||
if isNumberValue {
|
|
||||||
v.SetString("0")
|
|
||||||
} else {
|
|
||||||
v.Set(reflect.Zero(v.Type()))
|
|
||||||
}
|
|
||||||
|
|
||||||
return yaml_INT_TAG, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if strings.HasPrefix(val, "0o") {
|
|
||||||
base = 8
|
|
||||||
val = val[2:]
|
|
||||||
}
|
|
||||||
|
|
||||||
value, err := strconv.ParseUint(val, base, 64)
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf("Invalid unsigned integer: '%s' at %s", val, event.start_mark)
|
|
||||||
}
|
|
||||||
|
|
||||||
if isNumberValue {
|
|
||||||
v.SetString(strconv.FormatUint(value, 10))
|
|
||||||
} else {
|
|
||||||
if v.OverflowUint(value) {
|
|
||||||
return "", fmt.Errorf("Invalid unsigned integer: '%s' at %s", val, event.start_mark)
|
|
||||||
}
|
|
||||||
|
|
||||||
v.SetUint(value)
|
|
||||||
}
|
|
||||||
|
|
||||||
return yaml_INT_TAG, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func resolve_float(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) {
|
|
||||||
val = strings.Replace(val, "_", "", -1)
|
|
||||||
var value float64
|
|
||||||
|
|
||||||
isNumberValue := v.Type() == numberType
|
|
||||||
typeBits := 64
|
|
||||||
if !isNumberValue {
|
|
||||||
typeBits = v.Type().Bits()
|
|
||||||
}
|
|
||||||
|
|
||||||
sign := 1
|
|
||||||
if val[0] == '-' {
|
|
||||||
sign = -1
|
|
||||||
val = val[1:]
|
|
||||||
} else if val[0] == '+' {
|
|
||||||
val = val[1:]
|
|
||||||
}
|
|
||||||
|
|
||||||
valLower := strings.ToLower(val)
|
|
||||||
if valLower == ".inf" {
|
|
||||||
value = math.Inf(sign)
|
|
||||||
} else if valLower == ".nan" {
|
|
||||||
value = math.NaN()
|
|
||||||
} else {
|
|
||||||
var err error
|
|
||||||
value, err = strconv.ParseFloat(val, typeBits)
|
|
||||||
value *= float64(sign)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf("Invalid float: '%s' at %s", val, event.start_mark)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if isNumberValue {
|
|
||||||
v.SetString(strconv.FormatFloat(value, 'g', -1, typeBits))
|
|
||||||
} else {
|
|
||||||
if v.OverflowFloat(value) {
|
|
||||||
return "", fmt.Errorf("Invalid float: '%s' at %s", val, event.start_mark)
|
|
||||||
}
|
|
||||||
|
|
||||||
v.SetFloat(value)
|
|
||||||
}
|
|
||||||
|
|
||||||
return yaml_FLOAT_TAG, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// resolve_time parses the scalar as a YAML timestamp and stores the
// resulting time.Time into v. Two forms are accepted: a bare yyyy-mm-dd
// date (interpreted as midnight UTC) and the full timestamp pattern with
// optional fractional seconds and timezone offset. Returns an empty tag on
// success, or an error when neither pattern matches.
func resolve_time(val string, v reflect.Value, event yaml_event_t) (string, error) {
	var parsedTime time.Time
	matches := ymd_regexp.FindStringSubmatch(val)
	if len(matches) > 0 {
		// Date-only form: midnight UTC.
		year, _ := strconv.Atoi(matches[1])
		month, _ := strconv.Atoi(matches[2])
		day, _ := strconv.Atoi(matches[3])
		parsedTime = time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC)
	} else {
		matches = timestamp_regexp.FindStringSubmatch(val)
		if len(matches) == 0 {
			return "", fmt.Errorf("Invalid timestamp: '%s' at %s", val, event.start_mark)
		}

		// Atoi errors are ignored: the regexp guarantees digit-only groups.
		year, _ := strconv.Atoi(matches[1])
		month, _ := strconv.Atoi(matches[2])
		day, _ := strconv.Atoi(matches[3])
		hour, _ := strconv.Atoi(matches[4])
		min, _ := strconv.Atoi(matches[5])
		sec, _ := strconv.Atoi(matches[6])

		// NOTE(review): matches[7] holds the raw fractional-second digits but
		// is treated as a whole millisecond count, so ".5" parses as 5ms
		// rather than 500ms — preserved as-is; confirm intended behavior.
		nsec := 0
		if matches[7] != "" {
			millis, _ := strconv.Atoi(matches[7])
			nsec = int(time.Duration(millis) * time.Millisecond)
		}

		// Optional timezone offset: [-+]hh(:mm)?; absent means UTC.
		loc := time.UTC
		if matches[8] != "" {
			sign := matches[8][0]
			hr, _ := strconv.Atoi(matches[8][1:])
			min := 0 // shadows the minutes-of-time value above, intentionally
			if matches[9] != "" {
				min, _ = strconv.Atoi(matches[9])
			}

			zoneOffset := (hr*60 + min) * 60
			if sign == '-' {
				zoneOffset = -zoneOffset
			}

			loc = time.FixedZone("", zoneOffset)
		}
		parsedTime = time.Date(year, time.Month(month), day, hour, min, sec, nsec, loc)
	}

	v.Set(reflect.ValueOf(parsedTime))
	return "", nil
}
|
|
||||||
|
|
||||||
// resolveInterface guesses the best Go representation for an untagged (or
// implicitly tagged) scalar, trying typed resolutions in an order chosen by
// the scalar's first byte. It returns the resolved tag and value; plain text
// falls through to (yaml_STR_TAG, string).
func resolveInterface(event yaml_event_t, useNumber bool) (string, interface{}) {
	val := string(event.value)
	// Explicitly quoted/tagged non-implicit scalars stay plain strings.
	if len(event.tag) == 0 && !event.implicit {
		return "", val
	}

	if len(val) == 0 {
		return yaml_NULL_TAG, nil
	}

	var result interface{}

	sign := false
	c := val[0]
	switch {
	case bytes.IndexByte(signs, c) != -1:
		// A leading sign still means "try numeric", but rules out timestamps.
		sign = true
		fallthrough
	case c >= '0' && c <= '9':
		// Try int first; result points at either int64 or Number storage.
		i := int64(0)
		result = &i
		if useNumber {
			var n Number
			result = &n
		}

		v := reflect.ValueOf(result).Elem()
		if _, err := resolve_int(val, v, useNumber, event); err == nil {
			return yaml_INT_TAG, v.Interface()
		}

		// Then float.
		f := float64(0)
		result = &f
		if useNumber {
			var n Number
			result = &n
		}

		v = reflect.ValueOf(result).Elem()
		if _, err := resolve_float(val, v, useNumber, event); err == nil {
			return yaml_FLOAT_TAG, v.Interface()
		}

		// Finally a timestamp, but only for unsigned-looking text.
		if !sign {
			t := time.Time{}
			if _, err := resolve_time(val, reflect.ValueOf(&t).Elem(), event); err == nil {
				return "", t
			}
		}
	case bytes.IndexByte(nulls, c) != -1:
		// '~'/'n'/'N' may start a null spelling or a boolean (no/n).
		if null_values[val] {
			return yaml_NULL_TAG, nil
		}
		b := false
		if _, err := resolve_bool(val, reflect.ValueOf(&b).Elem(), event); err == nil {
			return yaml_BOOL_TAG, b
		}
	case c == '.':
		// Leading '.' may start .inf/.nan or a bare fraction.
		f := float64(0)
		result = &f
		if useNumber {
			var n Number
			result = &n
		}

		v := reflect.ValueOf(result).Elem()
		if _, err := resolve_float(val, v, useNumber, event); err == nil {
			return yaml_FLOAT_TAG, v.Interface()
		}
	case bytes.IndexByte(bools, c) != -1:
		b := false
		if _, err := resolve_bool(val, reflect.ValueOf(&b).Elem(), event); err == nil {
			return yaml_BOOL_TAG, b
		}
	}

	// An explicit binary tag decodes to []byte regardless of first byte.
	if hasBinaryTag(event) {
		bytes, err := decode_binary(event.value, event)
		if err == nil {
			return yaml_BINARY_TAG, bytes
		}
	}

	// Nothing matched: keep the scalar as a plain string.
	return yaml_STR_TAG, val
}
|
|
||||||
62
vendor/github.com/cloudfoundry-incubator/candiedyaml/run_parser.go
generated
vendored
62
vendor/github.com/cloudfoundry-incubator/candiedyaml/run_parser.go
generated
vendored
@ -1,62 +0,0 @@
|
|||||||
/*
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package candiedyaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Run_parser is a diagnostic driver: it scans each file named in args with
// the low-level token scanner, printing a SUCCESS/FAILED summary and token
// count per file. cmd is accepted for CLI symmetry but unused. Panics if a
// file cannot be opened.
func Run_parser(cmd string, args []string) {
	for i := 0; i < len(args); i++ {
		fmt.Printf("[%d] Scanning '%s'", i, args[i])
		file, err := os.Open(args[i])
		if err != nil {
			panic(fmt.Sprintf("Invalid file '%s': %s", args[i], err.Error()))
		}

		parser := yaml_parser_t{}
		yaml_parser_initialize(&parser)
		yaml_parser_set_input_reader(&parser, file)

		// Pull tokens until stream end or a scan failure.
		failed := false
		token := yaml_token_t{}
		count := 0
		for {
			if !yaml_parser_scan(&parser, &token) {
				failed = true
				break
			}

			if token.token_type == yaml_STREAM_END_TOKEN {
				break
			}
			count++
		}

		file.Close()

		msg := "SUCCESS"
		if failed {
			msg = "FAILED"
			// Report the parser's recorded problem location, if any.
			if parser.error != yaml_NO_ERROR {
				m := parser.problem_mark
				fmt.Printf("ERROR: (%s) %s @ line: %d col: %d\n",
					parser.context, parser.problem, m.line, m.column)
			}
		}
		fmt.Printf("%s (%d tokens)\n", msg, count)
	}
}
|
|
||||||
3318
vendor/github.com/cloudfoundry-incubator/candiedyaml/scanner.go
generated
vendored
3318
vendor/github.com/cloudfoundry-incubator/candiedyaml/scanner.go
generated
vendored
File diff suppressed because it is too large
Load Diff
360
vendor/github.com/cloudfoundry-incubator/candiedyaml/tags.go
generated
vendored
360
vendor/github.com/cloudfoundry-incubator/candiedyaml/tags.go
generated
vendored
@ -1,360 +0,0 @@
|
|||||||
/*
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package candiedyaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"unicode"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A field represents a single field found in a struct.
type field struct {
	name      string       // effective key: the yaml tag name, or the struct field name when untagged
	tag       bool         // true when name came from an explicit yaml tag
	index     []int        // index path through (possibly embedded) structs, for FieldByIndex
	typ       reflect.Type // the field's type (pointers followed for anonymous fields)
	omitEmpty bool         // presumably set from the "omitempty" tag option — confirm in typeFields
	flow      bool         // presumably set from the "flow" tag option — confirm in typeFields
}
|
|
||||||
|
|
||||||
// byName sorts field by name, breaking ties with depth,
|
|
||||||
// then breaking ties with "name came from json tag", then
|
|
||||||
// breaking ties with index sequence.
|
|
||||||
type byName []field
|
|
||||||
|
|
||||||
func (x byName) Len() int { return len(x) }
|
|
||||||
|
|
||||||
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
|
||||||
|
|
||||||
func (x byName) Less(i, j int) bool {
|
|
||||||
if x[i].name != x[j].name {
|
|
||||||
return x[i].name < x[j].name
|
|
||||||
}
|
|
||||||
if len(x[i].index) != len(x[j].index) {
|
|
||||||
return len(x[i].index) < len(x[j].index)
|
|
||||||
}
|
|
||||||
if x[i].tag != x[j].tag {
|
|
||||||
return x[i].tag
|
|
||||||
}
|
|
||||||
return byIndex(x).Less(i, j)
|
|
||||||
}
|
|
||||||
|
|
||||||
// byIndex sorts field by index sequence.
|
|
||||||
type byIndex []field
|
|
||||||
|
|
||||||
func (x byIndex) Len() int { return len(x) }
|
|
||||||
|
|
||||||
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
|
||||||
|
|
||||||
func (x byIndex) Less(i, j int) bool {
|
|
||||||
for k, xik := range x[i].index {
|
|
||||||
if k >= len(x[j].index) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if xik != x[j].index[k] {
|
|
||||||
return xik < x[j].index[k]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return len(x[i].index) < len(x[j].index)
|
|
||||||
}
|
|
||||||
|
|
||||||
// typeFields returns a list of fields that JSON should recognize for the given type.
// The algorithm is breadth-first search over the set of structs to include - the top struct
// and then any reachable anonymous structs.
// (Copied from encoding/json; the struct-tag key used here is "yaml".)
func typeFields(t reflect.Type) []field {
	// Anonymous fields to explore at the current level and the next.
	current := []field{}
	next := []field{{typ: t}}

	// Count of queued names for current level and the next.
	count := map[reflect.Type]int{}
	nextCount := map[reflect.Type]int{}

	// Types already visited at an earlier level.
	visited := map[reflect.Type]bool{}

	// Fields found.
	var fields []field

	for len(next) > 0 {
		// Swap the work lists; reuse the old slice's storage for the new level.
		current, next = next, current[:0]
		count, nextCount = nextCount, map[reflect.Type]int{}

		for _, f := range current {
			if visited[f.typ] {
				continue
			}
			visited[f.typ] = true

			// Scan f.typ for fields to include.
			for i := 0; i < f.typ.NumField(); i++ {
				sf := f.typ.Field(i)
				if sf.PkgPath != "" { // unexported
					continue
				}
				tag := sf.Tag.Get("yaml")
				if tag == "-" {
					// Explicitly excluded from (de)serialization.
					continue
				}
				name, opts := parseTag(tag)
				if !isValidTag(name) {
					name = ""
				}
				// Extend the index path with this field's position.
				index := make([]int, len(f.index)+1)
				copy(index, f.index)
				index[len(f.index)] = i

				ft := sf.Type
				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
					// Follow pointer.
					ft = ft.Elem()
				}

				// Record found field and index sequence.
				if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
					tagged := name != ""
					if name == "" {
						name = sf.Name
					}
					fields = append(fields, field{name, tagged, index, ft,
						opts.Contains("omitempty"), opts.Contains("flow")})
					if count[f.typ] > 1 {
						// If there were multiple instances, add a second,
						// so that the annihilation code will see a duplicate.
						// It only cares about the distinction between 1 or 2,
						// so don't bother generating any more copies.
						fields = append(fields, fields[len(fields)-1])
					}
					continue
				}

				// Record new anonymous struct to explore in next round.
				nextCount[ft]++
				if nextCount[ft] == 1 {
					next = append(next, field{name: ft.Name(), index: index, typ: ft})
				}
			}
		}
	}

	sort.Sort(byName(fields))

	// Delete all fields that are hidden by the Go rules for embedded fields,
	// except that fields with JSON tags are promoted.

	// The fields are sorted in primary order of name, secondary order
	// of field index length. Loop over names; for each name, delete
	// hidden fields by choosing the one dominant field that survives.
	out := fields[:0]
	for advance, i := 0, 0; i < len(fields); i += advance {
		// One iteration per name.
		// Find the sequence of fields with the name of this first field.
		fi := fields[i]
		name := fi.name
		for advance = 1; i+advance < len(fields); advance++ {
			fj := fields[i+advance]
			if fj.name != name {
				break
			}
		}
		if advance == 1 { // Only one field with this name
			out = append(out, fi)
			continue
		}
		dominant, ok := dominantField(fields[i : i+advance])
		if ok {
			out = append(out, dominant)
		}
	}

	fields = out
	sort.Sort(byIndex(fields))

	return fields
}
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// JSON tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
	// The fields are sorted in increasing index-length order. The winner
	// must therefore be one with the shortest index length. Drop all
	// longer entries, which is easy: just truncate the slice.
	length := len(fields[0].index)
	tagged := -1 // Index of first tagged field.
	for i, f := range fields {
		if len(f.index) > length {
			fields = fields[:i]
			break
		}
		if f.tag {
			if tagged >= 0 {
				// Multiple tagged fields at the same level: conflict.
				// Return no field.
				return field{}, false
			}
			tagged = i
		}
	}
	// A single tagged field at the shallowest depth wins outright.
	if tagged >= 0 {
		return fields[tagged], true
	}
	// All remaining fields have the same length. If there's more than one,
	// we have a conflict (two fields named "X" at the same level) and we
	// return no field.
	if len(fields) > 1 {
		return field{}, false
	}
	return fields[0], true
}
// fieldCache memoizes the result of typeFields per reflect.Type.
// Reads take the RLock; (re)computation publishes under the write lock.
var fieldCache struct {
	sync.RWMutex
	m map[reflect.Type][]field
}
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
	// Fast path: read-locked cache lookup.
	fieldCache.RLock()
	f := fieldCache.m[t]
	fieldCache.RUnlock()
	if f != nil {
		return f
	}

	// Compute fields without lock.
	// Might duplicate effort but won't hold other computations back.
	f = typeFields(t)
	if f == nil {
		// Store a non-nil sentinel so the lookup above caches "no fields" too.
		f = []field{}
	}

	fieldCache.Lock()
	if fieldCache.m == nil {
		// Lazily create the map on first insertion.
		fieldCache.m = map[reflect.Type][]field{}
	}
	fieldCache.m[t] = f
	fieldCache.Unlock()
	return f
}
// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
// (Inherited from encoding/json; in this package the tag key is "yaml".)
type tagOptions string
// isValidTag reports whether s is usable as a tag name: non-empty and
// composed of letters, digits, or a fixed set of punctuation characters.
func isValidTag(s string) bool {
	if len(s) == 0 {
		return false
	}
	for _, r := range s {
		if unicode.IsLetter(r) || unicode.IsDigit(r) {
			continue
		}
		// Backslash and quote chars are reserved, but otherwise any
		// punctuation chars are allowed in a tag name.
		if !strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", r) {
			return false
		}
	}
	return true
}
func fieldByIndex(v reflect.Value, index []int) reflect.Value {
|
|
||||||
for _, i := range index {
|
|
||||||
if v.Kind() == reflect.Ptr {
|
|
||||||
if v.IsNil() {
|
|
||||||
return reflect.Value{}
|
|
||||||
}
|
|
||||||
v = v.Elem()
|
|
||||||
}
|
|
||||||
v = v.Field(i)
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
func typeByIndex(t reflect.Type, index []int) reflect.Type {
|
|
||||||
for _, i := range index {
|
|
||||||
if t.Kind() == reflect.Ptr {
|
|
||||||
t = t.Elem()
|
|
||||||
}
|
|
||||||
t = t.Field(i).Type
|
|
||||||
}
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
// stringValues is a slice of reflect.Value holding *reflect.StringValue.
// It implements the methods to sort by string.
type stringValues []reflect.Value

// Len reports the number of values.
func (sv stringValues) Len() int { return len(sv) }

// Swap exchanges the values at positions i and j.
func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }

// Less compares the dereferenced values by string when both are strings;
// otherwise it falls back to ordering by reflect.Kind.
func (sv stringValues) Less(i, j int) bool {
	av, ak := getElem(sv[i])
	bv, bk := getElem(sv[j])
	if ak == reflect.String && bk == reflect.String {
		return av.String() < bv.String()
	}

	return ak < bk
}
func getElem(v reflect.Value) (reflect.Value, reflect.Kind) {
|
|
||||||
k := v.Kind()
|
|
||||||
for k == reflect.Interface || k == reflect.Ptr && !v.IsNil() {
|
|
||||||
v = v.Elem()
|
|
||||||
k = v.Kind()
|
|
||||||
}
|
|
||||||
|
|
||||||
return v, k
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseTag splits a struct field's json tag into its name and
|
|
||||||
// comma-separated options.
|
|
||||||
func parseTag(tag string) (string, tagOptions) {
|
|
||||||
if idx := strings.Index(tag, ","); idx != -1 {
|
|
||||||
return tag[:idx], tagOptions(tag[idx+1:])
|
|
||||||
}
|
|
||||||
return tag, tagOptions("")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Contains reports whether a comma-separated list of options
|
|
||||||
// contains a particular substr flag. substr must be surrounded by a
|
|
||||||
// string boundary or commas.
|
|
||||||
func (o tagOptions) Contains(optionName string) bool {
|
|
||||||
if len(o) == 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
s := string(o)
|
|
||||||
for s != "" {
|
|
||||||
var next string
|
|
||||||
i := strings.Index(s, ",")
|
|
||||||
if i >= 0 {
|
|
||||||
s, next = s[:i], s[i+1:]
|
|
||||||
}
|
|
||||||
if s == optionName {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
s = next
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
128
vendor/github.com/cloudfoundry-incubator/candiedyaml/writer.go
generated
vendored
128
vendor/github.com/cloudfoundry-incubator/candiedyaml/writer.go
generated
vendored
@ -1,128 +0,0 @@
|
|||||||
/*
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package candiedyaml
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set the writer error and return 0.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
|
|
||||||
emitter.error = yaml_WRITER_ERROR
|
|
||||||
emitter.problem = problem
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Flush the output buffer.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
|
|
||||||
if emitter.write_handler == nil {
|
|
||||||
panic("Write handler must be set") /* Write handler must be set. */
|
|
||||||
}
|
|
||||||
if emitter.encoding == yaml_ANY_ENCODING {
|
|
||||||
panic("Encoding must be set") /* Output encoding must be set. */
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Check if the buffer is empty. */
|
|
||||||
|
|
||||||
if emitter.buffer_pos == 0 {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
/* If the output encoding is UTF-8, we don't need to recode the buffer. */
|
|
||||||
|
|
||||||
if emitter.encoding == yaml_UTF8_ENCODING {
|
|
||||||
if err := emitter.write_handler(emitter,
|
|
||||||
emitter.buffer[:emitter.buffer_pos]); err != nil {
|
|
||||||
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
|
|
||||||
}
|
|
||||||
emitter.buffer_pos = 0
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Recode the buffer into the raw buffer. */
|
|
||||||
|
|
||||||
var low, high int
|
|
||||||
if emitter.encoding == yaml_UTF16LE_ENCODING {
|
|
||||||
low, high = 0, 1
|
|
||||||
} else {
|
|
||||||
high, low = 1, 0
|
|
||||||
}
|
|
||||||
|
|
||||||
pos := 0
|
|
||||||
for pos < emitter.buffer_pos {
|
|
||||||
|
|
||||||
/*
|
|
||||||
* See the "reader.c" code for more details on UTF-8 encoding. Note
|
|
||||||
* that we assume that the buffer contains a valid UTF-8 sequence.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* Read the next UTF-8 character. */
|
|
||||||
|
|
||||||
octet := emitter.buffer[pos]
|
|
||||||
|
|
||||||
var w int
|
|
||||||
var value rune
|
|
||||||
switch {
|
|
||||||
case octet&0x80 == 0x00:
|
|
||||||
w, value = 1, rune(octet&0x7F)
|
|
||||||
case octet&0xE0 == 0xC0:
|
|
||||||
w, value = 2, rune(octet&0x1F)
|
|
||||||
case octet&0xF0 == 0xE0:
|
|
||||||
w, value = 3, rune(octet&0x0F)
|
|
||||||
case octet&0xF8 == 0xF0:
|
|
||||||
w, value = 4, rune(octet&0x07)
|
|
||||||
}
|
|
||||||
|
|
||||||
for k := 1; k < w; k++ {
|
|
||||||
octet = emitter.buffer[pos+k]
|
|
||||||
value = (value << 6) + (rune(octet) & 0x3F)
|
|
||||||
}
|
|
||||||
|
|
||||||
pos += w
|
|
||||||
|
|
||||||
/* Write the character. */
|
|
||||||
|
|
||||||
if value < 0x10000 {
|
|
||||||
var b [2]byte
|
|
||||||
b[high] = byte(value >> 8)
|
|
||||||
b[low] = byte(value & 0xFF)
|
|
||||||
emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
|
|
||||||
} else {
|
|
||||||
/* Write the character using a surrogate pair (check "reader.c"). */
|
|
||||||
|
|
||||||
var b [4]byte
|
|
||||||
value -= 0x10000
|
|
||||||
b[high] = byte(0xD8 + (value >> 18))
|
|
||||||
b[low] = byte((value >> 10) & 0xFF)
|
|
||||||
b[high+2] = byte(0xDC + ((value >> 8) & 0xFF))
|
|
||||||
b[low+2] = byte(value & 0xFF)
|
|
||||||
emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Write the raw buffer. */
|
|
||||||
|
|
||||||
// Write the raw buffer.
|
|
||||||
if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
|
|
||||||
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
emitter.buffer_pos = 0
|
|
||||||
emitter.raw_buffer = emitter.raw_buffer[:0]
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
22
vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_definesh.go
generated
vendored
22
vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_definesh.go
generated
vendored
@ -1,22 +0,0 @@
|
|||||||
/*
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package candiedyaml
|
|
||||||
|
|
||||||
// Library version constants (mirroring libyaml's version defines).
const (
	yaml_VERSION_MAJOR = 0
	yaml_VERSION_MINOR = 1
	yaml_VERSION_PATCH = 6
	// yaml_VERSION_STRING must agree with MAJOR.MINOR.PATCH above.
	yaml_VERSION_STRING = "0.1.6"
)
891
vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_privateh.go
generated
vendored
891
vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_privateh.go
generated
vendored
@ -1,891 +0,0 @@
|
|||||||
/*
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package candiedyaml
|
|
||||||
|
|
||||||
// Buffer and container sizing constants (ported from libyaml).
const (
	// The size of the raw input buffer, in bytes.
	INPUT_RAW_BUFFER_SIZE = 1024

	/*
	 * The size of the input buffer.
	 *
	 * It should be possible to decode the whole raw buffer.
	 */
	INPUT_BUFFER_SIZE = (INPUT_RAW_BUFFER_SIZE * 3)

	/*
	 * The size of the output buffer.
	 */

	OUTPUT_BUFFER_SIZE = 512

	/*
	 * The size of the output raw buffer.
	 *
	 * It should be possible to encode the whole output buffer.
	 */

	OUTPUT_RAW_BUFFER_SIZE = (OUTPUT_BUFFER_SIZE*2 + 2)

	// Initial capacities for parser/emitter stacks and queues.
	INITIAL_STACK_SIZE = 16
	INITIAL_QUEUE_SIZE = 16
)
|
|
||||||
// width returns the byte length of the UTF-8 sequence whose leading
// byte is b, or 0 if b is not a valid leading byte.
func width(b byte) int {
	switch {
	case b&0x80 == 0x00:
		return 1
	case b&0xE0 == 0xC0:
		return 2
	case b&0xF0 == 0xE0:
		return 3
	case b&0xF8 == 0xF0:
		return 4
	default:
		return 0
	}
}
func copy_bytes(dest []byte, dest_pos *int, src []byte, src_pos *int) {
|
|
||||||
w := width(src[*src_pos])
|
|
||||||
switch w {
|
|
||||||
case 4:
|
|
||||||
dest[*dest_pos+3] = src[*src_pos+3]
|
|
||||||
fallthrough
|
|
||||||
case 3:
|
|
||||||
dest[*dest_pos+2] = src[*src_pos+2]
|
|
||||||
fallthrough
|
|
||||||
case 2:
|
|
||||||
dest[*dest_pos+1] = src[*src_pos+1]
|
|
||||||
fallthrough
|
|
||||||
case 1:
|
|
||||||
dest[*dest_pos] = src[*src_pos]
|
|
||||||
default:
|
|
||||||
panic("invalid width")
|
|
||||||
}
|
|
||||||
*dest_pos += w
|
|
||||||
*src_pos += w
|
|
||||||
}
|
|
||||||
|
|
||||||
// /*
//  * Check if the character at the specified position is an alphabetical
//  * character, a digit, '_', or '-'.
//  */

func is_alpha(b byte) bool {
	switch {
	case b >= '0' && b <= '9',
		b >= 'A' && b <= 'Z',
		b >= 'a' && b <= 'z':
		return true
	}
	return b == '_' || b == '-'
}
// /*
//  * Check if the character at the specified position is a digit.
//  */
//
func is_digit(b byte) bool {
	return '0' <= b && b <= '9'
}
// /*
//  * Get the value of a digit.
//  */
//
// as_digit returns the numeric value of an ASCII digit byte.
// The caller is expected to have checked is_digit(b) first.
func as_digit(b byte) int {
	return int(b) - '0'
}
// /*
//  * Check if the character at the specified position is a hex-digit.
//  */
//
func is_hex(b byte) bool {
	switch {
	case b >= '0' && b <= '9',
		b >= 'A' && b <= 'F',
		b >= 'a' && b <= 'f':
		return true
	}
	return false
}
//
// /*
//  * Get the value of a hex-digit.
//  */
//
// as_hex assumes b already passed is_hex.
func as_hex(b byte) int {
	switch {
	case b >= 'A' && b <= 'F':
		return int(b) - 'A' + 10
	case b >= 'a' && b <= 'f':
		return int(b) - 'a' + 10
	}
	return int(b) - '0'
}
// #define AS_HEX_AT(string,offset) \
|
|
||||||
// (((string).pointer[offset] >= (yaml_char_t) 'A' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) 'F') ? \
|
|
||||||
// ((string).pointer[offset] - (yaml_char_t) 'A' + 10) : \
|
|
||||||
// ((string).pointer[offset] >= (yaml_char_t) 'a' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) 'f') ? \
|
|
||||||
// ((string).pointer[offset] - (yaml_char_t) 'a' + 10) : \
|
|
||||||
// ((string).pointer[offset] - (yaml_char_t) '0'))
|
|
||||||
|
|
||||||
// /*
//  * Check if the character is a line break, space, tab, or NUL.
//  */
// is_blankz_at reports whether the character starting at b[i] is blank
// (space/tab), a line break, or NUL.
func is_blankz_at(b []byte, i int) bool {
	return is_blank(b[i]) || is_breakz_at(b, i)
}
// /*
//  * Check if the character at the specified position is a line break.
//  */
func is_break_at(b []byte, i int) bool {
	switch b[i] {
	case '\r', '\n': /* CR (#xD), LF (#xA) */
		return true
	case 0xC2: /* NEL (#x85) */
		return b[i+1] == 0x85
	case 0xE2: /* LS (#x2028) or PS (#x2029) */
		return b[i+1] == 0x80 && (b[i+2] == 0xA8 || b[i+2] == 0xA9)
	}
	return false
}
// is_breakz_at reports whether b[i] starts a line break or is NUL.
func is_breakz_at(b []byte, i int) bool {
	return is_break_at(b, i) || is_z(b[i])
}
// is_crlf_at reports whether a CR LF pair starts at b[i].
func is_crlf_at(b []byte, i int) bool {
	if b[i] != '\r' {
		return false
	}
	return b[i+1] == '\n'
}
// /*
//  * Check if the character at the specified position is NUL.
//  */
func is_z(b byte) bool {
	return b == 0
}
// /*
//  * Check if the character at the specified position is space.
//  */
func is_space(b byte) bool {
	return b == 0x20
}
//
// /*
//  * Check if the character at the specified position is tab.
//  */
func is_tab(b byte) bool {
	return b == 0x09
}
// /*
//  * Check if the character at the specified position is blank (space or tab).
//  */
func is_blank(b byte) bool {
	return b == ' ' || b == '\t'
}
// /*
//  * Check if the character is ASCII.
//  */
func is_ascii(b byte) bool {
	return b < 0x80
}
// /*
//  * Check if the character can be printed unescaped.
//  */
func is_printable_at(b []byte, i int) bool {
	switch {
	case b[i] == 0x0A: /* . == #x0A */
		return true
	case b[i] >= 0x20 && b[i] <= 0x7E: /* #x20 <= . <= #x7E */
		return true
	case b[i] == 0xC2: /* #0xA0 <= . <= #xD7FF starts here */
		return b[i+1] >= 0xA0
	case b[i] > 0xC2 && b[i] < 0xED:
		return true
	case b[i] == 0xED:
		return b[i+1] < 0xA0
	case b[i] == 0xEE:
		return true
	case b[i] == 0xEF: /* && . != #xFEFF, #xFFFE, #xFFFF */
		return !(b[i+1] == 0xBB && b[i+2] == 0xBF) &&
			!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))
	default:
		return false
	}
}
// insert_token inserts *token into the parser's token queue at offset pos
// (relative to tokens_head); a negative pos appends to the tail.
func insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
	// collapse the slice: when the queue is full but has consumed space at
	// the front, slide the live tokens down so append can reuse capacity.
	if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
		if parser.tokens_head != len(parser.tokens) {
			// move the tokens down
			copy(parser.tokens, parser.tokens[parser.tokens_head:])
		}
		// readjust the length
		parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
		parser.tokens_head = 0
	}

	// Grow the queue by one; for an append (pos < 0) this is all we need.
	parser.tokens = append(parser.tokens, *token)
	if pos < 0 {
		return
	}
	// Shift the tail right by one slot and place the token at its position.
	copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
	parser.tokens[parser.tokens_head+pos] = *token
}
// /*
//  * Check if the character at the specified position is BOM.
//  */
//
func is_bom_at(b []byte, i int) bool {
	if b[i] != 0xEF {
		return false
	}
	return b[i+1] == 0xBB && b[i+2] == 0xBF
}
|
|
||||||
//
|
|
||||||
// #ifdef HAVE_CONFIG_H
|
|
||||||
// #include <config.h>
|
|
||||||
// #endif
|
|
||||||
//
|
|
||||||
// #include "./yaml.h"
|
|
||||||
//
|
|
||||||
// #include <assert.h>
|
|
||||||
// #include <limits.h>
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Memory management.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(void *)
|
|
||||||
// yaml_malloc(size_t size);
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(void *)
|
|
||||||
// yaml_realloc(void *ptr, size_t size);
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(void)
|
|
||||||
// yaml_free(void *ptr);
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(yaml_char_t *)
|
|
||||||
// yaml_strdup(const yaml_char_t *);
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Reader: Ensure that the buffer contains at least `length` characters.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_parser_update_buffer(yaml_parser_t *parser, size_t length);
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Scanner: Ensure that the token stack contains at least one token ready.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_parser_fetch_more_tokens(yaml_parser_t *parser);
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * The size of the input raw buffer.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define INPUT_RAW_BUFFER_SIZE 16384
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * The size of the input buffer.
|
|
||||||
// *
|
|
||||||
// * It should be possible to decode the whole raw buffer.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define INPUT_BUFFER_SIZE (INPUT_RAW_BUFFER_SIZE*3)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * The size of the output buffer.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define OUTPUT_BUFFER_SIZE 16384
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * The size of the output raw buffer.
|
|
||||||
// *
|
|
||||||
// * It should be possible to encode the whole output buffer.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define OUTPUT_RAW_BUFFER_SIZE (OUTPUT_BUFFER_SIZE*2+2)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * The size of other stacks and queues.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define INITIAL_STACK_SIZE 16
|
|
||||||
// #define INITIAL_QUEUE_SIZE 16
|
|
||||||
// #define INITIAL_STRING_SIZE 16
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Buffer management.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define BUFFER_INIT(context,buffer,size) \
|
|
||||||
// (((buffer).start = yaml_malloc(size)) ? \
|
|
||||||
// ((buffer).last = (buffer).pointer = (buffer).start, \
|
|
||||||
// (buffer).end = (buffer).start+(size), \
|
|
||||||
// 1) : \
|
|
||||||
// ((context)->error = yaml_MEMORY_ERROR, \
|
|
||||||
// 0))
|
|
||||||
//
|
|
||||||
// #define BUFFER_DEL(context,buffer) \
|
|
||||||
// (yaml_free((buffer).start), \
|
|
||||||
// (buffer).start = (buffer).pointer = (buffer).end = 0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * String management.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// typedef struct {
|
|
||||||
// yaml_char_t *start;
|
|
||||||
// yaml_char_t *end;
|
|
||||||
// yaml_char_t *pointer;
|
|
||||||
// } yaml_string_t;
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_string_extend(yaml_char_t **start,
|
|
||||||
// yaml_char_t **pointer, yaml_char_t **end);
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_string_join(
|
|
||||||
// yaml_char_t **a_start, yaml_char_t **a_pointer, yaml_char_t **a_end,
|
|
||||||
// yaml_char_t **b_start, yaml_char_t **b_pointer, yaml_char_t **b_end);
|
|
||||||
//
|
|
||||||
// #define NULL_STRING { NULL, NULL, NULL }
|
|
||||||
//
|
|
||||||
// #define STRING(string,length) { (string), (string)+(length), (string) }
|
|
||||||
//
|
|
||||||
// #define STRING_ASSIGN(value,string,length) \
|
|
||||||
// ((value).start = (string), \
|
|
||||||
// (value).end = (string)+(length), \
|
|
||||||
// (value).pointer = (string))
|
|
||||||
//
|
|
||||||
// #define STRING_INIT(context,string,size) \
|
|
||||||
// (((string).start = yaml_malloc(size)) ? \
|
|
||||||
// ((string).pointer = (string).start, \
|
|
||||||
// (string).end = (string).start+(size), \
|
|
||||||
// memset((string).start, 0, (size)), \
|
|
||||||
// 1) : \
|
|
||||||
// ((context)->error = yaml_MEMORY_ERROR, \
|
|
||||||
// 0))
|
|
||||||
//
|
|
||||||
// #define STRING_DEL(context,string) \
|
|
||||||
// (yaml_free((string).start), \
|
|
||||||
// (string).start = (string).pointer = (string).end = 0)
|
|
||||||
//
|
|
||||||
// #define STRING_EXTEND(context,string) \
|
|
||||||
// (((string).pointer+5 < (string).end) \
|
|
||||||
// || yaml_string_extend(&(string).start, \
|
|
||||||
// &(string).pointer, &(string).end))
|
|
||||||
//
|
|
||||||
// #define CLEAR(context,string) \
|
|
||||||
// ((string).pointer = (string).start, \
|
|
||||||
// memset((string).start, 0, (string).end-(string).start))
|
|
||||||
//
|
|
||||||
// #define JOIN(context,string_a,string_b) \
|
|
||||||
// ((yaml_string_join(&(string_a).start, &(string_a).pointer, \
|
|
||||||
// &(string_a).end, &(string_b).start, \
|
|
||||||
// &(string_b).pointer, &(string_b).end)) ? \
|
|
||||||
// ((string_b).pointer = (string_b).start, \
|
|
||||||
// 1) : \
|
|
||||||
// ((context)->error = yaml_MEMORY_ERROR, \
|
|
||||||
// 0))
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * String check operations.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check the octet at the specified position.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define CHECK_AT(string,octet,offset) \
|
|
||||||
// ((string).pointer[offset] == (yaml_char_t)(octet))
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check the current octet in the buffer.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define CHECK(string,octet) CHECK_AT((string),(octet),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is an alphabetical
|
|
||||||
// * character, a digit, '_', or '-'.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_ALPHA_AT(string,offset) \
|
|
||||||
// (((string).pointer[offset] >= (yaml_char_t) '0' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) '9') || \
|
|
||||||
// ((string).pointer[offset] >= (yaml_char_t) 'A' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) 'Z') || \
|
|
||||||
// ((string).pointer[offset] >= (yaml_char_t) 'a' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) 'z') || \
|
|
||||||
// (string).pointer[offset] == '_' || \
|
|
||||||
// (string).pointer[offset] == '-')
|
|
||||||
//
|
|
||||||
// #define IS_ALPHA(string) IS_ALPHA_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is a digit.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_DIGIT_AT(string,offset) \
|
|
||||||
// (((string).pointer[offset] >= (yaml_char_t) '0' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) '9'))
|
|
||||||
//
|
|
||||||
// #define IS_DIGIT(string) IS_DIGIT_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Get the value of a digit.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define AS_DIGIT_AT(string,offset) \
|
|
||||||
// ((string).pointer[offset] - (yaml_char_t) '0')
|
|
||||||
//
|
|
||||||
// #define AS_DIGIT(string) AS_DIGIT_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is a hex-digit.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_HEX_AT(string,offset) \
|
|
||||||
// (((string).pointer[offset] >= (yaml_char_t) '0' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) '9') || \
|
|
||||||
// ((string).pointer[offset] >= (yaml_char_t) 'A' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) 'F') || \
|
|
||||||
// ((string).pointer[offset] >= (yaml_char_t) 'a' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) 'f'))
|
|
||||||
//
|
|
||||||
// #define IS_HEX(string) IS_HEX_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Get the value of a hex-digit.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define AS_HEX_AT(string,offset) \
|
|
||||||
// (((string).pointer[offset] >= (yaml_char_t) 'A' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) 'F') ? \
|
|
||||||
// ((string).pointer[offset] - (yaml_char_t) 'A' + 10) : \
|
|
||||||
// ((string).pointer[offset] >= (yaml_char_t) 'a' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) 'f') ? \
|
|
||||||
// ((string).pointer[offset] - (yaml_char_t) 'a' + 10) : \
|
|
||||||
// ((string).pointer[offset] - (yaml_char_t) '0'))
|
|
||||||
//
|
|
||||||
// #define AS_HEX(string) AS_HEX_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character is ASCII.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_ASCII_AT(string,offset) \
|
|
||||||
// ((string).pointer[offset] <= (yaml_char_t) '\x7F')
|
|
||||||
//
|
|
||||||
// #define IS_ASCII(string) IS_ASCII_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character can be printed unescaped.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_PRINTABLE_AT(string,offset) \
|
|
||||||
// (((string).pointer[offset] == 0x0A) /* . == #x0A */ \
|
|
||||||
// || ((string).pointer[offset] >= 0x20 /* #x20 <= . <= #x7E */ \
|
|
||||||
// && (string).pointer[offset] <= 0x7E) \
|
|
||||||
// || ((string).pointer[offset] == 0xC2 /* #0xA0 <= . <= #xD7FF */ \
|
|
||||||
// && (string).pointer[offset+1] >= 0xA0) \
|
|
||||||
// || ((string).pointer[offset] > 0xC2 \
|
|
||||||
// && (string).pointer[offset] < 0xED) \
|
|
||||||
// || ((string).pointer[offset] == 0xED \
|
|
||||||
// && (string).pointer[offset+1] < 0xA0) \
|
|
||||||
// || ((string).pointer[offset] == 0xEE) \
|
|
||||||
// || ((string).pointer[offset] == 0xEF /* #xE000 <= . <= #xFFFD */ \
|
|
||||||
// && !((string).pointer[offset+1] == 0xBB /* && . != #xFEFF */ \
|
|
||||||
// && (string).pointer[offset+2] == 0xBF) \
|
|
||||||
// && !((string).pointer[offset+1] == 0xBF \
|
|
||||||
// && ((string).pointer[offset+2] == 0xBE \
|
|
||||||
// || (string).pointer[offset+2] == 0xBF))))
|
|
||||||
//
|
|
||||||
// #define IS_PRINTABLE(string) IS_PRINTABLE_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is NUL.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_Z_AT(string,offset) CHECK_AT((string),'\0',(offset))
|
|
||||||
//
|
|
||||||
// #define IS_Z(string) IS_Z_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is BOM.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_BOM_AT(string,offset) \
|
|
||||||
// (CHECK_AT((string),'\xEF',(offset)) \
|
|
||||||
// && CHECK_AT((string),'\xBB',(offset)+1) \
|
|
||||||
// && CHECK_AT((string),'\xBF',(offset)+2)) /* BOM (#xFEFF) */
|
|
||||||
//
|
|
||||||
// #define IS_BOM(string) IS_BOM_AT(string,0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is space.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_SPACE_AT(string,offset) CHECK_AT((string),' ',(offset))
|
|
||||||
//
|
|
||||||
// #define IS_SPACE(string) IS_SPACE_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is tab.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_TAB_AT(string,offset) CHECK_AT((string),'\t',(offset))
|
|
||||||
//
|
|
||||||
// #define IS_TAB(string) IS_TAB_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is blank (space or tab).
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_BLANK_AT(string,offset) \
|
|
||||||
// (IS_SPACE_AT((string),(offset)) || IS_TAB_AT((string),(offset)))
|
|
||||||
//
|
|
||||||
// #define IS_BLANK(string) IS_BLANK_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is a line break.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_BREAK_AT(string,offset) \
|
|
||||||
// (CHECK_AT((string),'\r',(offset)) /* CR (#xD)*/ \
|
|
||||||
// || CHECK_AT((string),'\n',(offset)) /* LF (#xA) */ \
|
|
||||||
// || (CHECK_AT((string),'\xC2',(offset)) \
|
|
||||||
// && CHECK_AT((string),'\x85',(offset)+1)) /* NEL (#x85) */ \
|
|
||||||
// || (CHECK_AT((string),'\xE2',(offset)) \
|
|
||||||
// && CHECK_AT((string),'\x80',(offset)+1) \
|
|
||||||
// && CHECK_AT((string),'\xA8',(offset)+2)) /* LS (#x2028) */ \
|
|
||||||
// || (CHECK_AT((string),'\xE2',(offset)) \
|
|
||||||
// && CHECK_AT((string),'\x80',(offset)+1) \
|
|
||||||
// && CHECK_AT((string),'\xA9',(offset)+2))) /* PS (#x2029) */
|
|
||||||
//
|
|
||||||
// #define IS_BREAK(string) IS_BREAK_AT((string),0)
|
|
||||||
//
|
|
||||||
// #define IS_CRLF_AT(string,offset) \
|
|
||||||
// (CHECK_AT((string),'\r',(offset)) && CHECK_AT((string),'\n',(offset)+1))
|
|
||||||
//
|
|
||||||
// #define IS_CRLF(string) IS_CRLF_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character is a line break or NUL.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_BREAKZ_AT(string,offset) \
|
|
||||||
// (IS_BREAK_AT((string),(offset)) || IS_Z_AT((string),(offset)))
|
|
||||||
//
|
|
||||||
// #define IS_BREAKZ(string) IS_BREAKZ_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character is a line break, space, or NUL.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_SPACEZ_AT(string,offset) \
|
|
||||||
// (IS_SPACE_AT((string),(offset)) || IS_BREAKZ_AT((string),(offset)))
|
|
||||||
//
|
|
||||||
// #define IS_SPACEZ(string) IS_SPACEZ_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character is a line break, space, tab, or NUL.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_BLANKZ_AT(string,offset) \
|
|
||||||
// (IS_BLANK_AT((string),(offset)) || IS_BREAKZ_AT((string),(offset)))
|
|
||||||
//
|
|
||||||
// #define IS_BLANKZ(string) IS_BLANKZ_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Determine the width of the character.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define WIDTH_AT(string,offset) \
|
|
||||||
// (((string).pointer[offset] & 0x80) == 0x00 ? 1 : \
|
|
||||||
// ((string).pointer[offset] & 0xE0) == 0xC0 ? 2 : \
|
|
||||||
// ((string).pointer[offset] & 0xF0) == 0xE0 ? 3 : \
|
|
||||||
// ((string).pointer[offset] & 0xF8) == 0xF0 ? 4 : 0)
|
|
||||||
//
|
|
||||||
// #define WIDTH(string) WIDTH_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Move the string pointer to the next character.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define MOVE(string) ((string).pointer += WIDTH((string)))
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Copy a character and move the pointers of both strings.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define COPY(string_a,string_b) \
|
|
||||||
// ((*(string_b).pointer & 0x80) == 0x00 ? \
|
|
||||||
// (*((string_a).pointer++) = *((string_b).pointer++)) : \
|
|
||||||
// (*(string_b).pointer & 0xE0) == 0xC0 ? \
|
|
||||||
// (*((string_a).pointer++) = *((string_b).pointer++), \
|
|
||||||
// *((string_a).pointer++) = *((string_b).pointer++)) : \
|
|
||||||
// (*(string_b).pointer & 0xF0) == 0xE0 ? \
|
|
||||||
// (*((string_a).pointer++) = *((string_b).pointer++), \
|
|
||||||
// *((string_a).pointer++) = *((string_b).pointer++), \
|
|
||||||
// *((string_a).pointer++) = *((string_b).pointer++)) : \
|
|
||||||
// (*(string_b).pointer & 0xF8) == 0xF0 ? \
|
|
||||||
// (*((string_a).pointer++) = *((string_b).pointer++), \
|
|
||||||
// *((string_a).pointer++) = *((string_b).pointer++), \
|
|
||||||
// *((string_a).pointer++) = *((string_b).pointer++), \
|
|
||||||
// *((string_a).pointer++) = *((string_b).pointer++)) : 0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Stack and queue management.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_stack_extend(void **start, void **top, void **end);
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_queue_extend(void **start, void **head, void **tail, void **end);
|
|
||||||
//
|
|
||||||
// #define STACK_INIT(context,stack,size) \
|
|
||||||
// (((stack).start = yaml_malloc((size)*sizeof(*(stack).start))) ? \
|
|
||||||
// ((stack).top = (stack).start, \
|
|
||||||
// (stack).end = (stack).start+(size), \
|
|
||||||
// 1) : \
|
|
||||||
// ((context)->error = yaml_MEMORY_ERROR, \
|
|
||||||
// 0))
|
|
||||||
//
|
|
||||||
// #define STACK_DEL(context,stack) \
|
|
||||||
// (yaml_free((stack).start), \
|
|
||||||
// (stack).start = (stack).top = (stack).end = 0)
|
|
||||||
//
|
|
||||||
// #define STACK_EMPTY(context,stack) \
|
|
||||||
// ((stack).start == (stack).top)
|
|
||||||
//
|
|
||||||
// #define PUSH(context,stack,value) \
|
|
||||||
// (((stack).top != (stack).end \
|
|
||||||
// || yaml_stack_extend((void **)&(stack).start, \
|
|
||||||
// (void **)&(stack).top, (void **)&(stack).end)) ? \
|
|
||||||
// (*((stack).top++) = value, \
|
|
||||||
// 1) : \
|
|
||||||
// ((context)->error = yaml_MEMORY_ERROR, \
|
|
||||||
// 0))
|
|
||||||
//
|
|
||||||
// #define POP(context,stack) \
|
|
||||||
// (*(--(stack).top))
|
|
||||||
//
|
|
||||||
// #define QUEUE_INIT(context,queue,size) \
|
|
||||||
// (((queue).start = yaml_malloc((size)*sizeof(*(queue).start))) ? \
|
|
||||||
// ((queue).head = (queue).tail = (queue).start, \
|
|
||||||
// (queue).end = (queue).start+(size), \
|
|
||||||
// 1) : \
|
|
||||||
// ((context)->error = yaml_MEMORY_ERROR, \
|
|
||||||
// 0))
|
|
||||||
//
|
|
||||||
// #define QUEUE_DEL(context,queue) \
|
|
||||||
// (yaml_free((queue).start), \
|
|
||||||
// (queue).start = (queue).head = (queue).tail = (queue).end = 0)
|
|
||||||
//
|
|
||||||
// #define QUEUE_EMPTY(context,queue) \
|
|
||||||
// ((queue).head == (queue).tail)
|
|
||||||
//
|
|
||||||
// #define ENQUEUE(context,queue,value) \
|
|
||||||
// (((queue).tail != (queue).end \
|
|
||||||
// || yaml_queue_extend((void **)&(queue).start, (void **)&(queue).head, \
|
|
||||||
// (void **)&(queue).tail, (void **)&(queue).end)) ? \
|
|
||||||
// (*((queue).tail++) = value, \
|
|
||||||
// 1) : \
|
|
||||||
// ((context)->error = yaml_MEMORY_ERROR, \
|
|
||||||
// 0))
|
|
||||||
//
|
|
||||||
// #define DEQUEUE(context,queue) \
|
|
||||||
// (*((queue).head++))
|
|
||||||
//
|
|
||||||
// #define QUEUE_INSERT(context,queue,index,value) \
|
|
||||||
// (((queue).tail != (queue).end \
|
|
||||||
// || yaml_queue_extend((void **)&(queue).start, (void **)&(queue).head, \
|
|
||||||
// (void **)&(queue).tail, (void **)&(queue).end)) ? \
|
|
||||||
// (memmove((queue).head+(index)+1,(queue).head+(index), \
|
|
||||||
// ((queue).tail-(queue).head-(index))*sizeof(*(queue).start)), \
|
|
||||||
// *((queue).head+(index)) = value, \
|
|
||||||
// (queue).tail++, \
|
|
||||||
// 1) : \
|
|
||||||
// ((context)->error = yaml_MEMORY_ERROR, \
|
|
||||||
// 0))
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Token initializers.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define TOKEN_INIT(token,token_type,token_start_mark,token_end_mark) \
|
|
||||||
// (memset(&(token), 0, sizeof(yaml_token_t)), \
|
|
||||||
// (token).type = (token_type), \
|
|
||||||
// (token).start_mark = (token_start_mark), \
|
|
||||||
// (token).end_mark = (token_end_mark))
|
|
||||||
//
|
|
||||||
// #define STREAM_START_TOKEN_INIT(token,token_encoding,start_mark,end_mark) \
|
|
||||||
// (TOKEN_INIT((token),yaml_STREAM_START_TOKEN,(start_mark),(end_mark)), \
|
|
||||||
// (token).data.stream_start.encoding = (token_encoding))
|
|
||||||
//
|
|
||||||
// #define STREAM_END_TOKEN_INIT(token,start_mark,end_mark) \
|
|
||||||
// (TOKEN_INIT((token),yaml_STREAM_END_TOKEN,(start_mark),(end_mark)))
|
|
||||||
//
|
|
||||||
// #define ALIAS_TOKEN_INIT(token,token_value,start_mark,end_mark) \
|
|
||||||
// (TOKEN_INIT((token),yaml_ALIAS_TOKEN,(start_mark),(end_mark)), \
|
|
||||||
// (token).data.alias.value = (token_value))
|
|
||||||
//
|
|
||||||
// #define ANCHOR_TOKEN_INIT(token,token_value,start_mark,end_mark) \
|
|
||||||
// (TOKEN_INIT((token),yaml_ANCHOR_TOKEN,(start_mark),(end_mark)), \
|
|
||||||
// (token).data.anchor.value = (token_value))
|
|
||||||
//
|
|
||||||
// #define TAG_TOKEN_INIT(token,token_handle,token_suffix,start_mark,end_mark) \
|
|
||||||
// (TOKEN_INIT((token),yaml_TAG_TOKEN,(start_mark),(end_mark)), \
|
|
||||||
// (token).data.tag.handle = (token_handle), \
|
|
||||||
// (token).data.tag.suffix = (token_suffix))
|
|
||||||
//
|
|
||||||
// #define SCALAR_TOKEN_INIT(token,token_value,token_length,token_style,start_mark,end_mark) \
|
|
||||||
// (TOKEN_INIT((token),yaml_SCALAR_TOKEN,(start_mark),(end_mark)), \
|
|
||||||
// (token).data.scalar.value = (token_value), \
|
|
||||||
// (token).data.scalar.length = (token_length), \
|
|
||||||
// (token).data.scalar.style = (token_style))
|
|
||||||
//
|
|
||||||
// #define VERSION_DIRECTIVE_TOKEN_INIT(token,token_major,token_minor,start_mark,end_mark) \
|
|
||||||
// (TOKEN_INIT((token),yaml_VERSION_DIRECTIVE_TOKEN,(start_mark),(end_mark)), \
|
|
||||||
// (token).data.version_directive.major = (token_major), \
|
|
||||||
// (token).data.version_directive.minor = (token_minor))
|
|
||||||
//
|
|
||||||
// #define TAG_DIRECTIVE_TOKEN_INIT(token,token_handle,token_prefix,start_mark,end_mark) \
|
|
||||||
// (TOKEN_INIT((token),yaml_TAG_DIRECTIVE_TOKEN,(start_mark),(end_mark)), \
|
|
||||||
// (token).data.tag_directive.handle = (token_handle), \
|
|
||||||
// (token).data.tag_directive.prefix = (token_prefix))
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Event initializers.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define EVENT_INIT(event,event_type,event_start_mark,event_end_mark) \
|
|
||||||
// (memset(&(event), 0, sizeof(yaml_event_t)), \
|
|
||||||
// (event).type = (event_type), \
|
|
||||||
// (event).start_mark = (event_start_mark), \
|
|
||||||
// (event).end_mark = (event_end_mark))
|
|
||||||
//
|
|
||||||
// #define STREAM_START_EVENT_INIT(event,event_encoding,start_mark,end_mark) \
|
|
||||||
// (EVENT_INIT((event),yaml_STREAM_START_EVENT,(start_mark),(end_mark)), \
|
|
||||||
// (event).data.stream_start.encoding = (event_encoding))
|
|
||||||
//
|
|
||||||
// #define STREAM_END_EVENT_INIT(event,start_mark,end_mark) \
|
|
||||||
// (EVENT_INIT((event),yaml_STREAM_END_EVENT,(start_mark),(end_mark)))
|
|
||||||
//
|
|
||||||
// #define DOCUMENT_START_EVENT_INIT(event,event_version_directive, \
|
|
||||||
// event_tag_directives_start,event_tag_directives_end,event_implicit,start_mark,end_mark) \
|
|
||||||
// (EVENT_INIT((event),yaml_DOCUMENT_START_EVENT,(start_mark),(end_mark)), \
|
|
||||||
// (event).data.document_start.version_directive = (event_version_directive), \
|
|
||||||
// (event).data.document_start.tag_directives.start = (event_tag_directives_start), \
|
|
||||||
// (event).data.document_start.tag_directives.end = (event_tag_directives_end), \
|
|
||||||
// (event).data.document_start.implicit = (event_implicit))
|
|
||||||
//
|
|
||||||
// #define DOCUMENT_END_EVENT_INIT(event,event_implicit,start_mark,end_mark) \
|
|
||||||
// (EVENT_INIT((event),yaml_DOCUMENT_END_EVENT,(start_mark),(end_mark)), \
|
|
||||||
// (event).data.document_end.implicit = (event_implicit))
|
|
||||||
//
|
|
||||||
// #define ALIAS_EVENT_INIT(event,event_anchor,start_mark,end_mark) \
|
|
||||||
// (EVENT_INIT((event),yaml_ALIAS_EVENT,(start_mark),(end_mark)), \
|
|
||||||
// (event).data.alias.anchor = (event_anchor))
|
|
||||||
//
|
|
||||||
// #define SCALAR_EVENT_INIT(event,event_anchor,event_tag,event_value,event_length, \
|
|
||||||
// event_plain_implicit, event_quoted_implicit,event_style,start_mark,end_mark) \
|
|
||||||
// (EVENT_INIT((event),yaml_SCALAR_EVENT,(start_mark),(end_mark)), \
|
|
||||||
// (event).data.scalar.anchor = (event_anchor), \
|
|
||||||
// (event).data.scalar.tag = (event_tag), \
|
|
||||||
// (event).data.scalar.value = (event_value), \
|
|
||||||
// (event).data.scalar.length = (event_length), \
|
|
||||||
// (event).data.scalar.plain_implicit = (event_plain_implicit), \
|
|
||||||
// (event).data.scalar.quoted_implicit = (event_quoted_implicit), \
|
|
||||||
// (event).data.scalar.style = (event_style))
|
|
||||||
//
|
|
||||||
// #define SEQUENCE_START_EVENT_INIT(event,event_anchor,event_tag, \
|
|
||||||
// event_implicit,event_style,start_mark,end_mark) \
|
|
||||||
// (EVENT_INIT((event),yaml_SEQUENCE_START_EVENT,(start_mark),(end_mark)), \
|
|
||||||
// (event).data.sequence_start.anchor = (event_anchor), \
|
|
||||||
// (event).data.sequence_start.tag = (event_tag), \
|
|
||||||
// (event).data.sequence_start.implicit = (event_implicit), \
|
|
||||||
// (event).data.sequence_start.style = (event_style))
|
|
||||||
//
|
|
||||||
// #define SEQUENCE_END_EVENT_INIT(event,start_mark,end_mark) \
|
|
||||||
// (EVENT_INIT((event),yaml_SEQUENCE_END_EVENT,(start_mark),(end_mark)))
|
|
||||||
//
|
|
||||||
// #define MAPPING_START_EVENT_INIT(event,event_anchor,event_tag, \
|
|
||||||
// event_implicit,event_style,start_mark,end_mark) \
|
|
||||||
// (EVENT_INIT((event),yaml_MAPPING_START_EVENT,(start_mark),(end_mark)), \
|
|
||||||
// (event).data.mapping_start.anchor = (event_anchor), \
|
|
||||||
// (event).data.mapping_start.tag = (event_tag), \
|
|
||||||
// (event).data.mapping_start.implicit = (event_implicit), \
|
|
||||||
// (event).data.mapping_start.style = (event_style))
|
|
||||||
//
|
|
||||||
// #define MAPPING_END_EVENT_INIT(event,start_mark,end_mark) \
|
|
||||||
// (EVENT_INIT((event),yaml_MAPPING_END_EVENT,(start_mark),(end_mark)))
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Document initializer.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define DOCUMENT_INIT(document,document_nodes_start,document_nodes_end, \
|
|
||||||
// document_version_directive,document_tag_directives_start, \
|
|
||||||
// document_tag_directives_end,document_start_implicit, \
|
|
||||||
// document_end_implicit,document_start_mark,document_end_mark) \
|
|
||||||
// (memset(&(document), 0, sizeof(yaml_document_t)), \
|
|
||||||
// (document).nodes.start = (document_nodes_start), \
|
|
||||||
// (document).nodes.end = (document_nodes_end), \
|
|
||||||
// (document).nodes.top = (document_nodes_start), \
|
|
||||||
// (document).version_directive = (document_version_directive), \
|
|
||||||
// (document).tag_directives.start = (document_tag_directives_start), \
|
|
||||||
// (document).tag_directives.end = (document_tag_directives_end), \
|
|
||||||
// (document).start_implicit = (document_start_implicit), \
|
|
||||||
// (document).end_implicit = (document_end_implicit), \
|
|
||||||
// (document).start_mark = (document_start_mark), \
|
|
||||||
// (document).end_mark = (document_end_mark))
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Node initializers.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define NODE_INIT(node,node_type,node_tag,node_start_mark,node_end_mark) \
|
|
||||||
// (memset(&(node), 0, sizeof(yaml_node_t)), \
|
|
||||||
// (node).type = (node_type), \
|
|
||||||
// (node).tag = (node_tag), \
|
|
||||||
// (node).start_mark = (node_start_mark), \
|
|
||||||
// (node).end_mark = (node_end_mark))
|
|
||||||
//
|
|
||||||
// #define SCALAR_NODE_INIT(node,node_tag,node_value,node_length, \
|
|
||||||
// node_style,start_mark,end_mark) \
|
|
||||||
// (NODE_INIT((node),yaml_SCALAR_NODE,(node_tag),(start_mark),(end_mark)), \
|
|
||||||
// (node).data.scalar.value = (node_value), \
|
|
||||||
// (node).data.scalar.length = (node_length), \
|
|
||||||
// (node).data.scalar.style = (node_style))
|
|
||||||
//
|
|
||||||
// #define SEQUENCE_NODE_INIT(node,node_tag,node_items_start,node_items_end, \
|
|
||||||
// node_style,start_mark,end_mark) \
|
|
||||||
// (NODE_INIT((node),yaml_SEQUENCE_NODE,(node_tag),(start_mark),(end_mark)), \
|
|
||||||
// (node).data.sequence.items.start = (node_items_start), \
|
|
||||||
// (node).data.sequence.items.end = (node_items_end), \
|
|
||||||
// (node).data.sequence.items.top = (node_items_start), \
|
|
||||||
// (node).data.sequence.style = (node_style))
|
|
||||||
//
|
|
||||||
// #define MAPPING_NODE_INIT(node,node_tag,node_pairs_start,node_pairs_end, \
|
|
||||||
// node_style,start_mark,end_mark) \
|
|
||||||
// (NODE_INIT((node),yaml_MAPPING_NODE,(node_tag),(start_mark),(end_mark)), \
|
|
||||||
// (node).data.mapping.pairs.start = (node_pairs_start), \
|
|
||||||
// (node).data.mapping.pairs.end = (node_pairs_end), \
|
|
||||||
// (node).data.mapping.pairs.top = (node_pairs_start), \
|
|
||||||
// (node).data.mapping.style = (node_style))
|
|
||||||
//
|
|
||||||
953
vendor/github.com/cloudfoundry-incubator/candiedyaml/yamlh.go
generated
vendored
953
vendor/github.com/cloudfoundry-incubator/candiedyaml/yamlh.go
generated
vendored
@ -1,953 +0,0 @@
|
|||||||
/*
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package candiedyaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
/** The version directive data. */
|
|
||||||
type yaml_version_directive_t struct {
|
|
||||||
major int // The major version number
|
|
||||||
minor int // The minor version number
|
|
||||||
}
|
|
||||||
|
|
||||||
/** The tag directive data. */
|
|
||||||
type yaml_tag_directive_t struct {
|
|
||||||
handle []byte // The tag handle
|
|
||||||
prefix []byte // The tag prefix
|
|
||||||
}
|
|
||||||
|
|
||||||
/** The stream encoding. */
|
|
||||||
type yaml_encoding_t int
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** Let the parser choose the encoding. */
|
|
||||||
yaml_ANY_ENCODING yaml_encoding_t = iota
|
|
||||||
/** The defau lt UTF-8 encoding. */
|
|
||||||
yaml_UTF8_ENCODING
|
|
||||||
/** The UTF-16-LE encoding with BOM. */
|
|
||||||
yaml_UTF16LE_ENCODING
|
|
||||||
/** The UTF-16-BE encoding with BOM. */
|
|
||||||
yaml_UTF16BE_ENCODING
|
|
||||||
)
|
|
||||||
|
|
||||||
/** Line break types. */
|
|
||||||
type yaml_break_t int
|
|
||||||
|
|
||||||
const (
|
|
||||||
yaml_ANY_BREAK yaml_break_t = iota /** Let the parser choose the break type. */
|
|
||||||
yaml_CR_BREAK /** Use CR for line breaks (Mac style). */
|
|
||||||
yaml_LN_BREAK /** Use LN for line breaks (Unix style). */
|
|
||||||
yaml_CRLN_BREAK /** Use CR LN for line breaks (DOS style). */
|
|
||||||
)
|
|
||||||
|
|
||||||
/** Many bad things could happen with the parser and emitter. */
|
|
||||||
type YAML_error_type_t int
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** No error is produced. */
|
|
||||||
yaml_NO_ERROR YAML_error_type_t = iota
|
|
||||||
|
|
||||||
/** Cannot allocate or reallocate a block of memory. */
|
|
||||||
yaml_MEMORY_ERROR
|
|
||||||
|
|
||||||
/** Cannot read or decode the input stream. */
|
|
||||||
yaml_READER_ERROR
|
|
||||||
/** Cannot scan the input stream. */
|
|
||||||
yaml_SCANNER_ERROR
|
|
||||||
/** Cannot parse the input stream. */
|
|
||||||
yaml_PARSER_ERROR
|
|
||||||
/** Cannot compose a YAML document. */
|
|
||||||
yaml_COMPOSER_ERROR
|
|
||||||
|
|
||||||
/** Cannot write to the output stream. */
|
|
||||||
yaml_WRITER_ERROR
|
|
||||||
/** Cannot emit a YAML stream. */
|
|
||||||
yaml_EMITTER_ERROR
|
|
||||||
)
|
|
||||||
|
|
||||||
/** The pointer position. */
|
|
||||||
type YAML_mark_t struct {
|
|
||||||
/** The position index. */
|
|
||||||
index int
|
|
||||||
|
|
||||||
/** The position line. */
|
|
||||||
line int
|
|
||||||
|
|
||||||
/** The position column. */
|
|
||||||
column int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m YAML_mark_t) String() string {
|
|
||||||
return fmt.Sprintf("line %d, column %d", m.line, m.column)
|
|
||||||
}
|
|
||||||
|
|
||||||
/** @} */
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @defgroup styles Node Styles
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
type yaml_style_t int
|
|
||||||
|
|
||||||
/** Scalar styles. */
|
|
||||||
type yaml_scalar_style_t yaml_style_t
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** Let the emitter choose the style. */
|
|
||||||
yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
|
|
||||||
|
|
||||||
/** The plain scalar style. */
|
|
||||||
yaml_PLAIN_SCALAR_STYLE
|
|
||||||
|
|
||||||
/** The single-quoted scalar style. */
|
|
||||||
yaml_SINGLE_QUOTED_SCALAR_STYLE
|
|
||||||
/** The double-quoted scalar style. */
|
|
||||||
yaml_DOUBLE_QUOTED_SCALAR_STYLE
|
|
||||||
|
|
||||||
/** The literal scalar style. */
|
|
||||||
yaml_LITERAL_SCALAR_STYLE
|
|
||||||
/** The folded scalar style. */
|
|
||||||
yaml_FOLDED_SCALAR_STYLE
|
|
||||||
)
|
|
||||||
|
|
||||||
/** Sequence styles. */
|
|
||||||
type yaml_sequence_style_t yaml_style_t
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** Let the emitter choose the style. */
|
|
||||||
yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
|
|
||||||
|
|
||||||
/** The block sequence style. */
|
|
||||||
yaml_BLOCK_SEQUENCE_STYLE
|
|
||||||
/** The flow sequence style. */
|
|
||||||
yaml_FLOW_SEQUENCE_STYLE
|
|
||||||
)
|
|
||||||
|
|
||||||
/** Mapping styles. */
|
|
||||||
type yaml_mapping_style_t yaml_style_t
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** Let the emitter choose the style. */
|
|
||||||
yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
|
|
||||||
|
|
||||||
/** The block mapping style. */
|
|
||||||
yaml_BLOCK_MAPPING_STYLE
|
|
||||||
/** The flow mapping style. */
|
|
||||||
yaml_FLOW_MAPPING_STYLE
|
|
||||||
|
|
||||||
/* yaml_FLOW_SET_MAPPING_STYLE */
|
|
||||||
)
|
|
||||||
|
|
||||||
/** @} */
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @defgroup tokens Tokens
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** Token types. */
|
|
||||||
type yaml_token_type_t int
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** An empty token. */
|
|
||||||
yaml_NO_TOKEN yaml_token_type_t = iota
|
|
||||||
|
|
||||||
/** A STREAM-START token. */
|
|
||||||
yaml_STREAM_START_TOKEN
|
|
||||||
/** A STREAM-END token. */
|
|
||||||
yaml_STREAM_END_TOKEN
|
|
||||||
|
|
||||||
/** A VERSION-DIRECTIVE token. */
|
|
||||||
yaml_VERSION_DIRECTIVE_TOKEN
|
|
||||||
/** A TAG-DIRECTIVE token. */
|
|
||||||
yaml_TAG_DIRECTIVE_TOKEN
|
|
||||||
/** A DOCUMENT-START token. */
|
|
||||||
yaml_DOCUMENT_START_TOKEN
|
|
||||||
/** A DOCUMENT-END token. */
|
|
||||||
yaml_DOCUMENT_END_TOKEN
|
|
||||||
|
|
||||||
/** A BLOCK-SEQUENCE-START token. */
|
|
||||||
yaml_BLOCK_SEQUENCE_START_TOKEN
|
|
||||||
/** A BLOCK-SEQUENCE-END token. */
|
|
||||||
yaml_BLOCK_MAPPING_START_TOKEN
|
|
||||||
/** A BLOCK-END token. */
|
|
||||||
yaml_BLOCK_END_TOKEN
|
|
||||||
|
|
||||||
/** A FLOW-SEQUENCE-START token. */
|
|
||||||
yaml_FLOW_SEQUENCE_START_TOKEN
|
|
||||||
/** A FLOW-SEQUENCE-END token. */
|
|
||||||
yaml_FLOW_SEQUENCE_END_TOKEN
|
|
||||||
/** A FLOW-MAPPING-START token. */
|
|
||||||
yaml_FLOW_MAPPING_START_TOKEN
|
|
||||||
/** A FLOW-MAPPING-END token. */
|
|
||||||
yaml_FLOW_MAPPING_END_TOKEN
|
|
||||||
|
|
||||||
/** A BLOCK-ENTRY token. */
|
|
||||||
yaml_BLOCK_ENTRY_TOKEN
|
|
||||||
/** A FLOW-ENTRY token. */
|
|
||||||
yaml_FLOW_ENTRY_TOKEN
|
|
||||||
/** A KEY token. */
|
|
||||||
yaml_KEY_TOKEN
|
|
||||||
/** A VALUE token. */
|
|
||||||
yaml_VALUE_TOKEN
|
|
||||||
|
|
||||||
/** An ALIAS token. */
|
|
||||||
yaml_ALIAS_TOKEN
|
|
||||||
/** An ANCHOR token. */
|
|
||||||
yaml_ANCHOR_TOKEN
|
|
||||||
/** A TAG token. */
|
|
||||||
yaml_TAG_TOKEN
|
|
||||||
/** A SCALAR token. */
|
|
||||||
yaml_SCALAR_TOKEN
|
|
||||||
)
|
|
||||||
|
|
||||||
/** The token structure. */
|
|
||||||
type yaml_token_t struct {
|
|
||||||
|
|
||||||
/** The token type. */
|
|
||||||
token_type yaml_token_type_t
|
|
||||||
|
|
||||||
/** The token data. */
|
|
||||||
/** The stream start (for @c yaml_STREAM_START_TOKEN). */
|
|
||||||
encoding yaml_encoding_t
|
|
||||||
|
|
||||||
/** The alias (for @c yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN,yaml_TAG_TOKEN ). */
|
|
||||||
/** The anchor (for @c ). */
|
|
||||||
/** The scalar value (for @c ). */
|
|
||||||
value []byte
|
|
||||||
|
|
||||||
/** The tag suffix. */
|
|
||||||
suffix []byte
|
|
||||||
|
|
||||||
/** The scalar value (for @c yaml_SCALAR_TOKEN). */
|
|
||||||
/** The scalar style. */
|
|
||||||
style yaml_scalar_style_t
|
|
||||||
|
|
||||||
/** The version directive (for @c yaml_VERSION_DIRECTIVE_TOKEN). */
|
|
||||||
version_directive yaml_version_directive_t
|
|
||||||
|
|
||||||
/** The tag directive (for @c yaml_TAG_DIRECTIVE_TOKEN). */
|
|
||||||
prefix []byte
|
|
||||||
|
|
||||||
/** The beginning of the token. */
|
|
||||||
start_mark YAML_mark_t
|
|
||||||
/** The end of the token. */
|
|
||||||
end_mark YAML_mark_t
|
|
||||||
|
|
||||||
major, minor int
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @defgroup events Events
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** Event types. */
|
|
||||||
type yaml_event_type_t int
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** An empty event. */
|
|
||||||
yaml_NO_EVENT yaml_event_type_t = iota
|
|
||||||
|
|
||||||
/** A STREAM-START event. */
|
|
||||||
yaml_STREAM_START_EVENT
|
|
||||||
/** A STREAM-END event. */
|
|
||||||
yaml_STREAM_END_EVENT
|
|
||||||
|
|
||||||
/** A DOCUMENT-START event. */
|
|
||||||
yaml_DOCUMENT_START_EVENT
|
|
||||||
/** A DOCUMENT-END event. */
|
|
||||||
yaml_DOCUMENT_END_EVENT
|
|
||||||
|
|
||||||
/** An ALIAS event. */
|
|
||||||
yaml_ALIAS_EVENT
|
|
||||||
/** A SCALAR event. */
|
|
||||||
yaml_SCALAR_EVENT
|
|
||||||
|
|
||||||
/** A SEQUENCE-START event. */
|
|
||||||
yaml_SEQUENCE_START_EVENT
|
|
||||||
/** A SEQUENCE-END event. */
|
|
||||||
yaml_SEQUENCE_END_EVENT
|
|
||||||
|
|
||||||
/** A MAPPING-START event. */
|
|
||||||
yaml_MAPPING_START_EVENT
|
|
||||||
/** A MAPPING-END event. */
|
|
||||||
yaml_MAPPING_END_EVENT
|
|
||||||
)
|
|
||||||
|
|
||||||
/** The event structure. */
|
|
||||||
type yaml_event_t struct {
|
|
||||||
|
|
||||||
/** The event type. */
|
|
||||||
event_type yaml_event_type_t
|
|
||||||
|
|
||||||
/** The stream parameters (for @c yaml_STREAM_START_EVENT). */
|
|
||||||
encoding yaml_encoding_t
|
|
||||||
|
|
||||||
/** The document parameters (for @c yaml_DOCUMENT_START_EVENT). */
|
|
||||||
version_directive *yaml_version_directive_t
|
|
||||||
|
|
||||||
/** The beginning and end of the tag directives list. */
|
|
||||||
tag_directives []yaml_tag_directive_t
|
|
||||||
|
|
||||||
/** The document parameters (for @c yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT,yaml_MAPPING_START_EVENT). */
|
|
||||||
/** Is the document indicator implicit? */
|
|
||||||
implicit bool
|
|
||||||
|
|
||||||
/** The alias parameters (for @c yaml_ALIAS_EVENT,yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */
|
|
||||||
/** The anchor. */
|
|
||||||
anchor []byte
|
|
||||||
|
|
||||||
/** The scalar parameters (for @c yaml_SCALAR_EVENT,yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */
|
|
||||||
/** The tag. */
|
|
||||||
tag []byte
|
|
||||||
/** The scalar value. */
|
|
||||||
value []byte
|
|
||||||
|
|
||||||
/** Is the tag optional for the plain style? */
|
|
||||||
plain_implicit bool
|
|
||||||
/** Is the tag optional for any non-plain style? */
|
|
||||||
quoted_implicit bool
|
|
||||||
|
|
||||||
/** The sequence parameters (for @c yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */
|
|
||||||
/** The sequence style. */
|
|
||||||
/** The scalar style. */
|
|
||||||
style yaml_style_t
|
|
||||||
|
|
||||||
/** The beginning of the event. */
|
|
||||||
start_mark, end_mark YAML_mark_t
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @defgroup nodes Nodes
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** The tag @c !!null with the only possible value: @c null. */
|
|
||||||
yaml_NULL_TAG = "tag:yaml.org,2002:null"
|
|
||||||
/** The tag @c !!bool with the values: @c true and @c falce. */
|
|
||||||
yaml_BOOL_TAG = "tag:yaml.org,2002:bool"
|
|
||||||
/** The tag @c !!str for string values. */
|
|
||||||
yaml_STR_TAG = "tag:yaml.org,2002:str"
|
|
||||||
/** The tag @c !!int for integer values. */
|
|
||||||
yaml_INT_TAG = "tag:yaml.org,2002:int"
|
|
||||||
/** The tag @c !!float for float values. */
|
|
||||||
yaml_FLOAT_TAG = "tag:yaml.org,2002:float"
|
|
||||||
/** The tag @c !!timestamp for date and time values. */
|
|
||||||
yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp"
|
|
||||||
|
|
||||||
/** The tag @c !!seq is used to denote sequences. */
|
|
||||||
yaml_SEQ_TAG = "tag:yaml.org,2002:seq"
|
|
||||||
/** The tag @c !!map is used to denote mapping. */
|
|
||||||
yaml_MAP_TAG = "tag:yaml.org,2002:map"
|
|
||||||
|
|
||||||
/** The default scalar tag is @c !!str. */
|
|
||||||
yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG
|
|
||||||
/** The default sequence tag is @c !!seq. */
|
|
||||||
yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG
|
|
||||||
/** The default mapping tag is @c !!map. */
|
|
||||||
yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG
|
|
||||||
|
|
||||||
yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
|
|
||||||
)
|
|
||||||
|
|
||||||
/** Node types. */
|
|
||||||
type yaml_node_type_t int
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** An empty node. */
|
|
||||||
yaml_NO_NODE yaml_node_type_t = iota
|
|
||||||
|
|
||||||
/** A scalar node. */
|
|
||||||
yaml_SCALAR_NODE
|
|
||||||
/** A sequence node. */
|
|
||||||
yaml_SEQUENCE_NODE
|
|
||||||
/** A mapping node. */
|
|
||||||
yaml_MAPPING_NODE
|
|
||||||
)
|
|
||||||
|
|
||||||
/** An element of a sequence node. */
|
|
||||||
type yaml_node_item_t int
|
|
||||||
|
|
||||||
/** An element of a mapping node. */
|
|
||||||
type yaml_node_pair_t struct {
|
|
||||||
/** The key of the element. */
|
|
||||||
key int
|
|
||||||
/** The value of the element. */
|
|
||||||
value int
|
|
||||||
}
|
|
||||||
|
|
||||||
/** The node structure. */
|
|
||||||
type yaml_node_t struct {
|
|
||||||
|
|
||||||
/** The node type. */
|
|
||||||
node_type yaml_node_type_t
|
|
||||||
|
|
||||||
/** The node tag. */
|
|
||||||
tag []byte
|
|
||||||
|
|
||||||
/** The scalar parameters (for @c yaml_SCALAR_NODE). */
|
|
||||||
scalar struct {
|
|
||||||
/** The scalar value. */
|
|
||||||
value []byte
|
|
||||||
/** The scalar style. */
|
|
||||||
style yaml_scalar_style_t
|
|
||||||
}
|
|
||||||
|
|
||||||
/** The sequence parameters (for @c yaml_SEQUENCE_NODE). */
|
|
||||||
sequence struct {
|
|
||||||
/** The stack of sequence items. */
|
|
||||||
items []yaml_node_item_t
|
|
||||||
/** The sequence style. */
|
|
||||||
style yaml_sequence_style_t
|
|
||||||
}
|
|
||||||
|
|
||||||
/** The mapping parameters (for @c yaml_MAPPING_NODE). */
|
|
||||||
mapping struct {
|
|
||||||
/** The stack of mapping pairs (key, value). */
|
|
||||||
pairs []yaml_node_pair_t
|
|
||||||
/** The mapping style. */
|
|
||||||
style yaml_mapping_style_t
|
|
||||||
}
|
|
||||||
|
|
||||||
/** The beginning of the node. */
|
|
||||||
start_mark YAML_mark_t
|
|
||||||
/** The end of the node. */
|
|
||||||
end_mark YAML_mark_t
|
|
||||||
}
|
|
||||||
|
|
||||||
/** The document structure. */
|
|
||||||
type yaml_document_t struct {
|
|
||||||
|
|
||||||
/** The document nodes. */
|
|
||||||
nodes []yaml_node_t
|
|
||||||
|
|
||||||
/** The version directive. */
|
|
||||||
version_directive *yaml_version_directive_t
|
|
||||||
|
|
||||||
/** The list of tag directives. */
|
|
||||||
tags []yaml_tag_directive_t
|
|
||||||
|
|
||||||
/** Is the document start indicator implicit? */
|
|
||||||
start_implicit bool
|
|
||||||
/** Is the document end indicator implicit? */
|
|
||||||
end_implicit bool
|
|
||||||
|
|
||||||
/** The beginning of the document. */
|
|
||||||
start_mark YAML_mark_t
|
|
||||||
/** The end of the document. */
|
|
||||||
end_mark YAML_mark_t
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* The prototype of a read handler.
|
|
||||||
*
|
|
||||||
* The read handler is called when the parser needs to read more bytes from the
|
|
||||||
* source. The handler should write not more than @a size bytes to the @a
|
|
||||||
* buffer. The number of written bytes should be set to the @a length variable.
|
|
||||||
*
|
|
||||||
* @param[in,out] data A pointer to an application data specified by
|
|
||||||
* yaml_parser_set_input().
|
|
||||||
* @param[out] buffer The buffer to write the data from the source.
|
|
||||||
* @param[in] size The size of the buffer.
|
|
||||||
* @param[out] size_read The actual number of bytes read from the source.
|
|
||||||
*
|
|
||||||
* @returns On success, the handler should return @c 1. If the handler failed,
|
|
||||||
* the returned value should be @c 0. On EOF, the handler should set the
|
|
||||||
* @a size_read to @c 0 and return @c 1.
|
|
||||||
*/
|
|
||||||
|
|
||||||
type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This structure holds information about a potential simple key.
|
|
||||||
*/
|
|
||||||
|
|
||||||
type yaml_simple_key_t struct {
|
|
||||||
/** Is a simple key possible? */
|
|
||||||
possible bool
|
|
||||||
|
|
||||||
/** Is a simple key required? */
|
|
||||||
required bool
|
|
||||||
|
|
||||||
/** The number of the token. */
|
|
||||||
token_number int
|
|
||||||
|
|
||||||
/** The position mark. */
|
|
||||||
mark YAML_mark_t
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* The states of the parser.
|
|
||||||
*/
|
|
||||||
type yaml_parser_state_t int
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** Expect STREAM-START. */
|
|
||||||
yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
|
|
||||||
/** Expect the beginning of an implicit document. */
|
|
||||||
yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
|
|
||||||
/** Expect DOCUMENT-START. */
|
|
||||||
yaml_PARSE_DOCUMENT_START_STATE
|
|
||||||
/** Expect the content of a document. */
|
|
||||||
yaml_PARSE_DOCUMENT_CONTENT_STATE
|
|
||||||
/** Expect DOCUMENT-END. */
|
|
||||||
yaml_PARSE_DOCUMENT_END_STATE
|
|
||||||
/** Expect a block node. */
|
|
||||||
yaml_PARSE_BLOCK_NODE_STATE
|
|
||||||
/** Expect a block node or indentless sequence. */
|
|
||||||
yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE
|
|
||||||
/** Expect a flow node. */
|
|
||||||
yaml_PARSE_FLOW_NODE_STATE
|
|
||||||
/** Expect the first entry of a block sequence. */
|
|
||||||
yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
|
|
||||||
/** Expect an entry of a block sequence. */
|
|
||||||
yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
|
|
||||||
/** Expect an entry of an indentless sequence. */
|
|
||||||
yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
|
|
||||||
/** Expect the first key of a block mapping. */
|
|
||||||
yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
|
|
||||||
/** Expect a block mapping key. */
|
|
||||||
yaml_PARSE_BLOCK_MAPPING_KEY_STATE
|
|
||||||
/** Expect a block mapping value. */
|
|
||||||
yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
|
|
||||||
/** Expect the first entry of a flow sequence. */
|
|
||||||
yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
|
|
||||||
/** Expect an entry of a flow sequence. */
|
|
||||||
yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
|
|
||||||
/** Expect a key of an ordered mapping. */
|
|
||||||
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
|
|
||||||
/** Expect a value of an ordered mapping. */
|
|
||||||
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
|
|
||||||
/** Expect the and of an ordered mapping entry. */
|
|
||||||
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
|
|
||||||
/** Expect the first key of a flow mapping. */
|
|
||||||
yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
|
|
||||||
/** Expect a key of a flow mapping. */
|
|
||||||
yaml_PARSE_FLOW_MAPPING_KEY_STATE
|
|
||||||
/** Expect a value of a flow mapping. */
|
|
||||||
yaml_PARSE_FLOW_MAPPING_VALUE_STATE
|
|
||||||
/** Expect an empty value of a flow mapping. */
|
|
||||||
yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE
|
|
||||||
/** Expect nothing. */
|
|
||||||
yaml_PARSE_END_STATE
|
|
||||||
)
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This structure holds aliases data.
|
|
||||||
*/
|
|
||||||
|
|
||||||
type yaml_alias_data_t struct {
|
|
||||||
/** The anchor. */
|
|
||||||
anchor []byte
|
|
||||||
/** The node id. */
|
|
||||||
index int
|
|
||||||
/** The anchor mark. */
|
|
||||||
mark YAML_mark_t
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* The parser structure.
|
|
||||||
*
|
|
||||||
* All members are internal. Manage the structure using the @c yaml_parser_
|
|
||||||
* family of functions.
|
|
||||||
*/
|
|
||||||
|
|
||||||
type yaml_parser_t struct {
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @name Error handling
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** Error type. */
|
|
||||||
error YAML_error_type_t
|
|
||||||
/** Error description. */
|
|
||||||
problem string
|
|
||||||
/** The byte about which the problem occured. */
|
|
||||||
problem_offset int
|
|
||||||
/** The problematic value (@c -1 is none). */
|
|
||||||
problem_value int
|
|
||||||
/** The problem position. */
|
|
||||||
problem_mark YAML_mark_t
|
|
||||||
/** The error context. */
|
|
||||||
context string
|
|
||||||
/** The context position. */
|
|
||||||
context_mark YAML_mark_t
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @}
|
|
||||||
*/
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @name Reader stuff
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** Read handler. */
|
|
||||||
read_handler yaml_read_handler_t
|
|
||||||
|
|
||||||
/** Reader input data. */
|
|
||||||
input_reader io.Reader
|
|
||||||
input []byte
|
|
||||||
input_pos int
|
|
||||||
|
|
||||||
/** EOF flag */
|
|
||||||
eof bool
|
|
||||||
|
|
||||||
/** The working buffer. */
|
|
||||||
buffer []byte
|
|
||||||
buffer_pos int
|
|
||||||
|
|
||||||
/* The number of unread characters in the buffer. */
|
|
||||||
unread int
|
|
||||||
|
|
||||||
/** The raw buffer. */
|
|
||||||
raw_buffer []byte
|
|
||||||
raw_buffer_pos int
|
|
||||||
|
|
||||||
/** The input encoding. */
|
|
||||||
encoding yaml_encoding_t
|
|
||||||
|
|
||||||
/** The offset of the current position (in bytes). */
|
|
||||||
offset int
|
|
||||||
|
|
||||||
/** The mark of the current position. */
|
|
||||||
mark YAML_mark_t
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @}
|
|
||||||
*/
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @name Scanner stuff
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** Have we started to scan the input stream? */
|
|
||||||
stream_start_produced bool
|
|
||||||
|
|
||||||
/** Have we reached the end of the input stream? */
|
|
||||||
stream_end_produced bool
|
|
||||||
|
|
||||||
/** The number of unclosed '[' and '{' indicators. */
|
|
||||||
flow_level int
|
|
||||||
|
|
||||||
/** The tokens queue. */
|
|
||||||
tokens []yaml_token_t
|
|
||||||
tokens_head int
|
|
||||||
|
|
||||||
/** The number of tokens fetched from the queue. */
|
|
||||||
tokens_parsed int
|
|
||||||
|
|
||||||
/* Does the tokens queue contain a token ready for dequeueing. */
|
|
||||||
token_available bool
|
|
||||||
|
|
||||||
/** The indentation levels stack. */
|
|
||||||
indents []int
|
|
||||||
|
|
||||||
/** The current indentation level. */
|
|
||||||
indent int
|
|
||||||
|
|
||||||
/** May a simple key occur at the current position? */
|
|
||||||
simple_key_allowed bool
|
|
||||||
|
|
||||||
/** The stack of simple keys. */
|
|
||||||
simple_keys []yaml_simple_key_t
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @}
|
|
||||||
*/
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @name Parser stuff
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** The parser states stack. */
|
|
||||||
states []yaml_parser_state_t
|
|
||||||
|
|
||||||
/** The current parser state. */
|
|
||||||
state yaml_parser_state_t
|
|
||||||
|
|
||||||
/** The stack of marks. */
|
|
||||||
marks []YAML_mark_t
|
|
||||||
|
|
||||||
/** The list of TAG directives. */
|
|
||||||
tag_directives []yaml_tag_directive_t
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @}
|
|
||||||
*/
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @name Dumper stuff
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** The alias data. */
|
|
||||||
aliases []yaml_alias_data_t
|
|
||||||
|
|
||||||
/** The currently parsed document. */
|
|
||||||
document *yaml_document_t
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @}
|
|
||||||
*/
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* The prototype of a write handler.
|
|
||||||
*
|
|
||||||
* The write handler is called when the emitter needs to flush the accumulated
|
|
||||||
* characters to the output. The handler should write @a size bytes of the
|
|
||||||
* @a buffer to the output.
|
|
||||||
*
|
|
||||||
* @param[in,out] data A pointer to an application data specified by
|
|
||||||
* yaml_emitter_set_output().
|
|
||||||
* @param[in] buffer The buffer with bytes to be written.
|
|
||||||
* @param[in] size The size of the buffer.
|
|
||||||
*
|
|
||||||
* @returns On success, the handler should return @c 1. If the handler failed,
|
|
||||||
* the returned value should be @c 0.
|
|
||||||
*/
|
|
||||||
|
|
||||||
type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
|
|
||||||
|
|
||||||
/** The emitter states. */
|
|
||||||
type yaml_emitter_state_t int
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** Expect STREAM-START. */
|
|
||||||
yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
|
|
||||||
/** Expect the first DOCUMENT-START or STREAM-END. */
|
|
||||||
yaml_EMIT_FIRST_DOCUMENT_START_STATE
|
|
||||||
/** Expect DOCUMENT-START or STREAM-END. */
|
|
||||||
yaml_EMIT_DOCUMENT_START_STATE
|
|
||||||
/** Expect the content of a document. */
|
|
||||||
yaml_EMIT_DOCUMENT_CONTENT_STATE
|
|
||||||
/** Expect DOCUMENT-END. */
|
|
||||||
yaml_EMIT_DOCUMENT_END_STATE
|
|
||||||
/** Expect the first item of a flow sequence. */
|
|
||||||
yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
|
|
||||||
/** Expect an item of a flow sequence. */
|
|
||||||
yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE
|
|
||||||
/** Expect the first key of a flow mapping. */
|
|
||||||
yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
|
|
||||||
/** Expect a key of a flow mapping. */
|
|
||||||
yaml_EMIT_FLOW_MAPPING_KEY_STATE
|
|
||||||
/** Expect a value for a simple key of a flow mapping. */
|
|
||||||
yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE
|
|
||||||
/** Expect a value of a flow mapping. */
|
|
||||||
yaml_EMIT_FLOW_MAPPING_VALUE_STATE
|
|
||||||
/** Expect the first item of a block sequence. */
|
|
||||||
yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
|
|
||||||
/** Expect an item of a block sequence. */
|
|
||||||
yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE
|
|
||||||
/** Expect the first key of a block mapping. */
|
|
||||||
yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
|
|
||||||
/** Expect the key of a block mapping. */
|
|
||||||
yaml_EMIT_BLOCK_MAPPING_KEY_STATE
|
|
||||||
/** Expect a value for a simple key of a block mapping. */
|
|
||||||
yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE
|
|
||||||
/** Expect a value of a block mapping. */
|
|
||||||
yaml_EMIT_BLOCK_MAPPING_VALUE_STATE
|
|
||||||
/** Expect nothing. */
|
|
||||||
yaml_EMIT_END_STATE
|
|
||||||
)
|
|
||||||
|
|
||||||
/**
|
|
||||||
* The emitter structure.
|
|
||||||
*
|
|
||||||
* All members are internal. Manage the structure using the @c yaml_emitter_
|
|
||||||
* family of functions.
|
|
||||||
*/
|
|
||||||
|
|
||||||
type yaml_emitter_t struct {
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @name Error handling
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** Error type. */
|
|
||||||
error YAML_error_type_t
|
|
||||||
/** Error description. */
|
|
||||||
problem string
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @}
|
|
||||||
*/
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @name Writer stuff
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** Write handler. */
|
|
||||||
write_handler yaml_write_handler_t
|
|
||||||
|
|
||||||
/** Standard (string or file) output data. */
|
|
||||||
output_buffer *[]byte
|
|
||||||
output_writer io.Writer
|
|
||||||
|
|
||||||
/** The working buffer. */
|
|
||||||
buffer []byte
|
|
||||||
buffer_pos int
|
|
||||||
|
|
||||||
/** The raw buffer. */
|
|
||||||
raw_buffer []byte
|
|
||||||
raw_buffer_pos int
|
|
||||||
|
|
||||||
/** The stream encoding. */
|
|
||||||
encoding yaml_encoding_t
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @}
|
|
||||||
*/
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @name Emitter stuff
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** If the output is in the canonical style? */
|
|
||||||
canonical bool
|
|
||||||
/** The number of indentation spaces. */
|
|
||||||
best_indent int
|
|
||||||
/** The preferred width of the output lines. */
|
|
||||||
best_width int
|
|
||||||
/** Allow unescaped non-ASCII characters? */
|
|
||||||
unicode bool
|
|
||||||
/** The preferred line break. */
|
|
||||||
line_break yaml_break_t
|
|
||||||
|
|
||||||
/** The stack of states. */
|
|
||||||
states []yaml_emitter_state_t
|
|
||||||
|
|
||||||
/** The current emitter state. */
|
|
||||||
state yaml_emitter_state_t
|
|
||||||
|
|
||||||
/** The event queue. */
|
|
||||||
events []yaml_event_t
|
|
||||||
events_head int
|
|
||||||
|
|
||||||
/** The stack of indentation levels. */
|
|
||||||
indents []int
|
|
||||||
|
|
||||||
/** The list of tag directives. */
|
|
||||||
tag_directives []yaml_tag_directive_t
|
|
||||||
|
|
||||||
/** The current indentation level. */
|
|
||||||
indent int
|
|
||||||
|
|
||||||
/** The current flow level. */
|
|
||||||
flow_level int
|
|
||||||
|
|
||||||
/** Is it the document root context? */
|
|
||||||
root_context bool
|
|
||||||
/** Is it a sequence context? */
|
|
||||||
sequence_context bool
|
|
||||||
/** Is it a mapping context? */
|
|
||||||
mapping_context bool
|
|
||||||
/** Is it a simple mapping key context? */
|
|
||||||
simple_key_context bool
|
|
||||||
|
|
||||||
/** The current line. */
|
|
||||||
line int
|
|
||||||
/** The current column. */
|
|
||||||
column int
|
|
||||||
/** If the last character was a whitespace? */
|
|
||||||
whitespace bool
|
|
||||||
/** If the last character was an indentation character (' ', '-', '?', ':')? */
|
|
||||||
indention bool
|
|
||||||
/** If an explicit document end is required? */
|
|
||||||
open_ended bool
|
|
||||||
|
|
||||||
/** Anchor analysis. */
|
|
||||||
anchor_data struct {
|
|
||||||
/** The anchor value. */
|
|
||||||
anchor []byte
|
|
||||||
/** Is it an alias? */
|
|
||||||
alias bool
|
|
||||||
}
|
|
||||||
|
|
||||||
/** Tag analysis. */
|
|
||||||
tag_data struct {
|
|
||||||
/** The tag handle. */
|
|
||||||
handle []byte
|
|
||||||
/** The tag suffix. */
|
|
||||||
suffix []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
/** Scalar analysis. */
|
|
||||||
scalar_data struct {
|
|
||||||
/** The scalar value. */
|
|
||||||
value []byte
|
|
||||||
/** Does the scalar contain line breaks? */
|
|
||||||
multiline bool
|
|
||||||
/** Can the scalar be expessed in the flow plain style? */
|
|
||||||
flow_plain_allowed bool
|
|
||||||
/** Can the scalar be expressed in the block plain style? */
|
|
||||||
block_plain_allowed bool
|
|
||||||
/** Can the scalar be expressed in the single quoted style? */
|
|
||||||
single_quoted_allowed bool
|
|
||||||
/** Can the scalar be expressed in the literal or folded styles? */
|
|
||||||
block_allowed bool
|
|
||||||
/** The output style. */
|
|
||||||
style yaml_scalar_style_t
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @}
|
|
||||||
*/
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @name Dumper stuff
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** If the stream was already opened? */
|
|
||||||
opened bool
|
|
||||||
/** If the stream was already closed? */
|
|
||||||
closed bool
|
|
||||||
|
|
||||||
/** The information associated with the document nodes. */
|
|
||||||
anchors *struct {
|
|
||||||
/** The number of references. */
|
|
||||||
references int
|
|
||||||
/** The anchor id. */
|
|
||||||
anchor int
|
|
||||||
/** If the node has been emitted? */
|
|
||||||
serialized bool
|
|
||||||
}
|
|
||||||
|
|
||||||
/** The last assigned anchor id. */
|
|
||||||
last_anchor_id int
|
|
||||||
|
|
||||||
/** The currently emitted document. */
|
|
||||||
document *yaml_document_t
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @}
|
|
||||||
*/
|
|
||||||
|
|
||||||
}
|
|
||||||
6
vendor/github.com/coreos/pkg/capnslog/pkg_logger.go
generated
vendored
6
vendor/github.com/coreos/pkg/capnslog/pkg_logger.go
generated
vendored
@ -92,12 +92,6 @@ func (p *PackageLogger) Fatal(args ...interface{}) {
|
|||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *PackageLogger) Fatalln(args ...interface{}) {
|
|
||||||
s := fmt.Sprintln(args...)
|
|
||||||
p.internalLog(calldepth, CRITICAL, s)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error Functions
|
// Error Functions
|
||||||
|
|
||||||
func (p *PackageLogger) Errorf(format string, args ...interface{}) {
|
func (p *PackageLogger) Errorf(format string, args ...interface{}) {
|
||||||
|
|||||||
2
vendor/github.com/docker/libcompose/config/merge.go
generated
vendored
2
vendor/github.com/docker/libcompose/config/merge.go
generated
vendored
@ -6,10 +6,10 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
yaml "github.com/cloudfoundry-incubator/candiedyaml"
|
|
||||||
"github.com/docker/docker/pkg/urlutil"
|
"github.com/docker/docker/pkg/urlutil"
|
||||||
"github.com/docker/libcompose/utils"
|
"github.com/docker/libcompose/utils"
|
||||||
composeYaml "github.com/docker/libcompose/yaml"
|
composeYaml "github.com/docker/libcompose/yaml"
|
||||||
|
"gopkg.in/yaml.v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
|||||||
2
vendor/github.com/docker/libcompose/config/merge_v1.go
generated
vendored
2
vendor/github.com/docker/libcompose/config/merge_v1.go
generated
vendored
@ -5,8 +5,8 @@ import (
|
|||||||
"path"
|
"path"
|
||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
"github.com/Sirupsen/logrus"
|
||||||
yaml "github.com/cloudfoundry-incubator/candiedyaml"
|
|
||||||
"github.com/docker/libcompose/utils"
|
"github.com/docker/libcompose/utils"
|
||||||
|
"gopkg.in/yaml.v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
// MergeServicesV1 merges a v1 compose file into an existing set of service configs
|
// MergeServicesV1 merges a v1 compose file into an existing set of service configs
|
||||||
|
|||||||
2
vendor/github.com/docker/libcompose/config/merge_v2.go
generated
vendored
2
vendor/github.com/docker/libcompose/config/merge_v2.go
generated
vendored
@ -5,8 +5,8 @@ import (
|
|||||||
"path"
|
"path"
|
||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
"github.com/Sirupsen/logrus"
|
||||||
yaml "github.com/cloudfoundry-incubator/candiedyaml"
|
|
||||||
"github.com/docker/libcompose/utils"
|
"github.com/docker/libcompose/utils"
|
||||||
|
"gopkg.in/yaml.v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
// MergeServicesV2 merges a v2 compose file into an existing set of service configs
|
// MergeServicesV2 merges a v2 compose file into an existing set of service configs
|
||||||
|
|||||||
29
vendor/github.com/docker/libcompose/config/types.go
generated
vendored
29
vendor/github.com/docker/libcompose/config/types.go
generated
vendored
@ -23,9 +23,9 @@ type ServiceConfigV1 struct {
|
|||||||
CapAdd []string `yaml:"cap_add,omitempty"`
|
CapAdd []string `yaml:"cap_add,omitempty"`
|
||||||
CapDrop []string `yaml:"cap_drop,omitempty"`
|
CapDrop []string `yaml:"cap_drop,omitempty"`
|
||||||
CgroupParent string `yaml:"cgroup_parent,omitempty"`
|
CgroupParent string `yaml:"cgroup_parent,omitempty"`
|
||||||
CPUQuota int64 `yaml:"cpu_quota,omitempty"`
|
CPUQuota yaml.StringorInt `yaml:"cpu_quota,omitempty"`
|
||||||
CPUSet string `yaml:"cpuset,omitempty"`
|
CPUSet string `yaml:"cpuset,omitempty"`
|
||||||
CPUShares int64 `yaml:"cpu_shares,omitempty"`
|
CPUShares yaml.StringorInt `yaml:"cpu_shares,omitempty"`
|
||||||
Command yaml.Command `yaml:"command,flow,omitempty"`
|
Command yaml.Command `yaml:"command,flow,omitempty"`
|
||||||
ContainerName string `yaml:"container_name,omitempty"`
|
ContainerName string `yaml:"container_name,omitempty"`
|
||||||
Devices []string `yaml:"devices,omitempty"`
|
Devices []string `yaml:"devices,omitempty"`
|
||||||
@ -42,8 +42,8 @@ type ServiceConfigV1 struct {
|
|||||||
Links yaml.MaporColonSlice `yaml:"links,omitempty"`
|
Links yaml.MaporColonSlice `yaml:"links,omitempty"`
|
||||||
LogDriver string `yaml:"log_driver,omitempty"`
|
LogDriver string `yaml:"log_driver,omitempty"`
|
||||||
MacAddress string `yaml:"mac_address,omitempty"`
|
MacAddress string `yaml:"mac_address,omitempty"`
|
||||||
MemLimit int64 `yaml:"mem_limit,omitempty"`
|
MemLimit yaml.StringorInt `yaml:"mem_limit,omitempty"`
|
||||||
MemSwapLimit int64 `yaml:"memswap_limit,omitempty"`
|
MemSwapLimit yaml.StringorInt `yaml:"memswap_limit,omitempty"`
|
||||||
Name string `yaml:"name,omitempty"`
|
Name string `yaml:"name,omitempty"`
|
||||||
Net string `yaml:"net,omitempty"`
|
Net string `yaml:"net,omitempty"`
|
||||||
Pid string `yaml:"pid,omitempty"`
|
Pid string `yaml:"pid,omitempty"`
|
||||||
@ -53,7 +53,7 @@ type ServiceConfigV1 struct {
|
|||||||
Privileged bool `yaml:"privileged,omitempty"`
|
Privileged bool `yaml:"privileged,omitempty"`
|
||||||
Restart string `yaml:"restart,omitempty"`
|
Restart string `yaml:"restart,omitempty"`
|
||||||
ReadOnly bool `yaml:"read_only,omitempty"`
|
ReadOnly bool `yaml:"read_only,omitempty"`
|
||||||
ShmSize int64 `yaml:"shm_size,omitempty"`
|
ShmSize yaml.StringorInt `yaml:"shm_size,omitempty"`
|
||||||
StdinOpen bool `yaml:"stdin_open,omitempty"`
|
StdinOpen bool `yaml:"stdin_open,omitempty"`
|
||||||
SecurityOpt []string `yaml:"security_opt,omitempty"`
|
SecurityOpt []string `yaml:"security_opt,omitempty"`
|
||||||
Tty bool `yaml:"tty,omitempty"`
|
Tty bool `yaml:"tty,omitempty"`
|
||||||
@ -81,8 +81,8 @@ type ServiceConfig struct {
|
|||||||
CapAdd []string `yaml:"cap_add,omitempty"`
|
CapAdd []string `yaml:"cap_add,omitempty"`
|
||||||
CapDrop []string `yaml:"cap_drop,omitempty"`
|
CapDrop []string `yaml:"cap_drop,omitempty"`
|
||||||
CPUSet string `yaml:"cpuset,omitempty"`
|
CPUSet string `yaml:"cpuset,omitempty"`
|
||||||
CPUShares int64 `yaml:"cpu_shares,omitempty"`
|
CPUShares yaml.StringorInt `yaml:"cpu_shares,omitempty"`
|
||||||
CPUQuota int64 `yaml:"cpu_quota,omitempty"`
|
CPUQuota yaml.StringorInt `yaml:"cpu_quota,omitempty"`
|
||||||
Command yaml.Command `yaml:"command,flow,omitempty"`
|
Command yaml.Command `yaml:"command,flow,omitempty"`
|
||||||
CgroupParent string `yaml:"cgroup_parent,omitempty"`
|
CgroupParent string `yaml:"cgroup_parent,omitempty"`
|
||||||
ContainerName string `yaml:"container_name,omitempty"`
|
ContainerName string `yaml:"container_name,omitempty"`
|
||||||
@ -105,18 +105,18 @@ type ServiceConfig struct {
|
|||||||
Links yaml.MaporColonSlice `yaml:"links,omitempty"`
|
Links yaml.MaporColonSlice `yaml:"links,omitempty"`
|
||||||
Logging Log `yaml:"logging,omitempty"`
|
Logging Log `yaml:"logging,omitempty"`
|
||||||
MacAddress string `yaml:"mac_address,omitempty"`
|
MacAddress string `yaml:"mac_address,omitempty"`
|
||||||
MemLimit int64 `yaml:"mem_limit,omitempty"`
|
MemLimit yaml.StringorInt `yaml:"mem_limit,omitempty"`
|
||||||
MemSwapLimit int64 `yaml:"memswap_limit,omitempty"`
|
MemSwapLimit yaml.StringorInt `yaml:"memswap_limit,omitempty"`
|
||||||
NetworkMode string `yaml:"network_mode,omitempty"`
|
NetworkMode string `yaml:"network_mode,omitempty"`
|
||||||
Networks *yaml.Networks `yaml:"networks,omitempty"`
|
Networks *yaml.Networks `yaml:"networks,omitempty"`
|
||||||
Pid string `yaml:"pid,omitempty"`
|
Pid string `yaml:"pid,omitempty"`
|
||||||
Ports []string `yaml:"ports,omitempty"`
|
Ports []string `yaml:"ports,omitempty"`
|
||||||
Privileged bool `yaml:"privileged,omitempty"`
|
Privileged bool `yaml:"privileged,omitempty"`
|
||||||
SecurityOpt []string `yaml:"security_opt,omitempty"`
|
SecurityOpt []string `yaml:"security_opt,omitempty"`
|
||||||
ShmSize int64 `yaml:"shm_size,omitempty"`
|
ShmSize yaml.StringorInt `yaml:"shm_size,omitempty"`
|
||||||
StopSignal string `yaml:"stop_signal,omitempty"`
|
StopSignal string `yaml:"stop_signal,omitempty"`
|
||||||
VolumeDriver string `yaml:"volume_driver,omitempty"`
|
VolumeDriver string `yaml:"volume_driver,omitempty"`
|
||||||
Volumes []string `yaml:"volumes,omitempty"`
|
Volumes *yaml.Volumes `yaml:"volumes,omitempty"`
|
||||||
VolumesFrom []string `yaml:"volumes_from,omitempty"`
|
VolumesFrom []string `yaml:"volumes_from,omitempty"`
|
||||||
Uts string `yaml:"uts,omitempty"`
|
Uts string `yaml:"uts,omitempty"`
|
||||||
Restart string `yaml:"restart,omitempty"`
|
Restart string `yaml:"restart,omitempty"`
|
||||||
@ -201,6 +201,13 @@ func (c *ServiceConfigs) Add(name string, service *ServiceConfig) {
|
|||||||
c.mu.Unlock()
|
c.mu.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Remove removes the config with the specified name
|
||||||
|
func (c *ServiceConfigs) Remove(name string) {
|
||||||
|
c.mu.Lock()
|
||||||
|
delete(c.m, name)
|
||||||
|
c.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
// Len returns the len of the configs
|
// Len returns the len of the configs
|
||||||
func (c *ServiceConfigs) Len() int {
|
func (c *ServiceConfigs) Len() int {
|
||||||
c.mu.RLock()
|
c.mu.RLock()
|
||||||
|
|||||||
52
vendor/github.com/docker/libcompose/docker/convert.go
generated
vendored
52
vendor/github.com/docker/libcompose/docker/convert.go
generated
vendored
@ -16,6 +16,7 @@ import (
|
|||||||
composeclient "github.com/docker/libcompose/docker/client"
|
composeclient "github.com/docker/libcompose/docker/client"
|
||||||
"github.com/docker/libcompose/project"
|
"github.com/docker/libcompose/project"
|
||||||
"github.com/docker/libcompose/utils"
|
"github.com/docker/libcompose/utils"
|
||||||
|
// "github.com/docker/libcompose/yaml"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ConfigWrapper wraps Config, HostConfig and NetworkingConfig for a container.
|
// ConfigWrapper wraps Config, HostConfig and NetworkingConfig for a container.
|
||||||
@ -36,6 +37,16 @@ func Filter(vs []string, f func(string) bool) []string {
|
|||||||
return r
|
return r
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func toMap(vs []string) map[string]struct{} {
|
||||||
|
m := map[string]struct{}{}
|
||||||
|
for _, v := range vs {
|
||||||
|
if v != "" {
|
||||||
|
m[v] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
func isBind(s string) bool {
|
func isBind(s string) bool {
|
||||||
return strings.ContainsRune(s, ':')
|
return strings.ContainsRune(s, ':')
|
||||||
}
|
}
|
||||||
@ -58,21 +69,18 @@ func ConvertToAPI(serviceConfig *config.ServiceConfig, ctx project.Context, clie
|
|||||||
return &result, nil
|
return &result, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func isNamedVolume(volume string) bool {
|
func volumes(c *config.ServiceConfig, ctx project.Context) []string {
|
||||||
return !strings.HasPrefix(volume, ".") && !strings.HasPrefix(volume, "/") && !strings.HasPrefix(volume, "~")
|
if c.Volumes == nil {
|
||||||
}
|
return []string{}
|
||||||
|
}
|
||||||
func volumes(c *config.ServiceConfig, ctx project.Context) map[string]struct{} {
|
volumes := make([]string, len(c.Volumes.Volumes))
|
||||||
volumes := make(map[string]struct{}, len(c.Volumes))
|
for _, v := range c.Volumes.Volumes {
|
||||||
for k, v := range c.Volumes {
|
vol := v
|
||||||
if len(ctx.ComposeFiles) > 0 && !isNamedVolume(v) {
|
if len(ctx.ComposeFiles) > 0 && !project.IsNamedVolume(v.Source) {
|
||||||
v = ctx.ResourceLookup.ResolvePath(v, ctx.ComposeFiles[0])
|
sourceVol := ctx.ResourceLookup.ResolvePath(v.String(), ctx.ComposeFiles[0])
|
||||||
}
|
vol.Source = strings.SplitN(sourceVol, ":", 2)[0]
|
||||||
|
|
||||||
c.Volumes[k] = v
|
|
||||||
if isVolume(v) {
|
|
||||||
volumes[v] = struct{}{}
|
|
||||||
}
|
}
|
||||||
|
volumes = append(volumes, vol.String())
|
||||||
}
|
}
|
||||||
return volumes
|
return volumes
|
||||||
}
|
}
|
||||||
@ -141,6 +149,8 @@ func Convert(c *config.ServiceConfig, ctx project.Context, clientFactory compose
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
vols := volumes(c, ctx)
|
||||||
|
|
||||||
config := &container.Config{
|
config := &container.Config{
|
||||||
Entrypoint: strslice.StrSlice(utils.CopySlice(c.Entrypoint)),
|
Entrypoint: strslice.StrSlice(utils.CopySlice(c.Entrypoint)),
|
||||||
Hostname: c.Hostname,
|
Hostname: c.Hostname,
|
||||||
@ -154,7 +164,7 @@ func Convert(c *config.ServiceConfig, ctx project.Context, clientFactory compose
|
|||||||
Tty: c.Tty,
|
Tty: c.Tty,
|
||||||
OpenStdin: c.StdinOpen,
|
OpenStdin: c.StdinOpen,
|
||||||
WorkingDir: c.WorkingDir,
|
WorkingDir: c.WorkingDir,
|
||||||
Volumes: volumes(c, ctx),
|
Volumes: toMap(Filter(vols, isVolume)),
|
||||||
MacAddress: c.MacAddress,
|
MacAddress: c.MacAddress,
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -171,10 +181,10 @@ func Convert(c *config.ServiceConfig, ctx project.Context, clientFactory compose
|
|||||||
|
|
||||||
resources := container.Resources{
|
resources := container.Resources{
|
||||||
CgroupParent: c.CgroupParent,
|
CgroupParent: c.CgroupParent,
|
||||||
Memory: c.MemLimit,
|
Memory: int64(c.MemLimit),
|
||||||
MemorySwap: c.MemSwapLimit,
|
MemorySwap: int64(c.MemSwapLimit),
|
||||||
CPUShares: c.CPUShares,
|
CPUShares: int64(c.CPUShares),
|
||||||
CPUQuota: c.CPUQuota,
|
CPUQuota: int64(c.CPUQuota),
|
||||||
CpusetCpus: c.CPUSet,
|
CpusetCpus: c.CPUSet,
|
||||||
Ulimits: ulimits,
|
Ulimits: ulimits,
|
||||||
Devices: deviceMappings,
|
Devices: deviceMappings,
|
||||||
@ -228,7 +238,7 @@ func Convert(c *config.ServiceConfig, ctx project.Context, clientFactory compose
|
|||||||
CapDrop: strslice.StrSlice(utils.CopySlice(c.CapDrop)),
|
CapDrop: strslice.StrSlice(utils.CopySlice(c.CapDrop)),
|
||||||
ExtraHosts: utils.CopySlice(c.ExtraHosts),
|
ExtraHosts: utils.CopySlice(c.ExtraHosts),
|
||||||
Privileged: c.Privileged,
|
Privileged: c.Privileged,
|
||||||
Binds: Filter(c.Volumes, isBind),
|
Binds: Filter(vols, isBind),
|
||||||
DNS: utils.CopySlice(c.DNS),
|
DNS: utils.CopySlice(c.DNS),
|
||||||
DNSSearch: utils.CopySlice(c.DNSSearch),
|
DNSSearch: utils.CopySlice(c.DNSSearch),
|
||||||
LogConfig: container.LogConfig{
|
LogConfig: container.LogConfig{
|
||||||
@ -242,7 +252,7 @@ func Convert(c *config.ServiceConfig, ctx project.Context, clientFactory compose
|
|||||||
IpcMode: container.IpcMode(c.Ipc),
|
IpcMode: container.IpcMode(c.Ipc),
|
||||||
PortBindings: portBindings,
|
PortBindings: portBindings,
|
||||||
RestartPolicy: *restartPolicy,
|
RestartPolicy: *restartPolicy,
|
||||||
ShmSize: c.ShmSize,
|
ShmSize: int64(c.ShmSize),
|
||||||
SecurityOpt: utils.CopySlice(c.SecurityOpt),
|
SecurityOpt: utils.CopySlice(c.SecurityOpt),
|
||||||
VolumeDriver: c.VolumeDriver,
|
VolumeDriver: c.VolumeDriver,
|
||||||
Resources: resources,
|
Resources: resources,
|
||||||
|
|||||||
8
vendor/github.com/docker/libcompose/docker/project.go
generated
vendored
8
vendor/github.com/docker/libcompose/docker/project.go
generated
vendored
@ -12,6 +12,7 @@ import (
|
|||||||
"github.com/docker/libcompose/config"
|
"github.com/docker/libcompose/config"
|
||||||
"github.com/docker/libcompose/docker/client"
|
"github.com/docker/libcompose/docker/client"
|
||||||
"github.com/docker/libcompose/docker/network"
|
"github.com/docker/libcompose/docker/network"
|
||||||
|
"github.com/docker/libcompose/docker/volume"
|
||||||
"github.com/docker/libcompose/labels"
|
"github.com/docker/libcompose/labels"
|
||||||
"github.com/docker/libcompose/lookup"
|
"github.com/docker/libcompose/lookup"
|
||||||
"github.com/docker/libcompose/project"
|
"github.com/docker/libcompose/project"
|
||||||
@ -66,6 +67,13 @@ func NewProject(context *Context, parseOptions *config.ParseOptions) (project.AP
|
|||||||
context.NetworksFactory = networksFactory
|
context.NetworksFactory = networksFactory
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if context.VolumesFactory == nil {
|
||||||
|
volumesFactory := &volume.DockerFactory{
|
||||||
|
ClientFactory: context.ClientFactory,
|
||||||
|
}
|
||||||
|
context.VolumesFactory = volumesFactory
|
||||||
|
}
|
||||||
|
|
||||||
// FIXME(vdemeester) Remove the context duplication ?
|
// FIXME(vdemeester) Remove the context duplication ?
|
||||||
runtime := &Project{
|
runtime := &Project{
|
||||||
clientFactory: context.ClientFactory,
|
clientFactory: context.ClientFactory,
|
||||||
|
|||||||
154
vendor/github.com/docker/libcompose/docker/volume/volume.go
generated
vendored
Normal file
154
vendor/github.com/docker/libcompose/docker/volume/volume.go
generated
vendored
Normal file
@ -0,0 +1,154 @@
|
|||||||
|
package volume
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
|
||||||
|
"github.com/docker/engine-api/client"
|
||||||
|
"github.com/docker/engine-api/types"
|
||||||
|
"github.com/docker/libcompose/config"
|
||||||
|
|
||||||
|
composeclient "github.com/docker/libcompose/docker/client"
|
||||||
|
"github.com/docker/libcompose/project"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Volume holds attributes and method for a volume definition in compose
|
||||||
|
type Volume struct {
|
||||||
|
client client.VolumeAPIClient
|
||||||
|
projectName string
|
||||||
|
name string
|
||||||
|
driver string
|
||||||
|
driverOptions map[string]string
|
||||||
|
external bool
|
||||||
|
// TODO (shouze) missing labels
|
||||||
|
}
|
||||||
|
|
||||||
|
func (v *Volume) fullName() string {
|
||||||
|
name := v.projectName + "_" + v.name
|
||||||
|
if v.external {
|
||||||
|
name = v.name
|
||||||
|
}
|
||||||
|
return name
|
||||||
|
}
|
||||||
|
|
||||||
|
// Inspect inspect the current volume
|
||||||
|
func (v *Volume) Inspect(ctx context.Context) (types.Volume, error) {
|
||||||
|
return v.client.VolumeInspect(ctx, v.fullName())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove removes the current volume (from docker engine)
|
||||||
|
func (v *Volume) Remove(ctx context.Context) error {
|
||||||
|
if v.external {
|
||||||
|
fmt.Printf("Volume %s is external, skipping", v.fullName())
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
fmt.Printf("Removing volume %q\n", v.fullName())
|
||||||
|
return v.client.VolumeRemove(ctx, v.fullName())
|
||||||
|
}
|
||||||
|
|
||||||
|
// EnsureItExists make sure the volume exists and return an error if it does not exists
|
||||||
|
// and cannot be created.
|
||||||
|
func (v *Volume) EnsureItExists(ctx context.Context) error {
|
||||||
|
volumeResource, err := v.Inspect(ctx)
|
||||||
|
if v.external {
|
||||||
|
if client.IsErrVolumeNotFound(err) {
|
||||||
|
// FIXME(shouze) introduce some libcompose error type
|
||||||
|
return fmt.Errorf("Volume %s declared as external, but could not be found. Please create the volume manually using docker volume create %s and try again", v.name, v.name)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err != nil && client.IsErrVolumeNotFound(err) {
|
||||||
|
return v.create(ctx)
|
||||||
|
}
|
||||||
|
if volumeResource.Driver != v.driver {
|
||||||
|
return fmt.Errorf("Volume %q needs to be recreated - driver has changed", v.name)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (v *Volume) create(ctx context.Context) error {
|
||||||
|
fmt.Printf("Creating volume %q with driver %q\n", v.fullName(), v.driver)
|
||||||
|
_, err := v.client.VolumeCreate(ctx, types.VolumeCreateRequest{
|
||||||
|
Name: v.fullName(),
|
||||||
|
Driver: v.driver,
|
||||||
|
DriverOpts: v.driverOptions,
|
||||||
|
// TODO (shouze) missing labels
|
||||||
|
})
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewVolume creates a new volume from the specified name and config.
|
||||||
|
func NewVolume(projectName, name string, config *config.VolumeConfig, client client.VolumeAPIClient) *Volume {
|
||||||
|
return &Volume{
|
||||||
|
client: client,
|
||||||
|
projectName: projectName,
|
||||||
|
name: name,
|
||||||
|
driver: config.Driver,
|
||||||
|
driverOptions: config.DriverOpts,
|
||||||
|
external: config.External.External,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Volumes holds a list of volume
|
||||||
|
type Volumes struct {
|
||||||
|
volumes []*Volume
|
||||||
|
volumeEnabled bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initialize make sure volume exists if volume is enabled
|
||||||
|
func (v *Volumes) Initialize(ctx context.Context) error {
|
||||||
|
if !v.volumeEnabled {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
for _, volume := range v.volumes {
|
||||||
|
err := volume.EnsureItExists(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove removes volumes (clean-up)
|
||||||
|
func (v *Volumes) Remove(ctx context.Context) error {
|
||||||
|
if !v.volumeEnabled {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
for _, volume := range v.volumes {
|
||||||
|
err := volume.Remove(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// VolumesFromServices creates a new Volumes struct based on volumes configurations and
|
||||||
|
// services configuration. If a volume is defined but not used by any service, it will return
|
||||||
|
// an error along the Volumes.
|
||||||
|
func VolumesFromServices(cli client.VolumeAPIClient, projectName string, volumeConfigs map[string]*config.VolumeConfig, services *config.ServiceConfigs, volumeEnabled bool) (*Volumes, error) {
|
||||||
|
var err error
|
||||||
|
volumes := make([]*Volume, 0, len(volumeConfigs))
|
||||||
|
for name, config := range volumeConfigs {
|
||||||
|
volume := NewVolume(projectName, name, config, cli)
|
||||||
|
volumes = append(volumes, volume)
|
||||||
|
}
|
||||||
|
return &Volumes{
|
||||||
|
volumes: volumes,
|
||||||
|
volumeEnabled: volumeEnabled,
|
||||||
|
}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// DockerFactory implements project.VolumesFactory
|
||||||
|
type DockerFactory struct {
|
||||||
|
ClientFactory composeclient.Factory
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create implements project.VolumesFactory Create method.
|
||||||
|
// It creates a Volumes (that implements project.Volumes) from specified configurations.
|
||||||
|
func (f *DockerFactory) Create(projectName string, volumeConfigs map[string]*config.VolumeConfig, serviceConfigs *config.ServiceConfigs, volumeEnabled bool) (project.Volumes, error) {
|
||||||
|
cli := f.ClientFactory.Create(nil)
|
||||||
|
return VolumesFromServices(cli, projectName, volumeConfigs, serviceConfigs, volumeEnabled)
|
||||||
|
}
|
||||||
1
vendor/github.com/docker/libcompose/project/context.go
generated
vendored
1
vendor/github.com/docker/libcompose/project/context.go
generated
vendored
@ -25,6 +25,7 @@ type Context struct {
|
|||||||
isOpen bool
|
isOpen bool
|
||||||
ServiceFactory ServiceFactory
|
ServiceFactory ServiceFactory
|
||||||
NetworksFactory NetworksFactory
|
NetworksFactory NetworksFactory
|
||||||
|
VolumesFactory VolumesFactory
|
||||||
EnvironmentLookup config.EnvironmentLookup
|
EnvironmentLookup config.EnvironmentLookup
|
||||||
ResourceLookup config.ResourceLookup
|
ResourceLookup config.ResourceLookup
|
||||||
LoggerFactory logger.Factory
|
LoggerFactory logger.Factory
|
||||||
|
|||||||
17
vendor/github.com/docker/libcompose/project/interface.go
generated
vendored
17
vendor/github.com/docker/libcompose/project/interface.go
generated
vendored
@ -37,9 +37,24 @@ type APIProject interface {
|
|||||||
CreateService(name string) (Service, error)
|
CreateService(name string) (Service, error)
|
||||||
AddConfig(name string, config *config.ServiceConfig) error
|
AddConfig(name string, config *config.ServiceConfig) error
|
||||||
Load(bytes []byte) error
|
Load(bytes []byte) error
|
||||||
ListStoppedContainers(ctx context.Context, services ...string) ([]string, error)
|
Containers(ctx context.Context, filter Filter, services ...string) ([]string, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Filter holds filter element to filter containers
|
||||||
|
type Filter struct {
|
||||||
|
State State
|
||||||
|
}
|
||||||
|
|
||||||
|
// State defines the supported state you can filter on
|
||||||
|
type State string
|
||||||
|
|
||||||
|
// Definitions of filter states
|
||||||
|
const (
|
||||||
|
AnyState = State("")
|
||||||
|
Running = State("running")
|
||||||
|
Stopped = State("stopped")
|
||||||
|
)
|
||||||
|
|
||||||
// RuntimeProject defines runtime-specific methods for a libcompose implementation.
|
// RuntimeProject defines runtime-specific methods for a libcompose implementation.
|
||||||
type RuntimeProject interface {
|
type RuntimeProject interface {
|
||||||
RemoveOrphans(ctx context.Context, projectName string, serviceConfigs *config.ServiceConfigs) error
|
RemoveOrphans(ctx context.Context, projectName string, serviceConfigs *config.ServiceConfigs) error
|
||||||
|
|||||||
131
vendor/github.com/docker/libcompose/project/project.go
generated
vendored
131
vendor/github.com/docker/libcompose/project/project.go
generated
vendored
@ -31,6 +31,7 @@ type Project struct {
|
|||||||
|
|
||||||
runtime RuntimeProject
|
runtime RuntimeProject
|
||||||
networks Networks
|
networks Networks
|
||||||
|
volumes Volumes
|
||||||
configVersion string
|
configVersion string
|
||||||
context *Context
|
context *Context
|
||||||
reload []string
|
reload []string
|
||||||
@ -205,6 +206,31 @@ func (p *Project) load(file string, bytes []byte) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Update network configuration a little bit
|
// Update network configuration a little bit
|
||||||
|
p.handleNetworkConfig()
|
||||||
|
p.handleVolumeConfig()
|
||||||
|
|
||||||
|
if p.context.NetworksFactory != nil {
|
||||||
|
networks, err := p.context.NetworksFactory.Create(p.Name, p.NetworkConfigs, p.ServiceConfigs, p.isNetworkEnabled())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
p.networks = networks
|
||||||
|
}
|
||||||
|
|
||||||
|
if p.context.VolumesFactory != nil {
|
||||||
|
volumes, err := p.context.VolumesFactory.Create(p.Name, p.VolumeConfigs, p.ServiceConfigs, p.isVolumeEnabled())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
p.volumes = volumes
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Project) handleNetworkConfig() {
|
||||||
if p.isNetworkEnabled() {
|
if p.isNetworkEnabled() {
|
||||||
for _, serviceName := range p.ServiceConfigs.Keys() {
|
for _, serviceName := range p.ServiceConfigs.Keys() {
|
||||||
serviceConfig, _ := p.ServiceConfigs.Get(serviceName)
|
serviceConfig, _ := p.ServiceConfigs.Get(serviceName)
|
||||||
@ -238,24 +264,47 @@ func (p *Project) load(file string, bytes []byte) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// FIXME(vdemeester) Not sure about this..
|
|
||||||
if p.context.NetworksFactory != nil {
|
|
||||||
networks, err := p.context.NetworksFactory.Create(p.Name, p.NetworkConfigs, p.ServiceConfigs, p.isNetworkEnabled())
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
p.networks = networks
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *Project) isNetworkEnabled() bool {
|
func (p *Project) isNetworkEnabled() bool {
|
||||||
return p.configVersion == "2"
|
return p.configVersion == "2"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (p *Project) handleVolumeConfig() {
|
||||||
|
if p.isVolumeEnabled() {
|
||||||
|
for _, serviceName := range p.ServiceConfigs.Keys() {
|
||||||
|
serviceConfig, _ := p.ServiceConfigs.Get(serviceName)
|
||||||
|
// Consolidate the name of the volume
|
||||||
|
// FIXME(vdemeester) probably shouldn't be there, maybe move that to interface/factory
|
||||||
|
if serviceConfig.Volumes == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for _, volume := range serviceConfig.Volumes.Volumes {
|
||||||
|
if !IsNamedVolume(volume.Source) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
vol, ok := p.VolumeConfigs[volume.Source]
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if vol.External.External {
|
||||||
|
if vol.External.Name != "" {
|
||||||
|
volume.Source = vol.External.Name
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
volume.Source = p.Name + "_" + volume.Source
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Project) isVolumeEnabled() bool {
|
||||||
|
return p.configVersion == "2"
|
||||||
|
}
|
||||||
|
|
||||||
// initialize sets up required element for project before any action (on project and service).
|
// initialize sets up required element for project before any action (on project and service).
|
||||||
// This means it's not needed to be called on Config for example.
|
// This means it's not needed to be called on Config for example.
|
||||||
func (p *Project) initialize(ctx context.Context) error {
|
func (p *Project) initialize(ctx context.Context) error {
|
||||||
@ -264,7 +313,11 @@ func (p *Project) initialize(ctx context.Context) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// TODO Initialize volumes
|
if p.volumes != nil {
|
||||||
|
if err := p.volumes.Initialize(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -337,6 +390,16 @@ func (p *Project) Down(ctx context.Context, opts options.Down, services ...strin
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if opts.RemoveVolume {
|
||||||
|
volumes, err := p.context.VolumesFactory.Create(p.Name, p.VolumeConfigs, p.ServiceConfigs, p.isVolumeEnabled())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := volumes.Remove(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return p.forEach([]string{}, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) {
|
return p.forEach([]string{}, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) {
|
||||||
wrapper.Do(wrappers, events.NoEvent, events.NoEvent, func(service Service) error {
|
wrapper.Do(wrappers, events.NoEvent, events.NoEvent, func(service Service) error {
|
||||||
return service.RemoveImage(ctx, opts.RemoveImages)
|
return service.RemoveImage(ctx, opts.RemoveImages)
|
||||||
@ -496,37 +559,50 @@ func (p *Project) Pull(ctx context.Context, services ...string) error {
|
|||||||
}), nil)
|
}), nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListStoppedContainers lists the stopped containers for the specified services.
|
// Containers lists the containers for the specified services. Can be filter using
|
||||||
func (p *Project) ListStoppedContainers(ctx context.Context, services ...string) ([]string, error) {
|
// the Filter struct.
|
||||||
stoppedContainers := []string{}
|
func (p *Project) Containers(ctx context.Context, filter Filter, services ...string) ([]string, error) {
|
||||||
|
containers := []string{}
|
||||||
err := p.forEach(services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) {
|
err := p.forEach(services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) {
|
||||||
wrapper.Do(nil, events.NoEvent, events.NoEvent, func(service Service) error {
|
wrapper.Do(nil, events.NoEvent, events.NoEvent, func(service Service) error {
|
||||||
containers, innerErr := service.Containers(ctx)
|
serviceContainers, innerErr := service.Containers(ctx)
|
||||||
if innerErr != nil {
|
if innerErr != nil {
|
||||||
return innerErr
|
return innerErr
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, container := range containers {
|
for _, container := range serviceContainers {
|
||||||
running, innerErr := container.IsRunning(ctx)
|
running, innerErr := container.IsRunning(ctx)
|
||||||
if innerErr != nil {
|
if innerErr != nil {
|
||||||
log.Error(innerErr)
|
log.Error(innerErr)
|
||||||
}
|
}
|
||||||
if !running {
|
switch filter.State {
|
||||||
containerID, innerErr := container.ID()
|
case Running:
|
||||||
if innerErr != nil {
|
if !running {
|
||||||
log.Error(innerErr)
|
continue
|
||||||
}
|
}
|
||||||
stoppedContainers = append(stoppedContainers, containerID)
|
case Stopped:
|
||||||
|
if running {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
case AnyState:
|
||||||
|
// Don't do a thing
|
||||||
|
default:
|
||||||
|
// Invalid state filter
|
||||||
|
return fmt.Errorf("Invalid container filter: %s", filter.State)
|
||||||
}
|
}
|
||||||
|
containerID, innerErr := container.ID()
|
||||||
|
if innerErr != nil {
|
||||||
|
log.Error(innerErr)
|
||||||
|
}
|
||||||
|
containers = append(containers, containerID)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
}), nil)
|
}), nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return stoppedContainers, nil
|
return containers, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Delete removes the specified services (like docker rm).
|
// Delete removes the specified services (like docker rm).
|
||||||
@ -733,3 +809,8 @@ func (p *Project) Notify(eventType events.EventType, serviceName string, data ma
|
|||||||
l <- event
|
l <- event
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// IsNamedVolume returns whether the specified volume (string) is a named volume or not.
|
||||||
|
func IsNamedVolume(volume string) bool {
|
||||||
|
return !strings.HasPrefix(volume, ".") && !strings.HasPrefix(volume, "/") && !strings.HasPrefix(volume, "~")
|
||||||
|
}
|
||||||
|
|||||||
19
vendor/github.com/docker/libcompose/project/volume.go
generated
vendored
Normal file
19
vendor/github.com/docker/libcompose/project/volume.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
|||||||
|
package project
|
||||||
|
|
||||||
|
import (
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
|
||||||
|
"github.com/docker/libcompose/config"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Volumes defines the methods a libcompose volume aggregate should define.
|
||||||
|
type Volumes interface {
|
||||||
|
Initialize(ctx context.Context) error
|
||||||
|
Remove(ctx context.Context) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// VolumesFactory is an interface factory to create Volumes object for the specified
|
||||||
|
// configurations (service, volumes, …)
|
||||||
|
type VolumesFactory interface {
|
||||||
|
Create(projectName string, volumeConfigs map[string]*config.VolumeConfig, serviceConfigs *config.ServiceConfigs, volumeEnabled bool) (Volumes, error)
|
||||||
|
}
|
||||||
2
vendor/github.com/docker/libcompose/utils/util.go
generated
vendored
2
vendor/github.com/docker/libcompose/utils/util.go
generated
vendored
@ -6,7 +6,7 @@ import (
|
|||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
"github.com/Sirupsen/logrus"
|
||||||
|
|
||||||
yaml "github.com/cloudfoundry-incubator/candiedyaml"
|
"gopkg.in/yaml.v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
// InParallel holds a pool and a waitgroup to execute tasks in parallel and to be able
|
// InParallel holds a pool and a waitgroup to execute tasks in parallel and to be able
|
||||||
|
|||||||
2
vendor/github.com/docker/libcompose/version/version.go
generated
vendored
2
vendor/github.com/docker/libcompose/version/version.go
generated
vendored
@ -2,7 +2,7 @@ package version
|
|||||||
|
|
||||||
var (
|
var (
|
||||||
// VERSION should be updated by hand at each release
|
// VERSION should be updated by hand at each release
|
||||||
VERSION = "0.3.0-dev"
|
VERSION = "0.3.0"
|
||||||
|
|
||||||
// GITCOMMIT will be overwritten automatically by the build system
|
// GITCOMMIT will be overwritten automatically by the build system
|
||||||
GITCOMMIT = "HEAD"
|
GITCOMMIT = "HEAD"
|
||||||
|
|||||||
29
vendor/github.com/docker/libcompose/yaml/build.go
generated
vendored
29
vendor/github.com/docker/libcompose/yaml/build.go
generated
vendored
@ -1,6 +1,7 @@
|
|||||||
package yaml
|
package yaml
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
@ -15,7 +16,7 @@ type Build struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// MarshalYAML implements the Marshaller interface.
|
// MarshalYAML implements the Marshaller interface.
|
||||||
func (b Build) MarshalYAML() (tag string, value interface{}, err error) {
|
func (b Build) MarshalYAML() (interface{}, error) {
|
||||||
m := map[string]interface{}{}
|
m := map[string]interface{}{}
|
||||||
if b.Context != "" {
|
if b.Context != "" {
|
||||||
m["context"] = b.Context
|
m["context"] = b.Context
|
||||||
@ -26,16 +27,20 @@ func (b Build) MarshalYAML() (tag string, value interface{}, err error) {
|
|||||||
if len(b.Args) > 0 {
|
if len(b.Args) > 0 {
|
||||||
m["args"] = b.Args
|
m["args"] = b.Args
|
||||||
}
|
}
|
||||||
return "", m, nil
|
return m, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the Unmarshaller interface.
|
// UnmarshalYAML implements the Unmarshaller interface.
|
||||||
func (b *Build) UnmarshalYAML(tag string, value interface{}) error {
|
func (b *Build) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
switch v := value.(type) {
|
var stringType string
|
||||||
case string:
|
if err := unmarshal(&stringType); err == nil {
|
||||||
b.Context = v
|
b.Context = stringType
|
||||||
case map[interface{}]interface{}:
|
return nil
|
||||||
for mapKey, mapValue := range v {
|
}
|
||||||
|
|
||||||
|
var mapType map[interface{}]interface{}
|
||||||
|
if err := unmarshal(&mapType); err == nil {
|
||||||
|
for mapKey, mapValue := range mapType {
|
||||||
switch mapKey {
|
switch mapKey {
|
||||||
case "context":
|
case "context":
|
||||||
b.Context = mapValue.(string)
|
b.Context = mapValue.(string)
|
||||||
@ -52,10 +57,10 @@ func (b *Build) UnmarshalYAML(tag string, value interface{}) error {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
default:
|
return nil
|
||||||
return fmt.Errorf("Failed to unmarshal Build: %#v", value)
|
|
||||||
}
|
}
|
||||||
return nil
|
|
||||||
|
return errors.New("Failed to unmarshal Build")
|
||||||
}
|
}
|
||||||
|
|
||||||
func handleBuildArgs(value interface{}) (map[string]string, error) {
|
func handleBuildArgs(value interface{}) (map[string]string, error) {
|
||||||
@ -98,6 +103,8 @@ func handleBuildArgMap(m map[interface{}]interface{}) (map[string]string, error)
|
|||||||
switch a := mapValue.(type) {
|
switch a := mapValue.(type) {
|
||||||
case string:
|
case string:
|
||||||
argValue = a
|
argValue = a
|
||||||
|
case int:
|
||||||
|
argValue = strconv.Itoa(a)
|
||||||
case int64:
|
case int64:
|
||||||
argValue = strconv.Itoa(int(a))
|
argValue = strconv.Itoa(int(a))
|
||||||
default:
|
default:
|
||||||
|
|||||||
36
vendor/github.com/docker/libcompose/yaml/command.go
generated
vendored
36
vendor/github.com/docker/libcompose/yaml/command.go
generated
vendored
@ -1,6 +1,7 @@
|
|||||||
package yaml
|
package yaml
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/docker/engine-api/types/strslice"
|
"github.com/docker/engine-api/types/strslice"
|
||||||
@ -11,22 +12,31 @@ import (
|
|||||||
type Command strslice.StrSlice
|
type Command strslice.StrSlice
|
||||||
|
|
||||||
// UnmarshalYAML implements the Unmarshaller interface.
|
// UnmarshalYAML implements the Unmarshaller interface.
|
||||||
func (s *Command) UnmarshalYAML(tag string, value interface{}) error {
|
func (s *Command) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
switch value := value.(type) {
|
var stringType string
|
||||||
case []interface{}:
|
if err := unmarshal(&stringType); err == nil {
|
||||||
parts, err := toStrings(value)
|
parts, err := shlex.Split(stringType)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
*s = parts
|
*s = parts
|
||||||
case string:
|
return nil
|
||||||
parts, err := shlex.Split(value)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
*s = parts
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("Failed to unmarshal Command: %#v", value)
|
|
||||||
}
|
}
|
||||||
return nil
|
|
||||||
|
var sliceType []interface{}
|
||||||
|
if err := unmarshal(&sliceType); err == nil {
|
||||||
|
parts, err := toStrings(sliceType)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*s = parts
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var interfaceType interface{}
|
||||||
|
if err := unmarshal(&interfaceType); err == nil {
|
||||||
|
fmt.Println(interfaceType)
|
||||||
|
}
|
||||||
|
|
||||||
|
return errors.New("Failed to unmarshal Command")
|
||||||
}
|
}
|
||||||
|
|||||||
41
vendor/github.com/docker/libcompose/yaml/external.go
generated
vendored
41
vendor/github.com/docker/libcompose/yaml/external.go
generated
vendored
@ -1,9 +1,5 @@
|
|||||||
package yaml
|
package yaml
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
)
|
|
||||||
|
|
||||||
// External represents an external network entry in compose file.
|
// External represents an external network entry in compose file.
|
||||||
// It can be a boolean (true|false) or have a name
|
// It can be a boolean (true|false) or have a name
|
||||||
type External struct {
|
type External struct {
|
||||||
@ -12,33 +8,30 @@ type External struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// MarshalYAML implements the Marshaller interface.
|
// MarshalYAML implements the Marshaller interface.
|
||||||
func (n External) MarshalYAML() (tag string, value interface{}, err error) {
|
func (n External) MarshalYAML() (interface{}, error) {
|
||||||
if n.Name == "" {
|
if n.Name == "" {
|
||||||
return "", n.External, nil
|
return n.External, nil
|
||||||
}
|
}
|
||||||
return "", map[string]interface{}{
|
return map[string]interface{}{
|
||||||
"name": n.Name,
|
"name": n.Name,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the Unmarshaller interface.
|
// UnmarshalYAML implements the Unmarshaller interface.
|
||||||
func (n *External) UnmarshalYAML(tag string, value interface{}) error {
|
func (n *External) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
switch v := value.(type) {
|
if err := unmarshal(&n.External); err == nil {
|
||||||
case bool:
|
return nil
|
||||||
n.External = v
|
|
||||||
case map[interface{}]interface{}:
|
|
||||||
for mapKey, mapValue := range v {
|
|
||||||
switch mapKey {
|
|
||||||
case "name":
|
|
||||||
n.Name = mapValue.(string)
|
|
||||||
default:
|
|
||||||
// Ignore unknown keys
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
n.External = true
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("Failed to unmarshal External: %#v", value)
|
|
||||||
}
|
}
|
||||||
|
var dummyExternal struct {
|
||||||
|
Name string
|
||||||
|
}
|
||||||
|
|
||||||
|
err := unmarshal(&dummyExternal)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
n.Name = dummyExternal.Name
|
||||||
|
n.External = true
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
27
vendor/github.com/docker/libcompose/yaml/network.go
generated
vendored
27
vendor/github.com/docker/libcompose/yaml/network.go
generated
vendored
@ -1,6 +1,7 @@
|
|||||||
package yaml
|
package yaml
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -20,20 +21,20 @@ type Network struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// MarshalYAML implements the Marshaller interface.
|
// MarshalYAML implements the Marshaller interface.
|
||||||
func (n Networks) MarshalYAML() (tag string, value interface{}, err error) {
|
func (n Networks) MarshalYAML() (interface{}, error) {
|
||||||
m := map[string]*Network{}
|
m := map[string]*Network{}
|
||||||
for _, network := range n.Networks {
|
for _, network := range n.Networks {
|
||||||
m[network.Name] = network
|
m[network.Name] = network
|
||||||
}
|
}
|
||||||
return "", m, nil
|
return m, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the Unmarshaller interface.
|
// UnmarshalYAML implements the Unmarshaller interface.
|
||||||
func (n *Networks) UnmarshalYAML(tag string, value interface{}) error {
|
func (n *Networks) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
switch v := value.(type) {
|
var sliceType []interface{}
|
||||||
case []interface{}:
|
if err := unmarshal(&sliceType); err == nil {
|
||||||
n.Networks = []*Network{}
|
n.Networks = []*Network{}
|
||||||
for _, network := range v {
|
for _, network := range sliceType {
|
||||||
name, ok := network.(string)
|
name, ok := network.(string)
|
||||||
if !ok {
|
if !ok {
|
||||||
return fmt.Errorf("Cannot unmarshal '%v' to type %T into a string value", name, name)
|
return fmt.Errorf("Cannot unmarshal '%v' to type %T into a string value", name, name)
|
||||||
@ -42,9 +43,13 @@ func (n *Networks) UnmarshalYAML(tag string, value interface{}) error {
|
|||||||
Name: name,
|
Name: name,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
case map[interface{}]interface{}:
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var mapType map[interface{}]interface{}
|
||||||
|
if err := unmarshal(&mapType); err == nil {
|
||||||
n.Networks = []*Network{}
|
n.Networks = []*Network{}
|
||||||
for mapKey, mapValue := range v {
|
for mapKey, mapValue := range mapType {
|
||||||
name, ok := mapKey.(string)
|
name, ok := mapKey.(string)
|
||||||
if !ok {
|
if !ok {
|
||||||
return fmt.Errorf("Cannot unmarshal '%v' to type %T into a string value", name, name)
|
return fmt.Errorf("Cannot unmarshal '%v' to type %T into a string value", name, name)
|
||||||
@ -55,10 +60,10 @@ func (n *Networks) UnmarshalYAML(tag string, value interface{}) error {
|
|||||||
}
|
}
|
||||||
n.Networks = append(n.Networks, network)
|
n.Networks = append(n.Networks, network)
|
||||||
}
|
}
|
||||||
default:
|
return nil
|
||||||
return fmt.Errorf("Failed to unmarshal Networks: %#v", value)
|
|
||||||
}
|
}
|
||||||
return nil
|
|
||||||
|
return errors.New("Failed to unmarshal Networks")
|
||||||
}
|
}
|
||||||
|
|
||||||
func handleNetwork(name string, value interface{}) (*Network, error) {
|
func handleNetwork(name string, value interface{}) (*Network, error) {
|
||||||
|
|||||||
129
vendor/github.com/docker/libcompose/yaml/types_yaml.go
generated
vendored
129
vendor/github.com/docker/libcompose/yaml/types_yaml.go
generated
vendored
@ -1,6 +1,7 @@
|
|||||||
package yaml
|
package yaml
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
@ -8,50 +9,65 @@ import (
|
|||||||
"github.com/docker/engine-api/types/strslice"
|
"github.com/docker/engine-api/types/strslice"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Stringorslice represents a string or an array of strings.
|
// StringorInt represents a string or an integer.
|
||||||
// Using engine-api Strslice and augment it with YAML marshalling stuff.
|
type StringorInt int64
|
||||||
|
|
||||||
|
// UnmarshalYAML implements the Unmarshaller interface.
|
||||||
|
func (s *StringorInt) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
|
var intType int64
|
||||||
|
if err := unmarshal(&intType); err == nil {
|
||||||
|
*s = StringorInt(intType)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var stringType string
|
||||||
|
if err := unmarshal(&stringType); err == nil {
|
||||||
|
intType, err := strconv.ParseInt(stringType, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*s = StringorInt(intType)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return errors.New("Failed to unmarshal StringorInt")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stringorslice represents
|
||||||
|
// Using engine-api Strslice and augment it with YAML marshalling stuff. a string or an array of strings.
|
||||||
type Stringorslice strslice.StrSlice
|
type Stringorslice strslice.StrSlice
|
||||||
|
|
||||||
// UnmarshalYAML implements the Unmarshaller interface.
|
// UnmarshalYAML implements the Unmarshaller interface.
|
||||||
func (s *Stringorslice) UnmarshalYAML(tag string, value interface{}) error {
|
func (s *Stringorslice) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
switch value := value.(type) {
|
var stringType string
|
||||||
case []interface{}:
|
if err := unmarshal(&stringType); err == nil {
|
||||||
parts, err := toStrings(value)
|
*s = []string{stringType}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var sliceType []interface{}
|
||||||
|
if err := unmarshal(&sliceType); err == nil {
|
||||||
|
parts, err := toStrings(sliceType)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
*s = parts
|
*s = parts
|
||||||
case string:
|
return nil
|
||||||
*s = []string{value}
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("Failed to unmarshal Stringorslice: %#v", value)
|
|
||||||
}
|
}
|
||||||
return nil
|
|
||||||
|
return errors.New("Failed to unmarshal Stringorslice")
|
||||||
}
|
}
|
||||||
|
|
||||||
// SliceorMap represents a slice or a map of strings.
|
// SliceorMap represents a slice or a map of strings.
|
||||||
type SliceorMap map[string]string
|
type SliceorMap map[string]string
|
||||||
|
|
||||||
// UnmarshalYAML implements the Unmarshaller interface.
|
// UnmarshalYAML implements the Unmarshaller interface.
|
||||||
func (s *SliceorMap) UnmarshalYAML(tag string, value interface{}) error {
|
func (s *SliceorMap) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
switch value := value.(type) {
|
|
||||||
case map[interface{}]interface{}:
|
var sliceType []interface{}
|
||||||
|
if err := unmarshal(&sliceType); err == nil {
|
||||||
parts := map[string]string{}
|
parts := map[string]string{}
|
||||||
for k, v := range value {
|
for _, s := range sliceType {
|
||||||
if sk, ok := k.(string); ok {
|
|
||||||
if sv, ok := v.(string); ok {
|
|
||||||
parts[sk] = sv
|
|
||||||
} else {
|
|
||||||
return fmt.Errorf("Cannot unmarshal '%v' of type %T into a string value", v, v)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return fmt.Errorf("Cannot unmarshal '%v' of type %T into a string value", k, k)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
*s = parts
|
|
||||||
case []interface{}:
|
|
||||||
parts := map[string]string{}
|
|
||||||
for _, s := range value {
|
|
||||||
if str, ok := s.(string); ok {
|
if str, ok := s.(string); ok {
|
||||||
str := strings.TrimSpace(str)
|
str := strings.TrimSpace(str)
|
||||||
keyValueSlice := strings.SplitN(str, "=", 2)
|
keyValueSlice := strings.SplitN(str, "=", 2)
|
||||||
@ -67,10 +83,28 @@ func (s *SliceorMap) UnmarshalYAML(tag string, value interface{}) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
*s = parts
|
*s = parts
|
||||||
default:
|
return nil
|
||||||
return fmt.Errorf("Failed to unmarshal SliceorMap: %#v", value)
|
|
||||||
}
|
}
|
||||||
return nil
|
|
||||||
|
var mapType map[interface{}]interface{}
|
||||||
|
if err := unmarshal(&mapType); err == nil {
|
||||||
|
parts := map[string]string{}
|
||||||
|
for k, v := range mapType {
|
||||||
|
if sk, ok := k.(string); ok {
|
||||||
|
if sv, ok := v.(string); ok {
|
||||||
|
parts[sk] = sv
|
||||||
|
} else {
|
||||||
|
return fmt.Errorf("Cannot unmarshal '%v' of type %T into a string value", v, v)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return fmt.Errorf("Cannot unmarshal '%v' of type %T into a string value", k, k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
*s = parts
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return errors.New("Failed to unmarshal SliceorMap")
|
||||||
}
|
}
|
||||||
|
|
||||||
// MaporEqualSlice represents a slice of strings that gets unmarshal from a
|
// MaporEqualSlice represents a slice of strings that gets unmarshal from a
|
||||||
@ -78,8 +112,8 @@ func (s *SliceorMap) UnmarshalYAML(tag string, value interface{}) error {
|
|||||||
type MaporEqualSlice []string
|
type MaporEqualSlice []string
|
||||||
|
|
||||||
// UnmarshalYAML implements the Unmarshaller interface.
|
// UnmarshalYAML implements the Unmarshaller interface.
|
||||||
func (s *MaporEqualSlice) UnmarshalYAML(tag string, value interface{}) error {
|
func (s *MaporEqualSlice) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
parts, err := unmarshalToStringOrSepMapParts(value, "=")
|
parts, err := unmarshalToStringOrSepMapParts(unmarshal, "=")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -97,8 +131,8 @@ func (s *MaporEqualSlice) ToMap() map[string]string {
|
|||||||
type MaporColonSlice []string
|
type MaporColonSlice []string
|
||||||
|
|
||||||
// UnmarshalYAML implements the Unmarshaller interface.
|
// UnmarshalYAML implements the Unmarshaller interface.
|
||||||
func (s *MaporColonSlice) UnmarshalYAML(tag string, value interface{}) error {
|
func (s *MaporColonSlice) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
parts, err := unmarshalToStringOrSepMapParts(value, ":")
|
parts, err := unmarshalToStringOrSepMapParts(unmarshal, ":")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -116,8 +150,8 @@ func (s *MaporColonSlice) ToMap() map[string]string {
|
|||||||
type MaporSpaceSlice []string
|
type MaporSpaceSlice []string
|
||||||
|
|
||||||
// UnmarshalYAML implements the Unmarshaller interface.
|
// UnmarshalYAML implements the Unmarshaller interface.
|
||||||
func (s *MaporSpaceSlice) UnmarshalYAML(tag string, value interface{}) error {
|
func (s *MaporSpaceSlice) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
parts, err := unmarshalToStringOrSepMapParts(value, " ")
|
parts, err := unmarshalToStringOrSepMapParts(unmarshal, " ")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -130,15 +164,16 @@ func (s *MaporSpaceSlice) ToMap() map[string]string {
|
|||||||
return toMap(*s, " ")
|
return toMap(*s, " ")
|
||||||
}
|
}
|
||||||
|
|
||||||
func unmarshalToStringOrSepMapParts(value interface{}, key string) ([]string, error) {
|
func unmarshalToStringOrSepMapParts(unmarshal func(interface{}) error, key string) ([]string, error) {
|
||||||
switch value := value.(type) {
|
var sliceType []interface{}
|
||||||
case []interface{}:
|
if err := unmarshal(&sliceType); err == nil {
|
||||||
return toStrings(value)
|
return toStrings(sliceType)
|
||||||
case map[interface{}]interface{}:
|
|
||||||
return toSepMapParts(value, key)
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("Failed to unmarshal Map or Slice: %#v", value)
|
|
||||||
}
|
}
|
||||||
|
var mapType map[interface{}]interface{}
|
||||||
|
if err := unmarshal(&mapType); err == nil {
|
||||||
|
return toSepMapParts(mapType, key)
|
||||||
|
}
|
||||||
|
return nil, errors.New("Failed to unmarshal MaporSlice")
|
||||||
}
|
}
|
||||||
|
|
||||||
func toSepMapParts(value map[interface{}]interface{}, sep string) ([]string, error) {
|
func toSepMapParts(value map[interface{}]interface{}, sep string) ([]string, error) {
|
||||||
@ -150,6 +185,8 @@ func toSepMapParts(value map[interface{}]interface{}, sep string) ([]string, err
|
|||||||
if sk, ok := k.(string); ok {
|
if sk, ok := k.(string); ok {
|
||||||
if sv, ok := v.(string); ok {
|
if sv, ok := v.(string); ok {
|
||||||
parts = append(parts, sk+sep+sv)
|
parts = append(parts, sk+sep+sv)
|
||||||
|
} else if sv, ok := v.(int); ok {
|
||||||
|
parts = append(parts, sk+sep+strconv.Itoa(sv))
|
||||||
} else if sv, ok := v.(int64); ok {
|
} else if sv, ok := v.(int64); ok {
|
||||||
parts = append(parts, sk+sep+strconv.FormatInt(sv, 10))
|
parts = append(parts, sk+sep+strconv.FormatInt(sv, 10))
|
||||||
} else if v == nil {
|
} else if v == nil {
|
||||||
|
|||||||
36
vendor/github.com/docker/libcompose/yaml/ulimit.go
generated
vendored
36
vendor/github.com/docker/libcompose/yaml/ulimit.go
generated
vendored
@ -1,6 +1,7 @@
|
|||||||
package yaml
|
package yaml
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"sort"
|
"sort"
|
||||||
)
|
)
|
||||||
@ -12,29 +13,30 @@ type Ulimits struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// MarshalYAML implements the Marshaller interface.
|
// MarshalYAML implements the Marshaller interface.
|
||||||
func (u Ulimits) MarshalYAML() (tag string, value interface{}, err error) {
|
func (u Ulimits) MarshalYAML() (interface{}, error) {
|
||||||
ulimitMap := make(map[string]Ulimit)
|
ulimitMap := make(map[string]Ulimit)
|
||||||
for _, ulimit := range u.Elements {
|
for _, ulimit := range u.Elements {
|
||||||
ulimitMap[ulimit.Name] = ulimit
|
ulimitMap[ulimit.Name] = ulimit
|
||||||
}
|
}
|
||||||
return "", ulimitMap, nil
|
return ulimitMap, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the Unmarshaller interface.
|
// UnmarshalYAML implements the Unmarshaller interface.
|
||||||
func (u *Ulimits) UnmarshalYAML(tag string, value interface{}) error {
|
func (u *Ulimits) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
ulimits := make(map[string]Ulimit)
|
ulimits := make(map[string]Ulimit)
|
||||||
switch v := value.(type) {
|
|
||||||
case map[interface{}]interface{}:
|
var mapType map[interface{}]interface{}
|
||||||
for mapKey, mapValue := range v {
|
if err := unmarshal(&mapType); err == nil {
|
||||||
|
for mapKey, mapValue := range mapType {
|
||||||
name, ok := mapKey.(string)
|
name, ok := mapKey.(string)
|
||||||
if !ok {
|
if !ok {
|
||||||
return fmt.Errorf("Cannot unmarshal '%v' to type %T into a string value", name, name)
|
return fmt.Errorf("Cannot unmarshal '%v' to type %T into a string value", name, name)
|
||||||
}
|
}
|
||||||
var soft, hard int64
|
var soft, hard int64
|
||||||
switch mv := mapValue.(type) {
|
switch mv := mapValue.(type) {
|
||||||
case int64:
|
case int:
|
||||||
soft = mv
|
soft = int64(mv)
|
||||||
hard = mv
|
hard = int64(mv)
|
||||||
case map[interface{}]interface{}:
|
case map[interface{}]interface{}:
|
||||||
if len(mv) != 2 {
|
if len(mv) != 2 {
|
||||||
return fmt.Errorf("Failed to unmarshal Ulimit: %#v", mapValue)
|
return fmt.Errorf("Failed to unmarshal Ulimit: %#v", mapValue)
|
||||||
@ -42,9 +44,9 @@ func (u *Ulimits) UnmarshalYAML(tag string, value interface{}) error {
|
|||||||
for mkey, mvalue := range mv {
|
for mkey, mvalue := range mv {
|
||||||
switch mkey {
|
switch mkey {
|
||||||
case "soft":
|
case "soft":
|
||||||
soft = mvalue.(int64)
|
soft = int64(mvalue.(int))
|
||||||
case "hard":
|
case "hard":
|
||||||
hard = mvalue.(int64)
|
hard = int64(mvalue.(int))
|
||||||
default:
|
default:
|
||||||
// FIXME(vdemeester) Should we ignore or fail ?
|
// FIXME(vdemeester) Should we ignore or fail ?
|
||||||
continue
|
continue
|
||||||
@ -69,10 +71,10 @@ func (u *Ulimits) UnmarshalYAML(tag string, value interface{}) error {
|
|||||||
for _, key := range keys {
|
for _, key := range keys {
|
||||||
u.Elements = append(u.Elements, ulimits[key])
|
u.Elements = append(u.Elements, ulimits[key])
|
||||||
}
|
}
|
||||||
default:
|
return nil
|
||||||
return fmt.Errorf("Failed to unmarshal Ulimit: %#v", value)
|
|
||||||
}
|
}
|
||||||
return nil
|
|
||||||
|
return errors.New("Failed to unmarshal Ulimit")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ulimit represents ulimit information.
|
// Ulimit represents ulimit information.
|
||||||
@ -87,11 +89,11 @@ type ulimitValues struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// MarshalYAML implements the Marshaller interface.
|
// MarshalYAML implements the Marshaller interface.
|
||||||
func (u Ulimit) MarshalYAML() (tag string, value interface{}, err error) {
|
func (u Ulimit) MarshalYAML() (interface{}, error) {
|
||||||
if u.Soft == u.Hard {
|
if u.Soft == u.Hard {
|
||||||
return "", u.Soft, nil
|
return u.Soft, nil
|
||||||
}
|
}
|
||||||
return "", u.ulimitValues, err
|
return u.ulimitValues, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewUlimit creates a Ulimit based on the specified parts.
|
// NewUlimit creates a Ulimit based on the specified parts.
|
||||||
|
|||||||
83
vendor/github.com/docker/libcompose/yaml/volume.go
generated
vendored
Normal file
83
vendor/github.com/docker/libcompose/yaml/volume.go
generated
vendored
Normal file
@ -0,0 +1,83 @@
|
|||||||
|
package yaml
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Volumes represents a list of service volumes in compose file.
|
||||||
|
// It has several representation, hence this specific struct.
|
||||||
|
type Volumes struct {
|
||||||
|
Volumes []*Volume
|
||||||
|
}
|
||||||
|
|
||||||
|
// Volume represent a service volume
|
||||||
|
type Volume struct {
|
||||||
|
Source string `yaml:"-"`
|
||||||
|
Destination string `yaml:"-"`
|
||||||
|
AccessMode string `yaml:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// String implements the Stringer interface.
|
||||||
|
func (v *Volume) String() string {
|
||||||
|
var paths []string
|
||||||
|
if v.Source != "" {
|
||||||
|
paths = []string{v.Source, v.Destination}
|
||||||
|
} else {
|
||||||
|
paths = []string{v.Destination}
|
||||||
|
}
|
||||||
|
if v.AccessMode != "" {
|
||||||
|
paths = append(paths, v.AccessMode)
|
||||||
|
}
|
||||||
|
return strings.Join(paths, ":")
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalYAML implements the Marshaller interface.
|
||||||
|
func (v Volumes) MarshalYAML() (interface{}, error) {
|
||||||
|
vs := []string{}
|
||||||
|
for _, volume := range v.Volumes {
|
||||||
|
vs = append(vs, volume.String())
|
||||||
|
}
|
||||||
|
return vs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalYAML implements the Unmarshaller interface.
|
||||||
|
func (v *Volumes) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
|
var sliceType []interface{}
|
||||||
|
if err := unmarshal(&sliceType); err == nil {
|
||||||
|
v.Volumes = []*Volume{}
|
||||||
|
for _, volume := range sliceType {
|
||||||
|
name, ok := volume.(string)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Cannot unmarshal '%v' to type %T into a string value", name, name)
|
||||||
|
}
|
||||||
|
elts := strings.SplitN(name, ":", 3)
|
||||||
|
var vol *Volume
|
||||||
|
switch {
|
||||||
|
case len(elts) == 1:
|
||||||
|
vol = &Volume{
|
||||||
|
Destination: elts[0],
|
||||||
|
}
|
||||||
|
case len(elts) == 2:
|
||||||
|
vol = &Volume{
|
||||||
|
Source: elts[0],
|
||||||
|
Destination: elts[1],
|
||||||
|
}
|
||||||
|
case len(elts) == 3:
|
||||||
|
vol = &Volume{
|
||||||
|
Source: elts[0],
|
||||||
|
Destination: elts[1],
|
||||||
|
AccessMode: elts[2],
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
// FIXME
|
||||||
|
return fmt.Errorf("")
|
||||||
|
}
|
||||||
|
v.Volumes = append(v.Volumes, vol)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return errors.New("Failed to unmarshal Volumes")
|
||||||
|
}
|
||||||
25
vendor/github.com/prometheus/common/expfmt/decode.go
generated
vendored
25
vendor/github.com/prometheus/common/expfmt/decode.go
generated
vendored
@ -46,7 +46,10 @@ func ResponseFormat(h http.Header) Format {
|
|||||||
return FmtUnknown
|
return FmtUnknown
|
||||||
}
|
}
|
||||||
|
|
||||||
const textType = "text/plain"
|
const (
|
||||||
|
textType = "text/plain"
|
||||||
|
jsonType = "application/json"
|
||||||
|
)
|
||||||
|
|
||||||
switch mediatype {
|
switch mediatype {
|
||||||
case ProtoType:
|
case ProtoType:
|
||||||
@ -63,6 +66,22 @@ func ResponseFormat(h http.Header) Format {
|
|||||||
return FmtUnknown
|
return FmtUnknown
|
||||||
}
|
}
|
||||||
return FmtText
|
return FmtText
|
||||||
|
|
||||||
|
case jsonType:
|
||||||
|
var prometheusAPIVersion string
|
||||||
|
|
||||||
|
if params["schema"] == "prometheus/telemetry" && params["version"] != "" {
|
||||||
|
prometheusAPIVersion = params["version"]
|
||||||
|
} else {
|
||||||
|
prometheusAPIVersion = h.Get("X-Prometheus-API-Version")
|
||||||
|
}
|
||||||
|
|
||||||
|
switch prometheusAPIVersion {
|
||||||
|
case "0.0.2", "":
|
||||||
|
return fmtJSON2
|
||||||
|
default:
|
||||||
|
return FmtUnknown
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return FmtUnknown
|
return FmtUnknown
|
||||||
@ -74,6 +93,8 @@ func NewDecoder(r io.Reader, format Format) Decoder {
|
|||||||
switch format {
|
switch format {
|
||||||
case FmtProtoDelim:
|
case FmtProtoDelim:
|
||||||
return &protoDecoder{r: r}
|
return &protoDecoder{r: r}
|
||||||
|
case fmtJSON2:
|
||||||
|
return newJSON2Decoder(r)
|
||||||
}
|
}
|
||||||
return &textDecoder{r: r}
|
return &textDecoder{r: r}
|
||||||
}
|
}
|
||||||
@ -111,7 +132,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// textDecoder implements the Decoder interface for the text protocol.
|
// textDecoder implements the Decoder interface for the text protcol.
|
||||||
type textDecoder struct {
|
type textDecoder struct {
|
||||||
r io.Reader
|
r io.Reader
|
||||||
p TextParser
|
p TextParser
|
||||||
|
|||||||
174
vendor/github.com/prometheus/common/expfmt/json_decode.go
generated
vendored
Normal file
174
vendor/github.com/prometheus/common/expfmt/json_decode.go
generated
vendored
Normal file
@ -0,0 +1,174 @@
|
|||||||
|
// Copyright 2015 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package expfmt
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"sort"
|
||||||
|
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
dto "github.com/prometheus/client_model/go"
|
||||||
|
|
||||||
|
"github.com/prometheus/common/model"
|
||||||
|
)
|
||||||
|
|
||||||
|
type json2Decoder struct {
|
||||||
|
dec *json.Decoder
|
||||||
|
fams []*dto.MetricFamily
|
||||||
|
}
|
||||||
|
|
||||||
|
func newJSON2Decoder(r io.Reader) Decoder {
|
||||||
|
return &json2Decoder{
|
||||||
|
dec: json.NewDecoder(r),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type histogram002 struct {
|
||||||
|
Labels model.LabelSet `json:"labels"`
|
||||||
|
Values map[string]float64 `json:"value"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type counter002 struct {
|
||||||
|
Labels model.LabelSet `json:"labels"`
|
||||||
|
Value float64 `json:"value"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func protoLabelSet(base, ext model.LabelSet) ([]*dto.LabelPair, error) {
|
||||||
|
labels := base.Clone().Merge(ext)
|
||||||
|
delete(labels, model.MetricNameLabel)
|
||||||
|
|
||||||
|
names := make([]string, 0, len(labels))
|
||||||
|
for ln := range labels {
|
||||||
|
names = append(names, string(ln))
|
||||||
|
}
|
||||||
|
sort.Strings(names)
|
||||||
|
|
||||||
|
pairs := make([]*dto.LabelPair, 0, len(labels))
|
||||||
|
|
||||||
|
for _, ln := range names {
|
||||||
|
if !model.LabelNameRE.MatchString(ln) {
|
||||||
|
return nil, fmt.Errorf("invalid label name %q", ln)
|
||||||
|
}
|
||||||
|
lv := labels[model.LabelName(ln)]
|
||||||
|
|
||||||
|
pairs = append(pairs, &dto.LabelPair{
|
||||||
|
Name: proto.String(ln),
|
||||||
|
Value: proto.String(string(lv)),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return pairs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *json2Decoder) more() error {
|
||||||
|
var entities []struct {
|
||||||
|
BaseLabels model.LabelSet `json:"baseLabels"`
|
||||||
|
Docstring string `json:"docstring"`
|
||||||
|
Metric struct {
|
||||||
|
Type string `json:"type"`
|
||||||
|
Values json.RawMessage `json:"value"`
|
||||||
|
} `json:"metric"`
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := d.dec.Decode(&entities); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, e := range entities {
|
||||||
|
f := &dto.MetricFamily{
|
||||||
|
Name: proto.String(string(e.BaseLabels[model.MetricNameLabel])),
|
||||||
|
Help: proto.String(e.Docstring),
|
||||||
|
Type: dto.MetricType_UNTYPED.Enum(),
|
||||||
|
Metric: []*dto.Metric{},
|
||||||
|
}
|
||||||
|
|
||||||
|
d.fams = append(d.fams, f)
|
||||||
|
|
||||||
|
switch e.Metric.Type {
|
||||||
|
case "counter", "gauge":
|
||||||
|
var values []counter002
|
||||||
|
|
||||||
|
if err := json.Unmarshal(e.Metric.Values, &values); err != nil {
|
||||||
|
return fmt.Errorf("could not extract %s value: %s", e.Metric.Type, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, ctr := range values {
|
||||||
|
labels, err := protoLabelSet(e.BaseLabels, ctr.Labels)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
f.Metric = append(f.Metric, &dto.Metric{
|
||||||
|
Label: labels,
|
||||||
|
Untyped: &dto.Untyped{
|
||||||
|
Value: proto.Float64(ctr.Value),
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
case "histogram":
|
||||||
|
var values []histogram002
|
||||||
|
|
||||||
|
if err := json.Unmarshal(e.Metric.Values, &values); err != nil {
|
||||||
|
return fmt.Errorf("could not extract %s value: %s", e.Metric.Type, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, hist := range values {
|
||||||
|
quants := make([]string, 0, len(values))
|
||||||
|
for q := range hist.Values {
|
||||||
|
quants = append(quants, q)
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.Strings(quants)
|
||||||
|
|
||||||
|
for _, q := range quants {
|
||||||
|
value := hist.Values[q]
|
||||||
|
// The correct label is "quantile" but to not break old expressions
|
||||||
|
// this remains "percentile"
|
||||||
|
hist.Labels["percentile"] = model.LabelValue(q)
|
||||||
|
|
||||||
|
labels, err := protoLabelSet(e.BaseLabels, hist.Labels)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
f.Metric = append(f.Metric, &dto.Metric{
|
||||||
|
Label: labels,
|
||||||
|
Untyped: &dto.Untyped{
|
||||||
|
Value: proto.Float64(value),
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("unknown metric type %q", e.Metric.Type)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode implements the Decoder interface.
|
||||||
|
func (d *json2Decoder) Decode(v *dto.MetricFamily) error {
|
||||||
|
if len(d.fams) == 0 {
|
||||||
|
if err := d.more(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
*v = *d.fams[0]
|
||||||
|
d.fams = d.fams[1:]
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
23
vendor/github.com/prometheus/common/expfmt/text_create.go
generated
vendored
23
vendor/github.com/prometheus/common/expfmt/text_create.go
generated
vendored
@ -14,6 +14,7 @@
|
|||||||
package expfmt
|
package expfmt
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"math"
|
"math"
|
||||||
@ -284,17 +285,21 @@ func labelPairsToText(
|
|||||||
return written, nil
|
return written, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
|
||||||
escape = strings.NewReplacer("\\", `\\`, "\n", `\n`)
|
|
||||||
escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
|
|
||||||
)
|
|
||||||
|
|
||||||
// escapeString replaces '\' by '\\', new line character by '\n', and - if
|
// escapeString replaces '\' by '\\', new line character by '\n', and - if
|
||||||
// includeDoubleQuote is true - '"' by '\"'.
|
// includeDoubleQuote is true - '"' by '\"'.
|
||||||
func escapeString(v string, includeDoubleQuote bool) string {
|
func escapeString(v string, includeDoubleQuote bool) string {
|
||||||
if includeDoubleQuote {
|
result := bytes.NewBuffer(make([]byte, 0, len(v)))
|
||||||
return escapeWithDoubleQuote.Replace(v)
|
for _, c := range v {
|
||||||
|
switch {
|
||||||
|
case c == '\\':
|
||||||
|
result.WriteString(`\\`)
|
||||||
|
case includeDoubleQuote && c == '"':
|
||||||
|
result.WriteString(`\"`)
|
||||||
|
case c == '\n':
|
||||||
|
result.WriteString(`\n`)
|
||||||
|
default:
|
||||||
|
result.WriteRune(c)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
return result.String()
|
||||||
return escape.Replace(v)
|
|
||||||
}
|
}
|
||||||
|
|||||||
2
vendor/github.com/prometheus/common/model/model.go
generated
vendored
2
vendor/github.com/prometheus/common/model/model.go
generated
vendored
@ -12,5 +12,5 @@
|
|||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
// Package model contains common data structures that are shared across
|
// Package model contains common data structures that are shared across
|
||||||
// Prometheus components and libraries.
|
// Prometheus componenets and libraries.
|
||||||
package model
|
package model
|
||||||
|
|||||||
6
vendor/github.com/prometheus/procfs/.travis.yml
generated
vendored
6
vendor/github.com/prometheus/procfs/.travis.yml
generated
vendored
@ -1,5 +1,5 @@
|
|||||||
sudo: false
|
|
||||||
language: go
|
language: go
|
||||||
go:
|
go:
|
||||||
- 1.5
|
- 1.3
|
||||||
- 1.6
|
- 1.4
|
||||||
|
- tip
|
||||||
|
|||||||
11
vendor/github.com/prometheus/procfs/AUTHORS.md
generated
vendored
11
vendor/github.com/prometheus/procfs/AUTHORS.md
generated
vendored
@ -8,13 +8,4 @@ Maintainers of this repository:
|
|||||||
The following individuals have contributed code to this repository
|
The following individuals have contributed code to this repository
|
||||||
(listed in alphabetical order):
|
(listed in alphabetical order):
|
||||||
|
|
||||||
* Armen Baghumian <abaghumian@noggin.com.au>
|
* Tobias Schmidt <ts@soundcloud.com>
|
||||||
* Bjoern Rabenstein <beorn@soundcloud.com>
|
|
||||||
* David Cournapeau <cournape@gmail.com>
|
|
||||||
* Ji-Hoon, Seol <jihoon.seol@gmail.com>
|
|
||||||
* Jonas Große Sundrup <cherti@letopolis.de>
|
|
||||||
* Julius Volz <julius.volz@gmail.com>
|
|
||||||
* Matthias Rampke <mr@soundcloud.com>
|
|
||||||
* Nicky Gerritsen <nicky@streamone.nl>
|
|
||||||
* Rémi Audebert <contact@halfr.net>
|
|
||||||
* Tobias Schmidt <tobidt@gmail.com>
|
|
||||||
|
|||||||
6
vendor/github.com/prometheus/procfs/Makefile
generated
vendored
6
vendor/github.com/prometheus/procfs/Makefile
generated
vendored
@ -1,6 +0,0 @@
|
|||||||
ci:
|
|
||||||
! gofmt -l *.go | read nothing
|
|
||||||
go vet
|
|
||||||
go test -v ./...
|
|
||||||
go get github.com/golang/lint/golint
|
|
||||||
golint *.go
|
|
||||||
3
vendor/github.com/prometheus/procfs/README.md
generated
vendored
3
vendor/github.com/prometheus/procfs/README.md
generated
vendored
@ -3,8 +3,5 @@
|
|||||||
This procfs package provides functions to retrieve system, kernel and process
|
This procfs package provides functions to retrieve system, kernel and process
|
||||||
metrics from the pseudo-filesystem proc.
|
metrics from the pseudo-filesystem proc.
|
||||||
|
|
||||||
*WARNING*: This package is a work in progress. Its API may still break in
|
|
||||||
backwards-incompatible ways without warnings. Use it at your own risk.
|
|
||||||
|
|
||||||
[](https://godoc.org/github.com/prometheus/procfs)
|
[](https://godoc.org/github.com/prometheus/procfs)
|
||||||
[](https://travis-ci.org/prometheus/procfs)
|
[](https://travis-ci.org/prometheus/procfs)
|
||||||
|
|||||||
9
vendor/github.com/prometheus/procfs/fs.go
generated
vendored
9
vendor/github.com/prometheus/procfs/fs.go
generated
vendored
@ -27,7 +27,10 @@ func NewFS(mountPoint string) (FS, error) {
|
|||||||
return FS(mountPoint), nil
|
return FS(mountPoint), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Path returns the path of the given subsystem relative to the procfs root.
|
func (fs FS) stat(p string) (os.FileInfo, error) {
|
||||||
func (fs FS) Path(p ...string) string {
|
return os.Stat(path.Join(string(fs), p))
|
||||||
return path.Join(append([]string{string(fs)}, p...)...)
|
}
|
||||||
|
|
||||||
|
func (fs FS) open(p string) (*os.File, error) {
|
||||||
|
return os.Open(path.Join(string(fs), p))
|
||||||
}
|
}
|
||||||
|
|||||||
224
vendor/github.com/prometheus/procfs/ipvs.go
generated
vendored
224
vendor/github.com/prometheus/procfs/ipvs.go
generated
vendored
@ -1,224 +0,0 @@
|
|||||||
package procfs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"encoding/hex"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"net"
|
|
||||||
"os"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
|
|
||||||
type IPVSStats struct {
|
|
||||||
// Total count of connections.
|
|
||||||
Connections uint64
|
|
||||||
// Total incoming packages processed.
|
|
||||||
IncomingPackets uint64
|
|
||||||
// Total outgoing packages processed.
|
|
||||||
OutgoingPackets uint64
|
|
||||||
// Total incoming traffic.
|
|
||||||
IncomingBytes uint64
|
|
||||||
// Total outgoing traffic.
|
|
||||||
OutgoingBytes uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
// IPVSBackendStatus holds current metrics of one virtual / real address pair.
|
|
||||||
type IPVSBackendStatus struct {
|
|
||||||
// The local (virtual) IP address.
|
|
||||||
LocalAddress net.IP
|
|
||||||
// The local (virtual) port.
|
|
||||||
LocalPort uint16
|
|
||||||
// The transport protocol (TCP, UDP).
|
|
||||||
Proto string
|
|
||||||
// The remote (real) IP address.
|
|
||||||
RemoteAddress net.IP
|
|
||||||
// The remote (real) port.
|
|
||||||
RemotePort uint16
|
|
||||||
// The current number of active connections for this virtual/real address pair.
|
|
||||||
ActiveConn uint64
|
|
||||||
// The current number of inactive connections for this virtual/real address pair.
|
|
||||||
InactConn uint64
|
|
||||||
// The current weight of this virtual/real address pair.
|
|
||||||
Weight uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewIPVSStats reads the IPVS statistics.
|
|
||||||
func NewIPVSStats() (IPVSStats, error) {
|
|
||||||
fs, err := NewFS(DefaultMountPoint)
|
|
||||||
if err != nil {
|
|
||||||
return IPVSStats{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return fs.NewIPVSStats()
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem.
|
|
||||||
func (fs FS) NewIPVSStats() (IPVSStats, error) {
|
|
||||||
file, err := os.Open(fs.Path("net/ip_vs_stats"))
|
|
||||||
if err != nil {
|
|
||||||
return IPVSStats{}, err
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
|
|
||||||
return parseIPVSStats(file)
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseIPVSStats performs the actual parsing of `ip_vs_stats`.
|
|
||||||
func parseIPVSStats(file io.Reader) (IPVSStats, error) {
|
|
||||||
var (
|
|
||||||
statContent []byte
|
|
||||||
statLines []string
|
|
||||||
statFields []string
|
|
||||||
stats IPVSStats
|
|
||||||
)
|
|
||||||
|
|
||||||
statContent, err := ioutil.ReadAll(file)
|
|
||||||
if err != nil {
|
|
||||||
return IPVSStats{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
statLines = strings.SplitN(string(statContent), "\n", 4)
|
|
||||||
if len(statLines) != 4 {
|
|
||||||
return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short")
|
|
||||||
}
|
|
||||||
|
|
||||||
statFields = strings.Fields(statLines[2])
|
|
||||||
if len(statFields) != 5 {
|
|
||||||
return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields")
|
|
||||||
}
|
|
||||||
|
|
||||||
stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64)
|
|
||||||
if err != nil {
|
|
||||||
return IPVSStats{}, err
|
|
||||||
}
|
|
||||||
stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64)
|
|
||||||
if err != nil {
|
|
||||||
return IPVSStats{}, err
|
|
||||||
}
|
|
||||||
stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64)
|
|
||||||
if err != nil {
|
|
||||||
return IPVSStats{}, err
|
|
||||||
}
|
|
||||||
stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64)
|
|
||||||
if err != nil {
|
|
||||||
return IPVSStats{}, err
|
|
||||||
}
|
|
||||||
stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64)
|
|
||||||
if err != nil {
|
|
||||||
return IPVSStats{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return stats, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs.
|
|
||||||
func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
|
|
||||||
fs, err := NewFS(DefaultMountPoint)
|
|
||||||
if err != nil {
|
|
||||||
return []IPVSBackendStatus{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return fs.NewIPVSBackendStatus()
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
|
|
||||||
func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
|
|
||||||
file, err := os.Open(fs.Path("net/ip_vs"))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
|
|
||||||
return parseIPVSBackendStatus(file)
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
|
|
||||||
var (
|
|
||||||
status []IPVSBackendStatus
|
|
||||||
scanner = bufio.NewScanner(file)
|
|
||||||
proto string
|
|
||||||
localAddress net.IP
|
|
||||||
localPort uint16
|
|
||||||
err error
|
|
||||||
)
|
|
||||||
|
|
||||||
for scanner.Scan() {
|
|
||||||
fields := strings.Fields(string(scanner.Text()))
|
|
||||||
if len(fields) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
switch {
|
|
||||||
case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port":
|
|
||||||
continue
|
|
||||||
case fields[0] == "TCP" || fields[0] == "UDP":
|
|
||||||
if len(fields) < 2 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
proto = fields[0]
|
|
||||||
localAddress, localPort, err = parseIPPort(fields[1])
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
case fields[0] == "->":
|
|
||||||
if len(fields) < 6 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
remoteAddress, remotePort, err := parseIPPort(fields[1])
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
weight, err := strconv.ParseUint(fields[3], 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
activeConn, err := strconv.ParseUint(fields[4], 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
inactConn, err := strconv.ParseUint(fields[5], 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
status = append(status, IPVSBackendStatus{
|
|
||||||
LocalAddress: localAddress,
|
|
||||||
LocalPort: localPort,
|
|
||||||
RemoteAddress: remoteAddress,
|
|
||||||
RemotePort: remotePort,
|
|
||||||
Proto: proto,
|
|
||||||
Weight: weight,
|
|
||||||
ActiveConn: activeConn,
|
|
||||||
InactConn: inactConn,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return status, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseIPPort(s string) (net.IP, uint16, error) {
|
|
||||||
tmp := strings.SplitN(s, ":", 2)
|
|
||||||
|
|
||||||
if len(tmp) != 2 {
|
|
||||||
return nil, 0, fmt.Errorf("invalid IP:Port: %s", s)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(tmp[0]) != 8 && len(tmp[0]) != 32 {
|
|
||||||
return nil, 0, fmt.Errorf("invalid IP: %s", tmp[0])
|
|
||||||
}
|
|
||||||
|
|
||||||
ip, err := hex.DecodeString(tmp[0])
|
|
||||||
if err != nil {
|
|
||||||
return nil, 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
port, err := strconv.ParseUint(tmp[1], 16, 16)
|
|
||||||
if err != nil {
|
|
||||||
return nil, 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return ip, uint16(port), nil
|
|
||||||
}
|
|
||||||
138
vendor/github.com/prometheus/procfs/mdstat.go
generated
vendored
138
vendor/github.com/prometheus/procfs/mdstat.go
generated
vendored
@ -1,138 +0,0 @@
|
|||||||
package procfs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"regexp"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
|
|
||||||
buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
|
|
||||||
)
|
|
||||||
|
|
||||||
// MDStat holds info parsed from /proc/mdstat.
|
|
||||||
type MDStat struct {
|
|
||||||
// Name of the device.
|
|
||||||
Name string
|
|
||||||
// activity-state of the device.
|
|
||||||
ActivityState string
|
|
||||||
// Number of active disks.
|
|
||||||
DisksActive int64
|
|
||||||
// Total number of disks the device consists of.
|
|
||||||
DisksTotal int64
|
|
||||||
// Number of blocks the device holds.
|
|
||||||
BlocksTotal int64
|
|
||||||
// Number of blocks on the device that are in sync.
|
|
||||||
BlocksSynced int64
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos.
|
|
||||||
func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
|
|
||||||
mdStatusFilePath := fs.Path("mdstat")
|
|
||||||
content, err := ioutil.ReadFile(mdStatusFilePath)
|
|
||||||
if err != nil {
|
|
||||||
return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
mdStates := []MDStat{}
|
|
||||||
lines := strings.Split(string(content), "\n")
|
|
||||||
for i, l := range lines {
|
|
||||||
if l == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if l[0] == ' ' {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
mainLine := strings.Split(l, " ")
|
|
||||||
if len(mainLine) < 3 {
|
|
||||||
return mdStates, fmt.Errorf("error parsing mdline: %s", l)
|
|
||||||
}
|
|
||||||
mdName := mainLine[0]
|
|
||||||
activityState := mainLine[2]
|
|
||||||
|
|
||||||
if len(lines) <= i+3 {
|
|
||||||
return mdStates, fmt.Errorf(
|
|
||||||
"error parsing %s: too few lines for md device %s",
|
|
||||||
mdStatusFilePath,
|
|
||||||
mdName,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
active, total, size, err := evalStatusline(lines[i+1])
|
|
||||||
if err != nil {
|
|
||||||
return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// j is the line number of the syncing-line.
|
|
||||||
j := i + 2
|
|
||||||
if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
|
|
||||||
j = i + 3
|
|
||||||
}
|
|
||||||
|
|
||||||
// If device is syncing at the moment, get the number of currently
|
|
||||||
// synced bytes, otherwise that number equals the size of the device.
|
|
||||||
syncedBlocks := size
|
|
||||||
if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") {
|
|
||||||
syncedBlocks, err = evalBuildline(lines[j])
|
|
||||||
if err != nil {
|
|
||||||
return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
mdStates = append(mdStates, MDStat{
|
|
||||||
Name: mdName,
|
|
||||||
ActivityState: activityState,
|
|
||||||
DisksActive: active,
|
|
||||||
DisksTotal: total,
|
|
||||||
BlocksTotal: size,
|
|
||||||
BlocksSynced: syncedBlocks,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return mdStates, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func evalStatusline(statusline string) (active, total, size int64, err error) {
|
|
||||||
matches := statuslineRE.FindStringSubmatch(statusline)
|
|
||||||
if len(matches) != 4 {
|
|
||||||
return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline)
|
|
||||||
}
|
|
||||||
|
|
||||||
size, err = strconv.ParseInt(matches[1], 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
total, err = strconv.ParseInt(matches[2], 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
active, err = strconv.ParseInt(matches[3], 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return active, total, size, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func evalBuildline(buildline string) (syncedBlocks int64, err error) {
|
|
||||||
matches := buildlineRE.FindStringSubmatch(buildline)
|
|
||||||
if len(matches) != 2 {
|
|
||||||
return 0, fmt.Errorf("unexpected buildline: %s", buildline)
|
|
||||||
}
|
|
||||||
|
|
||||||
syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return 0, fmt.Errorf("%s in buildline: %s", err, buildline)
|
|
||||||
}
|
|
||||||
|
|
||||||
return syncedBlocks, nil
|
|
||||||
}
|
|
||||||
91
vendor/github.com/prometheus/procfs/proc.go
generated
vendored
91
vendor/github.com/prometheus/procfs/proc.go
generated
vendored
@ -4,6 +4,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
|
"path"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
@ -23,13 +24,9 @@ func (p Procs) Len() int { return len(p) }
|
|||||||
func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||||
func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
|
func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
|
||||||
|
|
||||||
// Self returns a process for the current process read via /proc/self.
|
// Self returns a process for the current process.
|
||||||
func Self() (Proc, error) {
|
func Self() (Proc, error) {
|
||||||
fs, err := NewFS(DefaultMountPoint)
|
return NewProc(os.Getpid())
|
||||||
if err != nil {
|
|
||||||
return Proc{}, err
|
|
||||||
}
|
|
||||||
return fs.Self()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewProc returns a process for the given pid under /proc.
|
// NewProc returns a process for the given pid under /proc.
|
||||||
@ -38,42 +35,32 @@ func NewProc(pid int) (Proc, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return Proc{}, err
|
return Proc{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return fs.NewProc(pid)
|
return fs.NewProc(pid)
|
||||||
}
|
}
|
||||||
|
|
||||||
// AllProcs returns a list of all currently available processes under /proc.
|
// AllProcs returns a list of all currently avaible processes under /proc.
|
||||||
func AllProcs() (Procs, error) {
|
func AllProcs() (Procs, error) {
|
||||||
fs, err := NewFS(DefaultMountPoint)
|
fs, err := NewFS(DefaultMountPoint)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return Procs{}, err
|
return Procs{}, err
|
||||||
}
|
}
|
||||||
return fs.AllProcs()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Self returns a process for the current process.
|
return fs.AllProcs()
|
||||||
func (fs FS) Self() (Proc, error) {
|
|
||||||
p, err := os.Readlink(fs.Path("self"))
|
|
||||||
if err != nil {
|
|
||||||
return Proc{}, err
|
|
||||||
}
|
|
||||||
pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1))
|
|
||||||
if err != nil {
|
|
||||||
return Proc{}, err
|
|
||||||
}
|
|
||||||
return fs.NewProc(pid)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewProc returns a process for the given pid.
|
// NewProc returns a process for the given pid.
|
||||||
func (fs FS) NewProc(pid int) (Proc, error) {
|
func (fs FS) NewProc(pid int) (Proc, error) {
|
||||||
if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil {
|
if _, err := fs.stat(strconv.Itoa(pid)); err != nil {
|
||||||
return Proc{}, err
|
return Proc{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return Proc{PID: pid, fs: fs}, nil
|
return Proc{PID: pid, fs: fs}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// AllProcs returns a list of all currently available processes.
|
// AllProcs returns a list of all currently avaible processes.
|
||||||
func (fs FS) AllProcs() (Procs, error) {
|
func (fs FS) AllProcs() (Procs, error) {
|
||||||
d, err := os.Open(fs.Path())
|
d, err := fs.open("")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return Procs{}, err
|
return Procs{}, err
|
||||||
}
|
}
|
||||||
@ -98,7 +85,7 @@ func (fs FS) AllProcs() (Procs, error) {
|
|||||||
|
|
||||||
// CmdLine returns the command line of a process.
|
// CmdLine returns the command line of a process.
|
||||||
func (p Proc) CmdLine() ([]string, error) {
|
func (p Proc) CmdLine() ([]string, error) {
|
||||||
f, err := os.Open(p.path("cmdline"))
|
f, err := p.open("cmdline")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -109,39 +96,9 @@ func (p Proc) CmdLine() ([]string, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(data) < 1 {
|
|
||||||
return []string{}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil
|
return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Comm returns the command name of a process.
|
|
||||||
func (p Proc) Comm() (string, error) {
|
|
||||||
f, err := os.Open(p.path("comm"))
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
data, err := ioutil.ReadAll(f)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
return strings.TrimSpace(string(data)), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Executable returns the absolute path of the executable command of a process.
|
|
||||||
func (p Proc) Executable() (string, error) {
|
|
||||||
exe, err := os.Readlink(p.path("exe"))
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return exe, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// FileDescriptors returns the currently open file descriptors of a process.
|
// FileDescriptors returns the currently open file descriptors of a process.
|
||||||
func (p Proc) FileDescriptors() ([]uintptr, error) {
|
func (p Proc) FileDescriptors() ([]uintptr, error) {
|
||||||
names, err := p.fileDescriptors()
|
names, err := p.fileDescriptors()
|
||||||
@ -161,26 +118,6 @@ func (p Proc) FileDescriptors() ([]uintptr, error) {
|
|||||||
return fds, nil
|
return fds, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// FileDescriptorTargets returns the targets of all file descriptors of a process.
|
|
||||||
// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string.
|
|
||||||
func (p Proc) FileDescriptorTargets() ([]string, error) {
|
|
||||||
names, err := p.fileDescriptors()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
targets := make([]string, len(names))
|
|
||||||
|
|
||||||
for i, name := range names {
|
|
||||||
target, err := os.Readlink(p.path("fd", name))
|
|
||||||
if err == nil {
|
|
||||||
targets[i] = target
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return targets, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// FileDescriptorsLen returns the number of currently open file descriptors of
|
// FileDescriptorsLen returns the number of currently open file descriptors of
|
||||||
// a process.
|
// a process.
|
||||||
func (p Proc) FileDescriptorsLen() (int, error) {
|
func (p Proc) FileDescriptorsLen() (int, error) {
|
||||||
@ -193,7 +130,7 @@ func (p Proc) FileDescriptorsLen() (int, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (p Proc) fileDescriptors() ([]string, error) {
|
func (p Proc) fileDescriptors() ([]string, error) {
|
||||||
d, err := os.Open(p.path("fd"))
|
d, err := p.open("fd")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -207,6 +144,6 @@ func (p Proc) fileDescriptors() ([]string, error) {
|
|||||||
return names, nil
|
return names, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p Proc) path(pa ...string) string {
|
func (p Proc) open(pa string) (*os.File, error) {
|
||||||
return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
|
return p.fs.open(path.Join(strconv.Itoa(p.PID), pa))
|
||||||
}
|
}
|
||||||
|
|||||||
55
vendor/github.com/prometheus/procfs/proc_io.go
generated
vendored
55
vendor/github.com/prometheus/procfs/proc_io.go
generated
vendored
@ -1,55 +0,0 @@
|
|||||||
package procfs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ProcIO models the content of /proc/<pid>/io.
|
|
||||||
type ProcIO struct {
|
|
||||||
// Chars read.
|
|
||||||
RChar uint64
|
|
||||||
// Chars written.
|
|
||||||
WChar uint64
|
|
||||||
// Read syscalls.
|
|
||||||
SyscR uint64
|
|
||||||
// Write syscalls.
|
|
||||||
SyscW uint64
|
|
||||||
// Bytes read.
|
|
||||||
ReadBytes uint64
|
|
||||||
// Bytes written.
|
|
||||||
WriteBytes uint64
|
|
||||||
// Bytes written, but taking into account truncation. See
|
|
||||||
// Documentation/filesystems/proc.txt in the kernel sources for
|
|
||||||
// detailed explanation.
|
|
||||||
CancelledWriteBytes int64
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewIO creates a new ProcIO instance from a given Proc instance.
|
|
||||||
func (p Proc) NewIO() (ProcIO, error) {
|
|
||||||
pio := ProcIO{}
|
|
||||||
|
|
||||||
f, err := os.Open(p.path("io"))
|
|
||||||
if err != nil {
|
|
||||||
return pio, err
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
data, err := ioutil.ReadAll(f)
|
|
||||||
if err != nil {
|
|
||||||
return pio, err
|
|
||||||
}
|
|
||||||
|
|
||||||
ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
|
|
||||||
"read_bytes: %d\nwrite_bytes: %d\n" +
|
|
||||||
"cancelled_write_bytes: %d\n"
|
|
||||||
|
|
||||||
_, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
|
|
||||||
&pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
|
|
||||||
if err != nil {
|
|
||||||
return pio, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return pio, nil
|
|
||||||
}
|
|
||||||
64
vendor/github.com/prometheus/procfs/proc_limits.go
generated
vendored
64
vendor/github.com/prometheus/procfs/proc_limits.go
generated
vendored
@ -3,56 +3,29 @@ package procfs
|
|||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
|
||||||
"regexp"
|
"regexp"
|
||||||
"strconv"
|
"strconv"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ProcLimits represents the soft limits for each of the process's resource
|
// ProcLimits represents the soft limits for each of the process's resource
|
||||||
// limits. For more information see getrlimit(2):
|
// limits.
|
||||||
// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
|
|
||||||
type ProcLimits struct {
|
type ProcLimits struct {
|
||||||
// CPU time limit in seconds.
|
CPUTime int
|
||||||
CPUTime int
|
FileSize int
|
||||||
// Maximum size of files that the process may create.
|
DataSize int
|
||||||
FileSize int
|
StackSize int
|
||||||
// Maximum size of the process's data segment (initialized data,
|
CoreFileSize int
|
||||||
// uninitialized data, and heap).
|
ResidentSet int
|
||||||
DataSize int
|
Processes int
|
||||||
// Maximum size of the process stack in bytes.
|
OpenFiles int
|
||||||
StackSize int
|
LockedMemory int
|
||||||
// Maximum size of a core file.
|
AddressSpace int
|
||||||
CoreFileSize int
|
FileLocks int
|
||||||
// Limit of the process's resident set in pages.
|
PendingSignals int
|
||||||
ResidentSet int
|
MsqqueueSize int
|
||||||
// Maximum number of processes that can be created for the real user ID of
|
NicePriority int
|
||||||
// the calling process.
|
|
||||||
Processes int
|
|
||||||
// Value one greater than the maximum file descriptor number that can be
|
|
||||||
// opened by this process.
|
|
||||||
OpenFiles int
|
|
||||||
// Maximum number of bytes of memory that may be locked into RAM.
|
|
||||||
LockedMemory int
|
|
||||||
// Maximum size of the process's virtual memory address space in bytes.
|
|
||||||
AddressSpace int
|
|
||||||
// Limit on the combined number of flock(2) locks and fcntl(2) leases that
|
|
||||||
// this process may establish.
|
|
||||||
FileLocks int
|
|
||||||
// Limit of signals that may be queued for the real user ID of the calling
|
|
||||||
// process.
|
|
||||||
PendingSignals int
|
|
||||||
// Limit on the number of bytes that can be allocated for POSIX message
|
|
||||||
// queues for the real user ID of the calling process.
|
|
||||||
MsqqueueSize int
|
|
||||||
// Limit of the nice priority set using setpriority(2) or nice(2).
|
|
||||||
NicePriority int
|
|
||||||
// Limit of the real-time priority set using sched_setscheduler(2) or
|
|
||||||
// sched_setparam(2).
|
|
||||||
RealtimePriority int
|
RealtimePriority int
|
||||||
// Limit (in microseconds) on the amount of CPU time that a process
|
RealtimeTimeout int
|
||||||
// scheduled under a real-time scheduling policy may consume without making
|
|
||||||
// a blocking system call.
|
|
||||||
RealtimeTimeout int
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@ -66,7 +39,7 @@ var (
|
|||||||
|
|
||||||
// NewLimits returns the current soft limits of the process.
|
// NewLimits returns the current soft limits of the process.
|
||||||
func (p Proc) NewLimits() (ProcLimits, error) {
|
func (p Proc) NewLimits() (ProcLimits, error) {
|
||||||
f, err := os.Open(p.path("limits"))
|
f, err := p.open("limits")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return ProcLimits{}, err
|
return ProcLimits{}, err
|
||||||
}
|
}
|
||||||
@ -87,7 +60,7 @@ func (p Proc) NewLimits() (ProcLimits, error) {
|
|||||||
case "Max cpu time":
|
case "Max cpu time":
|
||||||
l.CPUTime, err = parseInt(fields[1])
|
l.CPUTime, err = parseInt(fields[1])
|
||||||
case "Max file size":
|
case "Max file size":
|
||||||
l.FileSize, err = parseInt(fields[1])
|
l.FileLocks, err = parseInt(fields[1])
|
||||||
case "Max data size":
|
case "Max data size":
|
||||||
l.DataSize, err = parseInt(fields[1])
|
l.DataSize, err = parseInt(fields[1])
|
||||||
case "Max stack size":
|
case "Max stack size":
|
||||||
@ -117,6 +90,7 @@ func (p Proc) NewLimits() (ProcLimits, error) {
|
|||||||
case "Max realtime timeout":
|
case "Max realtime timeout":
|
||||||
l.RealtimeTimeout, err = parseInt(fields[1])
|
l.RealtimeTimeout, err = parseInt(fields[1])
|
||||||
}
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return ProcLimits{}, err
|
return ProcLimits{}, err
|
||||||
}
|
}
|
||||||
|
|||||||
18
vendor/github.com/prometheus/procfs/proc_stat.go
generated
vendored
18
vendor/github.com/prometheus/procfs/proc_stat.go
generated
vendored
@ -7,15 +7,15 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Originally, this USER_HZ value was dynamically retrieved via a sysconf call
|
// Originally, this USER_HZ value was dynamically retrieved via a sysconf call which
|
||||||
// which required cgo. However, that caused a lot of problems regarding
|
// required cgo. However, that caused a lot of problems regarding
|
||||||
// cross-compilation. Alternatives such as running a binary to determine the
|
// cross-compilation. Alternatives such as running a binary to determine the
|
||||||
// value, or trying to derive it in some other way were all problematic. After
|
// value, or trying to derive it in some other way were all problematic.
|
||||||
// much research it was determined that USER_HZ is actually hardcoded to 100 on
|
// After much research it was determined that USER_HZ is actually hardcoded to
|
||||||
// all Go-supported platforms as of the time of this writing. This is why we
|
// 100 on all Go-supported platforms as of the time of this writing. This is
|
||||||
// decided to hardcode it here as well. It is not impossible that there could
|
// why we decided to hardcode it here as well. It is not impossible that there
|
||||||
// be systems with exceptions, but they should be very exotic edge cases, and
|
// could be systems with exceptions, but they should be very exotic edge cases,
|
||||||
// in that case, the worst outcome will be two misreported metrics.
|
// and in that case, the worst outcome will be two misreported metrics.
|
||||||
//
|
//
|
||||||
// See also the following discussions:
|
// See also the following discussions:
|
||||||
//
|
//
|
||||||
@ -91,7 +91,7 @@ type ProcStat struct {
|
|||||||
|
|
||||||
// NewStat returns the current status information of the process.
|
// NewStat returns the current status information of the process.
|
||||||
func (p Proc) NewStat() (ProcStat, error) {
|
func (p Proc) NewStat() (ProcStat, error) {
|
||||||
f, err := os.Open(p.path("stat"))
|
f, err := p.open("stat")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return ProcStat{}, err
|
return ProcStat{}, err
|
||||||
}
|
}
|
||||||
|
|||||||
3
vendor/github.com/prometheus/procfs/stat.go
generated
vendored
3
vendor/github.com/prometheus/procfs/stat.go
generated
vendored
@ -3,7 +3,6 @@ package procfs
|
|||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
@ -26,7 +25,7 @@ func NewStat() (Stat, error) {
|
|||||||
|
|
||||||
// NewStat returns an information about current kernel/system statistics.
|
// NewStat returns an information about current kernel/system statistics.
|
||||||
func (fs FS) NewStat() (Stat, error) {
|
func (fs FS) NewStat() (Stat, error) {
|
||||||
f, err := os.Open(fs.Path("stat"))
|
f, err := fs.open("stat")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return Stat{}, err
|
return Stat{}, err
|
||||||
}
|
}
|
||||||
|
|||||||
Loading…
Reference in New Issue
Block a user