forked from LaconicNetwork/kompose
update vendor with openshift
This commit is contained in:
parent
713aadff8f
commit
c2a07f764d
116
Godeps/Godeps.json
generated
116
Godeps/Godeps.json
generated
@ -24,6 +24,11 @@
|
|||||||
"Comment": "v0.8.2",
|
"Comment": "v0.8.2",
|
||||||
"Rev": "98a1428efc3d732f9e377b50c8e2113e070896cf"
|
"Rev": "98a1428efc3d732f9e377b50c8e2113e070896cf"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/blang/semver",
|
||||||
|
"Comment": "v3.0.1",
|
||||||
|
"Rev": "31b736133b98f26d5e078ec9eb591666edfd091f"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/cloudfoundry-incubator/candiedyaml",
|
"ImportPath": "github.com/cloudfoundry-incubator/candiedyaml",
|
||||||
"Rev": "99c3df83b51532e3615f851d8c2dbb638f5313bf"
|
"Rev": "99c3df83b51532e3615f851d8c2dbb638f5313bf"
|
||||||
@ -47,6 +52,21 @@
|
|||||||
"Comment": "v2.5.0-rc.1-22-g4e17ab5",
|
"Comment": "v2.5.0-rc.1-22-g4e17ab5",
|
||||||
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/docker/distribution/manifest",
|
||||||
|
"Comment": "v2.5.0-rc.1-22-g4e17ab5",
|
||||||
|
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/docker/distribution/manifest/schema1",
|
||||||
|
"Comment": "v2.5.0-rc.1-22-g4e17ab5",
|
||||||
|
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/docker/distribution/manifest/schema2",
|
||||||
|
"Comment": "v2.5.0-rc.1-22-g4e17ab5",
|
||||||
|
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/docker/distribution/reference",
|
"ImportPath": "github.com/docker/distribution/reference",
|
||||||
"Comment": "v2.5.0-rc.1-22-g4e17ab5",
|
"Comment": "v2.5.0-rc.1-22-g4e17ab5",
|
||||||
@ -477,6 +497,10 @@
|
|||||||
"Comment": "v0.2.0-186-ga12288b",
|
"Comment": "v0.2.0-186-ga12288b",
|
||||||
"Rev": "a12288bd636066e330b5d39197737cd04882d164"
|
"Rev": "a12288bd636066e330b5d39197737cd04882d164"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/docker/libtrust",
|
||||||
|
"Rev": "9cbd2a1374f46905c68a4eb3694a130610adc62a"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/fatih/structs",
|
"ImportPath": "github.com/fatih/structs",
|
||||||
"Rev": "be738c8546f55b34e60125afa50ed73a9a9c460e"
|
"Rev": "be738c8546f55b34e60125afa50ed73a9a9c460e"
|
||||||
@ -485,6 +509,78 @@
|
|||||||
"ImportPath": "github.com/flynn/go-shlex",
|
"ImportPath": "github.com/flynn/go-shlex",
|
||||||
"Rev": "3f9db97f856818214da2e1057f8ad84803971cff"
|
"Rev": "3f9db97f856818214da2e1057f8ad84803971cff"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/fsouza/go-dockerclient",
|
||||||
|
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus",
|
||||||
|
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts",
|
||||||
|
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive",
|
||||||
|
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils",
|
||||||
|
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir",
|
||||||
|
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools",
|
||||||
|
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils",
|
||||||
|
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath",
|
||||||
|
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools",
|
||||||
|
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise",
|
||||||
|
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy",
|
||||||
|
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system",
|
||||||
|
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/go-units",
|
||||||
|
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp",
|
||||||
|
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user",
|
||||||
|
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/fsouza/go-dockerclient/external/golang.org/x/net/context",
|
||||||
|
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix",
|
||||||
|
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/ghodss/yaml",
|
"ImportPath": "github.com/ghodss/yaml",
|
||||||
"Rev": "73d445a93680fa1a78ae23a5839bad48f32ba1ee"
|
"Rev": "73d445a93680fa1a78ae23a5839bad48f32ba1ee"
|
||||||
@ -535,6 +631,16 @@
|
|||||||
"Comment": "v0.0.7",
|
"Comment": "v0.0.7",
|
||||||
"Rev": "7ca2aa4873aea7cb4265b1726acb24b90d8726c6"
|
"Rev": "7ca2aa4873aea7cb4265b1726acb24b90d8726c6"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/openshift/origin/pkg/deploy/api",
|
||||||
|
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
|
||||||
|
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/openshift/origin/pkg/image/api",
|
||||||
|
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
|
||||||
|
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/pborman/uuid",
|
"ImportPath": "github.com/pborman/uuid",
|
||||||
"Rev": "ca53cad383cad2479bbba7f7a1a05797ec1386e4"
|
"Rev": "ca53cad383cad2479bbba7f7a1a05797ec1386e4"
|
||||||
@ -609,6 +715,11 @@
|
|||||||
"Comment": "v1.3.0",
|
"Comment": "v1.3.0",
|
||||||
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
|
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "k8s.io/kubernetes/pkg/api/errors",
|
||||||
|
"Comment": "v1.3.0",
|
||||||
|
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "k8s.io/kubernetes/pkg/api/meta",
|
"ImportPath": "k8s.io/kubernetes/pkg/api/meta",
|
||||||
"Comment": "v1.3.0",
|
"Comment": "v1.3.0",
|
||||||
@ -749,6 +860,11 @@
|
|||||||
"Comment": "v1.3.0",
|
"Comment": "v1.3.0",
|
||||||
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
|
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "k8s.io/kubernetes/pkg/util/validation/field",
|
||||||
|
"Comment": "v1.3.0",
|
||||||
|
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "k8s.io/kubernetes/pkg/util/wait",
|
"ImportPath": "k8s.io/kubernetes/pkg/util/wait",
|
||||||
"Comment": "v1.3.0",
|
"Comment": "v1.3.0",
|
||||||
|
|||||||
22
vendor/github.com/blang/semver/LICENSE
generated
vendored
Normal file
22
vendor/github.com/blang/semver/LICENSE
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
The MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2014 Benedikt Lang <github at benediktlang.de>
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in
|
||||||
|
all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
THE SOFTWARE.
|
||||||
|
|
||||||
142
vendor/github.com/blang/semver/README.md
generated
vendored
Normal file
142
vendor/github.com/blang/semver/README.md
generated
vendored
Normal file
@ -0,0 +1,142 @@
|
|||||||
|
semver for golang [](https://drone.io/github.com/blang/semver/latest) [](https://godoc.org/github.com/blang/semver) [](https://coveralls.io/r/blang/semver?branch=master)
|
||||||
|
======
|
||||||
|
|
||||||
|
semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`.
|
||||||
|
|
||||||
|
Usage
|
||||||
|
-----
|
||||||
|
```bash
|
||||||
|
$ go get github.com/blang/semver
|
||||||
|
```
|
||||||
|
Note: Always vendor your dependencies or fix on a specific version tag.
|
||||||
|
|
||||||
|
```go
|
||||||
|
import github.com/blang/semver
|
||||||
|
v1, err := semver.Make("1.0.0-beta")
|
||||||
|
v2, err := semver.Make("2.0.0-beta")
|
||||||
|
v1.Compare(v2)
|
||||||
|
```
|
||||||
|
|
||||||
|
Also check the [GoDocs](http://godoc.org/github.com/blang/semver).
|
||||||
|
|
||||||
|
Why should I use this lib?
|
||||||
|
-----
|
||||||
|
|
||||||
|
- Fully spec compatible
|
||||||
|
- No reflection
|
||||||
|
- No regex
|
||||||
|
- Fully tested (Coverage >99%)
|
||||||
|
- Readable parsing/validation errors
|
||||||
|
- Fast (See [Benchmarks](#benchmarks))
|
||||||
|
- Only Stdlib
|
||||||
|
- Uses values instead of pointers
|
||||||
|
- Many features, see below
|
||||||
|
|
||||||
|
|
||||||
|
Features
|
||||||
|
-----
|
||||||
|
|
||||||
|
- Parsing and validation at all levels
|
||||||
|
- Comparator-like comparisons
|
||||||
|
- Compare Helper Methods
|
||||||
|
- InPlace manipulation
|
||||||
|
- Sortable (implements sort.Interface)
|
||||||
|
- database/sql compatible (sql.Scanner/Valuer)
|
||||||
|
- encoding/json compatible (json.Marshaler/Unmarshaler)
|
||||||
|
|
||||||
|
|
||||||
|
Example
|
||||||
|
-----
|
||||||
|
|
||||||
|
Have a look at full examples in [examples/main.go](examples/main.go)
|
||||||
|
|
||||||
|
```go
|
||||||
|
import github.com/blang/semver
|
||||||
|
|
||||||
|
v, err := semver.Make("0.0.1-alpha.preview+123.github")
|
||||||
|
fmt.Printf("Major: %d\n", v.Major)
|
||||||
|
fmt.Printf("Minor: %d\n", v.Minor)
|
||||||
|
fmt.Printf("Patch: %d\n", v.Patch)
|
||||||
|
fmt.Printf("Pre: %s\n", v.Pre)
|
||||||
|
fmt.Printf("Build: %s\n", v.Build)
|
||||||
|
|
||||||
|
// Prerelease versions array
|
||||||
|
if len(v.Pre) > 0 {
|
||||||
|
fmt.Println("Prerelease versions:")
|
||||||
|
for i, pre := range v.Pre {
|
||||||
|
fmt.Printf("%d: %q\n", i, pre)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build meta data array
|
||||||
|
if len(v.Build) > 0 {
|
||||||
|
fmt.Println("Build meta data:")
|
||||||
|
for i, build := range v.Build {
|
||||||
|
fmt.Printf("%d: %q\n", i, build)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
v001, err := semver.Make("0.0.1")
|
||||||
|
// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE
|
||||||
|
v001.GT(v) == true
|
||||||
|
v.LT(v001) == true
|
||||||
|
v.GTE(v) == true
|
||||||
|
v.LTE(v) == true
|
||||||
|
|
||||||
|
// Or use v.Compare(v2) for comparisons (-1, 0, 1):
|
||||||
|
v001.Compare(v) == 1
|
||||||
|
v.Compare(v001) == -1
|
||||||
|
v.Compare(v) == 0
|
||||||
|
|
||||||
|
// Manipulate Version in place:
|
||||||
|
v.Pre[0], err = semver.NewPRVersion("beta")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error parsing pre release version: %q", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Println("\nValidate versions:")
|
||||||
|
v.Build[0] = "?"
|
||||||
|
|
||||||
|
err = v.Validate()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Validation failed: %s\n", err)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Benchmarks
|
||||||
|
-----
|
||||||
|
|
||||||
|
BenchmarkParseSimple 5000000 328 ns/op 49 B/op 1 allocs/op
|
||||||
|
BenchmarkParseComplex 1000000 2105 ns/op 263 B/op 7 allocs/op
|
||||||
|
BenchmarkParseAverage 1000000 1301 ns/op 168 B/op 4 allocs/op
|
||||||
|
BenchmarkStringSimple 10000000 130 ns/op 5 B/op 1 allocs/op
|
||||||
|
BenchmarkStringLarger 5000000 280 ns/op 32 B/op 2 allocs/op
|
||||||
|
BenchmarkStringComplex 3000000 512 ns/op 80 B/op 3 allocs/op
|
||||||
|
BenchmarkStringAverage 5000000 387 ns/op 47 B/op 2 allocs/op
|
||||||
|
BenchmarkValidateSimple 500000000 7.92 ns/op 0 B/op 0 allocs/op
|
||||||
|
BenchmarkValidateComplex 2000000 923 ns/op 0 B/op 0 allocs/op
|
||||||
|
BenchmarkValidateAverage 5000000 452 ns/op 0 B/op 0 allocs/op
|
||||||
|
BenchmarkCompareSimple 100000000 11.2 ns/op 0 B/op 0 allocs/op
|
||||||
|
BenchmarkCompareComplex 50000000 40.9 ns/op 0 B/op 0 allocs/op
|
||||||
|
BenchmarkCompareAverage 50000000 43.8 ns/op 0 B/op 0 allocs/op
|
||||||
|
BenchmarkSort 5000000 436 ns/op 259 B/op 2 allocs/op
|
||||||
|
|
||||||
|
See benchmark cases at [semver_test.go](semver_test.go)
|
||||||
|
|
||||||
|
|
||||||
|
Motivation
|
||||||
|
-----
|
||||||
|
|
||||||
|
I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regex which i don't like.
|
||||||
|
|
||||||
|
|
||||||
|
Contribution
|
||||||
|
-----
|
||||||
|
|
||||||
|
Feel free to make a pull request. For bigger changes create a issue first to discuss about it.
|
||||||
|
|
||||||
|
|
||||||
|
License
|
||||||
|
-----
|
||||||
|
|
||||||
|
See [LICENSE](LICENSE) file.
|
||||||
23
vendor/github.com/blang/semver/json.go
generated
vendored
Normal file
23
vendor/github.com/blang/semver/json.go
generated
vendored
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
package semver
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MarshalJSON implements the encoding/json.Marshaler interface.
|
||||||
|
func (v Version) MarshalJSON() ([]byte, error) {
|
||||||
|
return json.Marshal(v.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON implements the encoding/json.Unmarshaler interface.
|
||||||
|
func (v *Version) UnmarshalJSON(data []byte) (err error) {
|
||||||
|
var versionString string
|
||||||
|
|
||||||
|
if err = json.Unmarshal(data, &versionString); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
*v, err = Parse(versionString)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
395
vendor/github.com/blang/semver/semver.go
generated
vendored
Normal file
395
vendor/github.com/blang/semver/semver.go
generated
vendored
Normal file
@ -0,0 +1,395 @@
|
|||||||
|
package semver
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
numbers string = "0123456789"
|
||||||
|
alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-"
|
||||||
|
alphanum = alphas + numbers
|
||||||
|
)
|
||||||
|
|
||||||
|
// SpecVersion is the latest fully supported spec version of semver
|
||||||
|
var SpecVersion = Version{
|
||||||
|
Major: 2,
|
||||||
|
Minor: 0,
|
||||||
|
Patch: 0,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Version represents a semver compatible version
|
||||||
|
type Version struct {
|
||||||
|
Major uint64
|
||||||
|
Minor uint64
|
||||||
|
Patch uint64
|
||||||
|
Pre []PRVersion
|
||||||
|
Build []string //No Precendence
|
||||||
|
}
|
||||||
|
|
||||||
|
// Version to string
|
||||||
|
func (v Version) String() string {
|
||||||
|
b := make([]byte, 0, 5)
|
||||||
|
b = strconv.AppendUint(b, v.Major, 10)
|
||||||
|
b = append(b, '.')
|
||||||
|
b = strconv.AppendUint(b, v.Minor, 10)
|
||||||
|
b = append(b, '.')
|
||||||
|
b = strconv.AppendUint(b, v.Patch, 10)
|
||||||
|
|
||||||
|
if len(v.Pre) > 0 {
|
||||||
|
b = append(b, '-')
|
||||||
|
b = append(b, v.Pre[0].String()...)
|
||||||
|
|
||||||
|
for _, pre := range v.Pre[1:] {
|
||||||
|
b = append(b, '.')
|
||||||
|
b = append(b, pre.String()...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(v.Build) > 0 {
|
||||||
|
b = append(b, '+')
|
||||||
|
b = append(b, v.Build[0]...)
|
||||||
|
|
||||||
|
for _, build := range v.Build[1:] {
|
||||||
|
b = append(b, '.')
|
||||||
|
b = append(b, build...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return string(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Equals checks if v is equal to o.
|
||||||
|
func (v Version) Equals(o Version) bool {
|
||||||
|
return (v.Compare(o) == 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// EQ checks if v is equal to o.
|
||||||
|
func (v Version) EQ(o Version) bool {
|
||||||
|
return (v.Compare(o) == 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NE checks if v is not equal to o.
|
||||||
|
func (v Version) NE(o Version) bool {
|
||||||
|
return (v.Compare(o) != 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GT checks if v is greater than o.
|
||||||
|
func (v Version) GT(o Version) bool {
|
||||||
|
return (v.Compare(o) == 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GTE checks if v is greater than or equal to o.
|
||||||
|
func (v Version) GTE(o Version) bool {
|
||||||
|
return (v.Compare(o) >= 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GE checks if v is greater than or equal to o.
|
||||||
|
func (v Version) GE(o Version) bool {
|
||||||
|
return (v.Compare(o) >= 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LT checks if v is less than o.
|
||||||
|
func (v Version) LT(o Version) bool {
|
||||||
|
return (v.Compare(o) == -1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LTE checks if v is less than or equal to o.
|
||||||
|
func (v Version) LTE(o Version) bool {
|
||||||
|
return (v.Compare(o) <= 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LE checks if v is less than or equal to o.
|
||||||
|
func (v Version) LE(o Version) bool {
|
||||||
|
return (v.Compare(o) <= 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compare compares Versions v to o:
|
||||||
|
// -1 == v is less than o
|
||||||
|
// 0 == v is equal to o
|
||||||
|
// 1 == v is greater than o
|
||||||
|
func (v Version) Compare(o Version) int {
|
||||||
|
if v.Major != o.Major {
|
||||||
|
if v.Major > o.Major {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
if v.Minor != o.Minor {
|
||||||
|
if v.Minor > o.Minor {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
if v.Patch != o.Patch {
|
||||||
|
if v.Patch > o.Patch {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
|
||||||
|
// Quick comparison if a version has no prerelease versions
|
||||||
|
if len(v.Pre) == 0 && len(o.Pre) == 0 {
|
||||||
|
return 0
|
||||||
|
} else if len(v.Pre) == 0 && len(o.Pre) > 0 {
|
||||||
|
return 1
|
||||||
|
} else if len(v.Pre) > 0 && len(o.Pre) == 0 {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
|
||||||
|
i := 0
|
||||||
|
for ; i < len(v.Pre) && i < len(o.Pre); i++ {
|
||||||
|
if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 {
|
||||||
|
continue
|
||||||
|
} else if comp == 1 {
|
||||||
|
return 1
|
||||||
|
} else {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If all pr versions are the equal but one has further prversion, this one greater
|
||||||
|
if i == len(v.Pre) && i == len(o.Pre) {
|
||||||
|
return 0
|
||||||
|
} else if i == len(v.Pre) && i < len(o.Pre) {
|
||||||
|
return -1
|
||||||
|
} else {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate validates v and returns error in case
|
||||||
|
func (v Version) Validate() error {
|
||||||
|
// Major, Minor, Patch already validated using uint64
|
||||||
|
|
||||||
|
for _, pre := range v.Pre {
|
||||||
|
if !pre.IsNum { //Numeric prerelease versions already uint64
|
||||||
|
if len(pre.VersionStr) == 0 {
|
||||||
|
return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr)
|
||||||
|
}
|
||||||
|
if !containsOnly(pre.VersionStr, alphanum) {
|
||||||
|
return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, build := range v.Build {
|
||||||
|
if len(build) == 0 {
|
||||||
|
return fmt.Errorf("Build meta data can not be empty %q", build)
|
||||||
|
}
|
||||||
|
if !containsOnly(build, alphanum) {
|
||||||
|
return fmt.Errorf("Invalid character(s) found in build meta data %q", build)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// New is an alias for Parse and returns a pointer, parses version string and returns a validated Version or error
|
||||||
|
func New(s string) (vp *Version, err error) {
|
||||||
|
v, err := Parse(s)
|
||||||
|
vp = &v
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make is an alias for Parse, parses version string and returns a validated Version or error
|
||||||
|
func Make(s string) (Version, error) {
|
||||||
|
return Parse(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse parses version string and returns a validated Version or error
|
||||||
|
func Parse(s string) (Version, error) {
|
||||||
|
if len(s) == 0 {
|
||||||
|
return Version{}, errors.New("Version string empty")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Split into major.minor.(patch+pr+meta)
|
||||||
|
parts := strings.SplitN(s, ".", 3)
|
||||||
|
if len(parts) != 3 {
|
||||||
|
return Version{}, errors.New("No Major.Minor.Patch elements found")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Major
|
||||||
|
if !containsOnly(parts[0], numbers) {
|
||||||
|
return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0])
|
||||||
|
}
|
||||||
|
if hasLeadingZeroes(parts[0]) {
|
||||||
|
return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0])
|
||||||
|
}
|
||||||
|
major, err := strconv.ParseUint(parts[0], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return Version{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Minor
|
||||||
|
if !containsOnly(parts[1], numbers) {
|
||||||
|
return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1])
|
||||||
|
}
|
||||||
|
if hasLeadingZeroes(parts[1]) {
|
||||||
|
return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1])
|
||||||
|
}
|
||||||
|
minor, err := strconv.ParseUint(parts[1], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return Version{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
v := Version{}
|
||||||
|
v.Major = major
|
||||||
|
v.Minor = minor
|
||||||
|
|
||||||
|
var build, prerelease []string
|
||||||
|
patchStr := parts[2]
|
||||||
|
|
||||||
|
if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 {
|
||||||
|
build = strings.Split(patchStr[buildIndex+1:], ".")
|
||||||
|
patchStr = patchStr[:buildIndex]
|
||||||
|
}
|
||||||
|
|
||||||
|
if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 {
|
||||||
|
prerelease = strings.Split(patchStr[preIndex+1:], ".")
|
||||||
|
patchStr = patchStr[:preIndex]
|
||||||
|
}
|
||||||
|
|
||||||
|
if !containsOnly(patchStr, numbers) {
|
||||||
|
return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr)
|
||||||
|
}
|
||||||
|
if hasLeadingZeroes(patchStr) {
|
||||||
|
return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr)
|
||||||
|
}
|
||||||
|
patch, err := strconv.ParseUint(patchStr, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return Version{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
v.Patch = patch
|
||||||
|
|
||||||
|
// Prerelease
|
||||||
|
for _, prstr := range prerelease {
|
||||||
|
parsedPR, err := NewPRVersion(prstr)
|
||||||
|
if err != nil {
|
||||||
|
return Version{}, err
|
||||||
|
}
|
||||||
|
v.Pre = append(v.Pre, parsedPR)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build meta data
|
||||||
|
for _, str := range build {
|
||||||
|
if len(str) == 0 {
|
||||||
|
return Version{}, errors.New("Build meta data is empty")
|
||||||
|
}
|
||||||
|
if !containsOnly(str, alphanum) {
|
||||||
|
return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str)
|
||||||
|
}
|
||||||
|
v.Build = append(v.Build, str)
|
||||||
|
}
|
||||||
|
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustParse is like Parse but panics if the version cannot be parsed.
|
||||||
|
func MustParse(s string) Version {
|
||||||
|
v, err := Parse(s)
|
||||||
|
if err != nil {
|
||||||
|
panic(`semver: Parse(` + s + `): ` + err.Error())
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// PRVersion represents a PreRelease Version
|
||||||
|
type PRVersion struct {
|
||||||
|
VersionStr string
|
||||||
|
VersionNum uint64
|
||||||
|
IsNum bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewPRVersion creates a new valid prerelease version
|
||||||
|
func NewPRVersion(s string) (PRVersion, error) {
|
||||||
|
if len(s) == 0 {
|
||||||
|
return PRVersion{}, errors.New("Prerelease is empty")
|
||||||
|
}
|
||||||
|
v := PRVersion{}
|
||||||
|
if containsOnly(s, numbers) {
|
||||||
|
if hasLeadingZeroes(s) {
|
||||||
|
return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s)
|
||||||
|
}
|
||||||
|
num, err := strconv.ParseUint(s, 10, 64)
|
||||||
|
|
||||||
|
// Might never be hit, but just in case
|
||||||
|
if err != nil {
|
||||||
|
return PRVersion{}, err
|
||||||
|
}
|
||||||
|
v.VersionNum = num
|
||||||
|
v.IsNum = true
|
||||||
|
} else if containsOnly(s, alphanum) {
|
||||||
|
v.VersionStr = s
|
||||||
|
v.IsNum = false
|
||||||
|
} else {
|
||||||
|
return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s)
|
||||||
|
}
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsNumeric checks if prerelease-version is numeric
|
||||||
|
func (v PRVersion) IsNumeric() bool {
|
||||||
|
return v.IsNum
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compare compares two PreRelease Versions v and o:
|
||||||
|
// -1 == v is less than o
|
||||||
|
// 0 == v is equal to o
|
||||||
|
// 1 == v is greater than o
|
||||||
|
func (v PRVersion) Compare(o PRVersion) int {
|
||||||
|
if v.IsNum && !o.IsNum {
|
||||||
|
return -1
|
||||||
|
} else if !v.IsNum && o.IsNum {
|
||||||
|
return 1
|
||||||
|
} else if v.IsNum && o.IsNum {
|
||||||
|
if v.VersionNum == o.VersionNum {
|
||||||
|
return 0
|
||||||
|
} else if v.VersionNum > o.VersionNum {
|
||||||
|
return 1
|
||||||
|
} else {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
} else { // both are Alphas
|
||||||
|
if v.VersionStr == o.VersionStr {
|
||||||
|
return 0
|
||||||
|
} else if v.VersionStr > o.VersionStr {
|
||||||
|
return 1
|
||||||
|
} else {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// PreRelease version to string
|
||||||
|
func (v PRVersion) String() string {
|
||||||
|
if v.IsNum {
|
||||||
|
return strconv.FormatUint(v.VersionNum, 10)
|
||||||
|
}
|
||||||
|
return v.VersionStr
|
||||||
|
}
|
||||||
|
|
||||||
|
func containsOnly(s string, set string) bool {
|
||||||
|
return strings.IndexFunc(s, func(r rune) bool {
|
||||||
|
return !strings.ContainsRune(set, r)
|
||||||
|
}) == -1
|
||||||
|
}
|
||||||
|
|
||||||
|
func hasLeadingZeroes(s string) bool {
|
||||||
|
return len(s) > 1 && s[0] == '0'
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBuildVersion creates a new valid build version
|
||||||
|
func NewBuildVersion(s string) (string, error) {
|
||||||
|
if len(s) == 0 {
|
||||||
|
return "", errors.New("Buildversion is empty")
|
||||||
|
}
|
||||||
|
if !containsOnly(s, alphanum) {
|
||||||
|
return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s)
|
||||||
|
}
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
28
vendor/github.com/blang/semver/sort.go
generated
vendored
Normal file
28
vendor/github.com/blang/semver/sort.go
generated
vendored
Normal file
@ -0,0 +1,28 @@
|
|||||||
|
package semver
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Versions represents multiple versions.
|
||||||
|
type Versions []Version
|
||||||
|
|
||||||
|
// Len returns length of version collection
|
||||||
|
func (s Versions) Len() int {
|
||||||
|
return len(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Swap swaps two versions inside the collection by its indices
|
||||||
|
func (s Versions) Swap(i, j int) {
|
||||||
|
s[i], s[j] = s[j], s[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Less checks if version at index i is less than version at index j
|
||||||
|
func (s Versions) Less(i, j int) bool {
|
||||||
|
return s[i].LT(s[j])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort sorts a slice of versions
|
||||||
|
func Sort(versions []Version) {
|
||||||
|
sort.Sort(Versions(versions))
|
||||||
|
}
|
||||||
30
vendor/github.com/blang/semver/sql.go
generated
vendored
Normal file
30
vendor/github.com/blang/semver/sql.go
generated
vendored
Normal file
@ -0,0 +1,30 @@
|
|||||||
|
package semver
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql/driver"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Scan implements the database/sql.Scanner interface.
|
||||||
|
func (v *Version) Scan(src interface{}) (err error) {
|
||||||
|
var str string
|
||||||
|
switch src := src.(type) {
|
||||||
|
case string:
|
||||||
|
str = src
|
||||||
|
case []byte:
|
||||||
|
str = string(src)
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("Version.Scan: cannot convert %T to string.", src)
|
||||||
|
}
|
||||||
|
|
||||||
|
if t, err := Parse(str); err == nil {
|
||||||
|
*v = t
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value implements the database/sql/driver.Valuer interface.
|
||||||
|
func (v Version) Value() (driver.Value, error) {
|
||||||
|
return v.String(), nil
|
||||||
|
}
|
||||||
1
vendor/github.com/docker/distribution/manifest/doc.go
generated
vendored
Normal file
1
vendor/github.com/docker/distribution/manifest/doc.go
generated
vendored
Normal file
@ -0,0 +1 @@
|
|||||||
|
package manifest
|
||||||
283
vendor/github.com/docker/distribution/manifest/schema1/config_builder.go
generated
vendored
Normal file
283
vendor/github.com/docker/distribution/manifest/schema1/config_builder.go
generated
vendored
Normal file
@ -0,0 +1,283 @@
|
|||||||
|
package schema1
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/sha512"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/docker/distribution"
|
||||||
|
"github.com/docker/distribution/context"
|
||||||
|
"github.com/docker/distribution/reference"
|
||||||
|
"github.com/docker/libtrust"
|
||||||
|
|
||||||
|
"github.com/docker/distribution/digest"
|
||||||
|
"github.com/docker/distribution/manifest"
|
||||||
|
)
|
||||||
|
|
||||||
|
type diffID digest.Digest
|
||||||
|
|
||||||
|
// gzippedEmptyTar is a gzip-compressed version of an empty tar file
|
||||||
|
// (1024 NULL bytes)
|
||||||
|
var gzippedEmptyTar = []byte{
|
||||||
|
31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
|
||||||
|
0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
|
||||||
|
}
|
||||||
|
|
||||||
|
// digestSHA256GzippedEmptyTar is the canonical sha256 digest of
|
||||||
|
// gzippedEmptyTar
|
||||||
|
const digestSHA256GzippedEmptyTar = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
|
||||||
|
|
||||||
|
// configManifestBuilder is a type for constructing manifests from an image
|
||||||
|
// configuration and generic descriptors.
|
||||||
|
type configManifestBuilder struct {
|
||||||
|
// bs is a BlobService used to create empty layer tars in the
|
||||||
|
// blob store if necessary.
|
||||||
|
bs distribution.BlobService
|
||||||
|
// pk is the libtrust private key used to sign the final manifest.
|
||||||
|
pk libtrust.PrivateKey
|
||||||
|
// configJSON is configuration supplied when the ManifestBuilder was
|
||||||
|
// created.
|
||||||
|
configJSON []byte
|
||||||
|
// ref contains the name and optional tag provided to NewConfigManifestBuilder.
|
||||||
|
ref reference.Named
|
||||||
|
// descriptors is the set of descriptors referencing the layers.
|
||||||
|
descriptors []distribution.Descriptor
|
||||||
|
// emptyTarDigest is set to a valid digest if an empty tar has been
|
||||||
|
// put in the blob store; otherwise it is empty.
|
||||||
|
emptyTarDigest digest.Digest
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewConfigManifestBuilder is used to build new manifests for the current
|
||||||
|
// schema version from an image configuration and a set of descriptors.
|
||||||
|
// It takes a BlobService so that it can add an empty tar to the blob store
|
||||||
|
// if the resulting manifest needs empty layers.
|
||||||
|
func NewConfigManifestBuilder(bs distribution.BlobService, pk libtrust.PrivateKey, ref reference.Named, configJSON []byte) distribution.ManifestBuilder {
|
||||||
|
return &configManifestBuilder{
|
||||||
|
bs: bs,
|
||||||
|
pk: pk,
|
||||||
|
configJSON: configJSON,
|
||||||
|
ref: ref,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build produces a final manifest from the given references
|
||||||
|
func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Manifest, err error) {
|
||||||
|
type imageRootFS struct {
|
||||||
|
Type string `json:"type"`
|
||||||
|
DiffIDs []diffID `json:"diff_ids,omitempty"`
|
||||||
|
BaseLayer string `json:"base_layer,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type imageHistory struct {
|
||||||
|
Created time.Time `json:"created"`
|
||||||
|
Author string `json:"author,omitempty"`
|
||||||
|
CreatedBy string `json:"created_by,omitempty"`
|
||||||
|
Comment string `json:"comment,omitempty"`
|
||||||
|
EmptyLayer bool `json:"empty_layer,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type imageConfig struct {
|
||||||
|
RootFS *imageRootFS `json:"rootfs,omitempty"`
|
||||||
|
History []imageHistory `json:"history,omitempty"`
|
||||||
|
Architecture string `json:"architecture,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
var img imageConfig
|
||||||
|
|
||||||
|
if err := json.Unmarshal(mb.configJSON, &img); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(img.History) == 0 {
|
||||||
|
return nil, errors.New("empty history when trying to create schema1 manifest")
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(img.RootFS.DiffIDs) != len(mb.descriptors) {
|
||||||
|
return nil, errors.New("number of descriptors and number of layers in rootfs must match")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate IDs for each layer
|
||||||
|
// For non-top-level layers, create fake V1Compatibility strings that
|
||||||
|
// fit the format and don't collide with anything else, but don't
|
||||||
|
// result in runnable images on their own.
|
||||||
|
type v1Compatibility struct {
|
||||||
|
ID string `json:"id"`
|
||||||
|
Parent string `json:"parent,omitempty"`
|
||||||
|
Comment string `json:"comment,omitempty"`
|
||||||
|
Created time.Time `json:"created"`
|
||||||
|
ContainerConfig struct {
|
||||||
|
Cmd []string
|
||||||
|
} `json:"container_config,omitempty"`
|
||||||
|
Author string `json:"author,omitempty"`
|
||||||
|
ThrowAway bool `json:"throwaway,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
fsLayerList := make([]FSLayer, len(img.History))
|
||||||
|
history := make([]History, len(img.History))
|
||||||
|
|
||||||
|
parent := ""
|
||||||
|
layerCounter := 0
|
||||||
|
for i, h := range img.History[:len(img.History)-1] {
|
||||||
|
var blobsum digest.Digest
|
||||||
|
if h.EmptyLayer {
|
||||||
|
if blobsum, err = mb.emptyTar(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if len(img.RootFS.DiffIDs) <= layerCounter {
|
||||||
|
return nil, errors.New("too many non-empty layers in History section")
|
||||||
|
}
|
||||||
|
blobsum = mb.descriptors[layerCounter].Digest
|
||||||
|
layerCounter++
|
||||||
|
}
|
||||||
|
|
||||||
|
v1ID := digest.FromBytes([]byte(blobsum.Hex() + " " + parent)).Hex()
|
||||||
|
|
||||||
|
if i == 0 && img.RootFS.BaseLayer != "" {
|
||||||
|
// windows-only baselayer setup
|
||||||
|
baseID := sha512.Sum384([]byte(img.RootFS.BaseLayer))
|
||||||
|
parent = fmt.Sprintf("%x", baseID[:32])
|
||||||
|
}
|
||||||
|
|
||||||
|
v1Compatibility := v1Compatibility{
|
||||||
|
ID: v1ID,
|
||||||
|
Parent: parent,
|
||||||
|
Comment: h.Comment,
|
||||||
|
Created: h.Created,
|
||||||
|
Author: h.Author,
|
||||||
|
}
|
||||||
|
v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy}
|
||||||
|
if h.EmptyLayer {
|
||||||
|
v1Compatibility.ThrowAway = true
|
||||||
|
}
|
||||||
|
jsonBytes, err := json.Marshal(&v1Compatibility)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
reversedIndex := len(img.History) - i - 1
|
||||||
|
history[reversedIndex].V1Compatibility = string(jsonBytes)
|
||||||
|
fsLayerList[reversedIndex] = FSLayer{BlobSum: blobsum}
|
||||||
|
|
||||||
|
parent = v1ID
|
||||||
|
}
|
||||||
|
|
||||||
|
latestHistory := img.History[len(img.History)-1]
|
||||||
|
|
||||||
|
var blobsum digest.Digest
|
||||||
|
if latestHistory.EmptyLayer {
|
||||||
|
if blobsum, err = mb.emptyTar(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if len(img.RootFS.DiffIDs) <= layerCounter {
|
||||||
|
return nil, errors.New("too many non-empty layers in History section")
|
||||||
|
}
|
||||||
|
blobsum = mb.descriptors[layerCounter].Digest
|
||||||
|
}
|
||||||
|
|
||||||
|
fsLayerList[0] = FSLayer{BlobSum: blobsum}
|
||||||
|
dgst := digest.FromBytes([]byte(blobsum.Hex() + " " + parent + " " + string(mb.configJSON)))
|
||||||
|
|
||||||
|
// Top-level v1compatibility string should be a modified version of the
|
||||||
|
// image config.
|
||||||
|
transformedConfig, err := MakeV1ConfigFromConfig(mb.configJSON, dgst.Hex(), parent, latestHistory.EmptyLayer)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
history[0].V1Compatibility = string(transformedConfig)
|
||||||
|
|
||||||
|
tag := ""
|
||||||
|
if tagged, isTagged := mb.ref.(reference.Tagged); isTagged {
|
||||||
|
tag = tagged.Tag()
|
||||||
|
}
|
||||||
|
|
||||||
|
mfst := Manifest{
|
||||||
|
Versioned: manifest.Versioned{
|
||||||
|
SchemaVersion: 1,
|
||||||
|
},
|
||||||
|
Name: mb.ref.Name(),
|
||||||
|
Tag: tag,
|
||||||
|
Architecture: img.Architecture,
|
||||||
|
FSLayers: fsLayerList,
|
||||||
|
History: history,
|
||||||
|
}
|
||||||
|
|
||||||
|
return Sign(&mfst, mb.pk)
|
||||||
|
}
|
||||||
|
|
||||||
|
// emptyTar pushes a compressed empty tar to the blob store if one doesn't
|
||||||
|
// already exist, and returns its blobsum.
|
||||||
|
func (mb *configManifestBuilder) emptyTar(ctx context.Context) (digest.Digest, error) {
|
||||||
|
if mb.emptyTarDigest != "" {
|
||||||
|
// Already put an empty tar
|
||||||
|
return mb.emptyTarDigest, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
descriptor, err := mb.bs.Stat(ctx, digestSHA256GzippedEmptyTar)
|
||||||
|
switch err {
|
||||||
|
case nil:
|
||||||
|
mb.emptyTarDigest = descriptor.Digest
|
||||||
|
return descriptor.Digest, nil
|
||||||
|
case distribution.ErrBlobUnknown:
|
||||||
|
// nop
|
||||||
|
default:
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add gzipped empty tar to the blob store
|
||||||
|
descriptor, err = mb.bs.Put(ctx, "", gzippedEmptyTar)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
mb.emptyTarDigest = descriptor.Digest
|
||||||
|
|
||||||
|
return descriptor.Digest, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendReference adds a reference to the current ManifestBuilder
|
||||||
|
func (mb *configManifestBuilder) AppendReference(d distribution.Describable) error {
|
||||||
|
// todo: verification here?
|
||||||
|
mb.descriptors = append(mb.descriptors, d.Descriptor())
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// References returns the current references added to this builder
|
||||||
|
func (mb *configManifestBuilder) References() []distribution.Descriptor {
|
||||||
|
return mb.descriptors
|
||||||
|
}
|
||||||
|
|
||||||
|
// MakeV1ConfigFromConfig creates an legacy V1 image config from image config JSON
|
||||||
|
func MakeV1ConfigFromConfig(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) {
|
||||||
|
// Top-level v1compatibility string should be a modified version of the
|
||||||
|
// image config.
|
||||||
|
var configAsMap map[string]*json.RawMessage
|
||||||
|
if err := json.Unmarshal(configJSON, &configAsMap); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete fields that didn't exist in old manifest
|
||||||
|
delete(configAsMap, "rootfs")
|
||||||
|
delete(configAsMap, "history")
|
||||||
|
configAsMap["id"] = rawJSON(v1ID)
|
||||||
|
if parentV1ID != "" {
|
||||||
|
configAsMap["parent"] = rawJSON(parentV1ID)
|
||||||
|
}
|
||||||
|
if throwaway {
|
||||||
|
configAsMap["throwaway"] = rawJSON(true)
|
||||||
|
}
|
||||||
|
|
||||||
|
return json.Marshal(configAsMap)
|
||||||
|
}
|
||||||
|
|
||||||
|
func rawJSON(value interface{}) *json.RawMessage {
|
||||||
|
jsonval, err := json.Marshal(value)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return (*json.RawMessage)(&jsonval)
|
||||||
|
}
|
||||||
184
vendor/github.com/docker/distribution/manifest/schema1/manifest.go
generated
vendored
Normal file
184
vendor/github.com/docker/distribution/manifest/schema1/manifest.go
generated
vendored
Normal file
@ -0,0 +1,184 @@
|
|||||||
|
package schema1
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/docker/distribution"
|
||||||
|
"github.com/docker/distribution/digest"
|
||||||
|
"github.com/docker/distribution/manifest"
|
||||||
|
"github.com/docker/libtrust"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// MediaTypeManifest specifies the mediaType for the current version. Note
|
||||||
|
// that for schema version 1, the the media is optionally "application/json".
|
||||||
|
MediaTypeManifest = "application/vnd.docker.distribution.manifest.v1+json"
|
||||||
|
// MediaTypeSignedManifest specifies the mediatype for current SignedManifest version
|
||||||
|
MediaTypeSignedManifest = "application/vnd.docker.distribution.manifest.v1+prettyjws"
|
||||||
|
// MediaTypeManifestLayer specifies the media type for manifest layers
|
||||||
|
MediaTypeManifestLayer = "application/vnd.docker.container.image.rootfs.diff+x-gtar"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// SchemaVersion provides a pre-initialized version structure for this
|
||||||
|
// packages version of the manifest.
|
||||||
|
SchemaVersion = manifest.Versioned{
|
||||||
|
SchemaVersion: 1,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
schema1Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
|
||||||
|
sm := new(SignedManifest)
|
||||||
|
err := sm.UnmarshalJSON(b)
|
||||||
|
if err != nil {
|
||||||
|
return nil, distribution.Descriptor{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
desc := distribution.Descriptor{
|
||||||
|
Digest: digest.FromBytes(sm.Canonical),
|
||||||
|
Size: int64(len(sm.Canonical)),
|
||||||
|
MediaType: MediaTypeSignedManifest,
|
||||||
|
}
|
||||||
|
return sm, desc, err
|
||||||
|
}
|
||||||
|
err := distribution.RegisterManifestSchema(MediaTypeSignedManifest, schema1Func)
|
||||||
|
if err != nil {
|
||||||
|
panic(fmt.Sprintf("Unable to register manifest: %s", err))
|
||||||
|
}
|
||||||
|
err = distribution.RegisterManifestSchema("", schema1Func)
|
||||||
|
if err != nil {
|
||||||
|
panic(fmt.Sprintf("Unable to register manifest: %s", err))
|
||||||
|
}
|
||||||
|
err = distribution.RegisterManifestSchema("application/json", schema1Func)
|
||||||
|
if err != nil {
|
||||||
|
panic(fmt.Sprintf("Unable to register manifest: %s", err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FSLayer is a container struct for BlobSums defined in an image manifest
|
||||||
|
type FSLayer struct {
|
||||||
|
// BlobSum is the tarsum of the referenced filesystem image layer
|
||||||
|
BlobSum digest.Digest `json:"blobSum"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// History stores unstructured v1 compatibility information
|
||||||
|
type History struct {
|
||||||
|
// V1Compatibility is the raw v1 compatibility information
|
||||||
|
V1Compatibility string `json:"v1Compatibility"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Manifest provides the base accessible fields for working with V2 image
|
||||||
|
// format in the registry.
|
||||||
|
type Manifest struct {
|
||||||
|
manifest.Versioned
|
||||||
|
|
||||||
|
// Name is the name of the image's repository
|
||||||
|
Name string `json:"name"`
|
||||||
|
|
||||||
|
// Tag is the tag of the image specified by this manifest
|
||||||
|
Tag string `json:"tag"`
|
||||||
|
|
||||||
|
// Architecture is the host architecture on which this image is intended to
|
||||||
|
// run
|
||||||
|
Architecture string `json:"architecture"`
|
||||||
|
|
||||||
|
// FSLayers is a list of filesystem layer blobSums contained in this image
|
||||||
|
FSLayers []FSLayer `json:"fsLayers"`
|
||||||
|
|
||||||
|
// History is a list of unstructured historical data for v1 compatibility
|
||||||
|
History []History `json:"history"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// SignedManifest provides an envelope for a signed image manifest, including
|
||||||
|
// the format sensitive raw bytes.
|
||||||
|
type SignedManifest struct {
|
||||||
|
Manifest
|
||||||
|
|
||||||
|
// Canonical is the canonical byte representation of the ImageManifest,
|
||||||
|
// without any attached signatures. The manifest byte
|
||||||
|
// representation cannot change or it will have to be re-signed.
|
||||||
|
Canonical []byte `json:"-"`
|
||||||
|
|
||||||
|
// all contains the byte representation of the Manifest including signatures
|
||||||
|
// and is returned by Payload()
|
||||||
|
all []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON populates a new SignedManifest struct from JSON data.
|
||||||
|
func (sm *SignedManifest) UnmarshalJSON(b []byte) error {
|
||||||
|
sm.all = make([]byte, len(b), len(b))
|
||||||
|
// store manifest and signatures in all
|
||||||
|
copy(sm.all, b)
|
||||||
|
|
||||||
|
jsig, err := libtrust.ParsePrettySignature(b, "signatures")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resolve the payload in the manifest.
|
||||||
|
bytes, err := jsig.Payload()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// sm.Canonical stores the canonical manifest JSON
|
||||||
|
sm.Canonical = make([]byte, len(bytes), len(bytes))
|
||||||
|
copy(sm.Canonical, bytes)
|
||||||
|
|
||||||
|
// Unmarshal canonical JSON into Manifest object
|
||||||
|
var manifest Manifest
|
||||||
|
if err := json.Unmarshal(sm.Canonical, &manifest); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
sm.Manifest = manifest
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// References returnes the descriptors of this manifests references
|
||||||
|
func (sm SignedManifest) References() []distribution.Descriptor {
|
||||||
|
dependencies := make([]distribution.Descriptor, len(sm.FSLayers))
|
||||||
|
for i, fsLayer := range sm.FSLayers {
|
||||||
|
dependencies[i] = distribution.Descriptor{
|
||||||
|
MediaType: "application/vnd.docker.container.image.rootfs.diff+x-gtar",
|
||||||
|
Digest: fsLayer.BlobSum,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return dependencies
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalJSON returns the contents of raw. If Raw is nil, marshals the inner
|
||||||
|
// contents. Applications requiring a marshaled signed manifest should simply
|
||||||
|
// use Raw directly, since the the content produced by json.Marshal will be
|
||||||
|
// compacted and will fail signature checks.
|
||||||
|
func (sm *SignedManifest) MarshalJSON() ([]byte, error) {
|
||||||
|
if len(sm.all) > 0 {
|
||||||
|
return sm.all, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the raw data is not available, just dump the inner content.
|
||||||
|
return json.Marshal(&sm.Manifest)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Payload returns the signed content of the signed manifest.
|
||||||
|
func (sm SignedManifest) Payload() (string, []byte, error) {
|
||||||
|
return MediaTypeSignedManifest, sm.all, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Signatures returns the signatures as provided by
|
||||||
|
// (*libtrust.JSONSignature).Signatures. The byte slices are opaque jws
|
||||||
|
// signatures.
|
||||||
|
func (sm *SignedManifest) Signatures() ([][]byte, error) {
|
||||||
|
jsig, err := libtrust.ParsePrettySignature(sm.all, "signatures")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resolve the payload in the manifest.
|
||||||
|
return jsig.Signatures()
|
||||||
|
}
|
||||||
98
vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go
generated
vendored
Normal file
98
vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go
generated
vendored
Normal file
@ -0,0 +1,98 @@
|
|||||||
|
package schema1
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"errors"
|
||||||
|
"github.com/docker/distribution"
|
||||||
|
"github.com/docker/distribution/context"
|
||||||
|
"github.com/docker/distribution/digest"
|
||||||
|
"github.com/docker/distribution/manifest"
|
||||||
|
"github.com/docker/distribution/reference"
|
||||||
|
"github.com/docker/libtrust"
|
||||||
|
)
|
||||||
|
|
||||||
|
// referenceManifestBuilder is a type for constructing manifests from schema1
|
||||||
|
// dependencies.
|
||||||
|
type referenceManifestBuilder struct {
|
||||||
|
Manifest
|
||||||
|
pk libtrust.PrivateKey
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewReferenceManifestBuilder is used to build new manifests for the current
|
||||||
|
// schema version using schema1 dependencies.
|
||||||
|
func NewReferenceManifestBuilder(pk libtrust.PrivateKey, ref reference.Named, architecture string) distribution.ManifestBuilder {
|
||||||
|
tag := ""
|
||||||
|
if tagged, isTagged := ref.(reference.Tagged); isTagged {
|
||||||
|
tag = tagged.Tag()
|
||||||
|
}
|
||||||
|
|
||||||
|
return &referenceManifestBuilder{
|
||||||
|
Manifest: Manifest{
|
||||||
|
Versioned: manifest.Versioned{
|
||||||
|
SchemaVersion: 1,
|
||||||
|
},
|
||||||
|
Name: ref.Name(),
|
||||||
|
Tag: tag,
|
||||||
|
Architecture: architecture,
|
||||||
|
},
|
||||||
|
pk: pk,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mb *referenceManifestBuilder) Build(ctx context.Context) (distribution.Manifest, error) {
|
||||||
|
m := mb.Manifest
|
||||||
|
if len(m.FSLayers) == 0 {
|
||||||
|
return nil, errors.New("cannot build manifest with zero layers or history")
|
||||||
|
}
|
||||||
|
|
||||||
|
m.FSLayers = make([]FSLayer, len(mb.Manifest.FSLayers))
|
||||||
|
m.History = make([]History, len(mb.Manifest.History))
|
||||||
|
copy(m.FSLayers, mb.Manifest.FSLayers)
|
||||||
|
copy(m.History, mb.Manifest.History)
|
||||||
|
|
||||||
|
return Sign(&m, mb.pk)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendReference adds a reference to the current ManifestBuilder
|
||||||
|
func (mb *referenceManifestBuilder) AppendReference(d distribution.Describable) error {
|
||||||
|
r, ok := d.(Reference)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Unable to add non-reference type to v1 builder")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Entries need to be prepended
|
||||||
|
mb.Manifest.FSLayers = append([]FSLayer{{BlobSum: r.Digest}}, mb.Manifest.FSLayers...)
|
||||||
|
mb.Manifest.History = append([]History{r.History}, mb.Manifest.History...)
|
||||||
|
return nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// References returns the current references added to this builder
|
||||||
|
func (mb *referenceManifestBuilder) References() []distribution.Descriptor {
|
||||||
|
refs := make([]distribution.Descriptor, len(mb.Manifest.FSLayers))
|
||||||
|
for i := range mb.Manifest.FSLayers {
|
||||||
|
layerDigest := mb.Manifest.FSLayers[i].BlobSum
|
||||||
|
history := mb.Manifest.History[i]
|
||||||
|
ref := Reference{layerDigest, 0, history}
|
||||||
|
refs[i] = ref.Descriptor()
|
||||||
|
}
|
||||||
|
return refs
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reference describes a manifest v2, schema version 1 dependency.
|
||||||
|
// An FSLayer associated with a history entry.
|
||||||
|
type Reference struct {
|
||||||
|
Digest digest.Digest
|
||||||
|
Size int64 // if we know it, set it for the descriptor.
|
||||||
|
History History
|
||||||
|
}
|
||||||
|
|
||||||
|
// Descriptor describes a reference
|
||||||
|
func (r Reference) Descriptor() distribution.Descriptor {
|
||||||
|
return distribution.Descriptor{
|
||||||
|
MediaType: MediaTypeManifestLayer,
|
||||||
|
Digest: r.Digest,
|
||||||
|
Size: r.Size,
|
||||||
|
}
|
||||||
|
}
|
||||||
68
vendor/github.com/docker/distribution/manifest/schema1/sign.go
generated
vendored
Normal file
68
vendor/github.com/docker/distribution/manifest/schema1/sign.go
generated
vendored
Normal file
@ -0,0 +1,68 @@
|
|||||||
|
package schema1
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/x509"
|
||||||
|
"encoding/json"
|
||||||
|
|
||||||
|
"github.com/docker/libtrust"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Sign signs the manifest with the provided private key, returning a
|
||||||
|
// SignedManifest. This typically won't be used within the registry, except
|
||||||
|
// for testing.
|
||||||
|
func Sign(m *Manifest, pk libtrust.PrivateKey) (*SignedManifest, error) {
|
||||||
|
p, err := json.MarshalIndent(m, "", " ")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
js, err := libtrust.NewJSONSignature(p)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := js.Sign(pk); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
pretty, err := js.PrettySignature("signatures")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &SignedManifest{
|
||||||
|
Manifest: *m,
|
||||||
|
all: pretty,
|
||||||
|
Canonical: p,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SignWithChain signs the manifest with the given private key and x509 chain.
|
||||||
|
// The public key of the first element in the chain must be the public key
|
||||||
|
// corresponding with the sign key.
|
||||||
|
func SignWithChain(m *Manifest, key libtrust.PrivateKey, chain []*x509.Certificate) (*SignedManifest, error) {
|
||||||
|
p, err := json.MarshalIndent(m, "", " ")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
js, err := libtrust.NewJSONSignature(p)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := js.SignWithChain(key, chain); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
pretty, err := js.PrettySignature("signatures")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &SignedManifest{
|
||||||
|
Manifest: *m,
|
||||||
|
all: pretty,
|
||||||
|
Canonical: p,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
32
vendor/github.com/docker/distribution/manifest/schema1/verify.go
generated
vendored
Normal file
32
vendor/github.com/docker/distribution/manifest/schema1/verify.go
generated
vendored
Normal file
@ -0,0 +1,32 @@
|
|||||||
|
package schema1
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/x509"
|
||||||
|
|
||||||
|
"github.com/Sirupsen/logrus"
|
||||||
|
"github.com/docker/libtrust"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Verify verifies the signature of the signed manifest returning the public
|
||||||
|
// keys used during signing.
|
||||||
|
func Verify(sm *SignedManifest) ([]libtrust.PublicKey, error) {
|
||||||
|
js, err := libtrust.ParsePrettySignature(sm.all, "signatures")
|
||||||
|
if err != nil {
|
||||||
|
logrus.WithField("err", err).Debugf("(*SignedManifest).Verify")
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return js.Verify()
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifyChains verifies the signature of the signed manifest against the
|
||||||
|
// certificate pool returning the list of verified chains. Signatures without
|
||||||
|
// an x509 chain are not checked.
|
||||||
|
func VerifyChains(sm *SignedManifest, ca *x509.CertPool) ([][]*x509.Certificate, error) {
|
||||||
|
js, err := libtrust.ParsePrettySignature(sm.all, "signatures")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return js.VerifyChains(ca)
|
||||||
|
}
|
||||||
80
vendor/github.com/docker/distribution/manifest/schema2/builder.go
generated
vendored
Normal file
80
vendor/github.com/docker/distribution/manifest/schema2/builder.go
generated
vendored
Normal file
@ -0,0 +1,80 @@
|
|||||||
|
package schema2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/docker/distribution"
|
||||||
|
"github.com/docker/distribution/context"
|
||||||
|
"github.com/docker/distribution/digest"
|
||||||
|
)
|
||||||
|
|
||||||
|
// builder is a type for constructing manifests.
|
||||||
|
type builder struct {
|
||||||
|
// bs is a BlobService used to publish the configuration blob.
|
||||||
|
bs distribution.BlobService
|
||||||
|
|
||||||
|
// configJSON references
|
||||||
|
configJSON []byte
|
||||||
|
|
||||||
|
// layers is a list of layer descriptors that gets built by successive
|
||||||
|
// calls to AppendReference.
|
||||||
|
layers []distribution.Descriptor
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewManifestBuilder is used to build new manifests for the current schema
|
||||||
|
// version. It takes a BlobService so it can publish the configuration blob
|
||||||
|
// as part of the Build process.
|
||||||
|
func NewManifestBuilder(bs distribution.BlobService, configJSON []byte) distribution.ManifestBuilder {
|
||||||
|
mb := &builder{
|
||||||
|
bs: bs,
|
||||||
|
configJSON: make([]byte, len(configJSON)),
|
||||||
|
}
|
||||||
|
copy(mb.configJSON, configJSON)
|
||||||
|
|
||||||
|
return mb
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build produces a final manifest from the given references.
|
||||||
|
func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) {
|
||||||
|
m := Manifest{
|
||||||
|
Versioned: SchemaVersion,
|
||||||
|
Layers: make([]distribution.Descriptor, len(mb.layers)),
|
||||||
|
}
|
||||||
|
copy(m.Layers, mb.layers)
|
||||||
|
|
||||||
|
configDigest := digest.FromBytes(mb.configJSON)
|
||||||
|
|
||||||
|
var err error
|
||||||
|
m.Config, err = mb.bs.Stat(ctx, configDigest)
|
||||||
|
switch err {
|
||||||
|
case nil:
|
||||||
|
// Override MediaType, since Put always replaces the specified media
|
||||||
|
// type with application/octet-stream in the descriptor it returns.
|
||||||
|
m.Config.MediaType = MediaTypeConfig
|
||||||
|
return FromStruct(m)
|
||||||
|
case distribution.ErrBlobUnknown:
|
||||||
|
// nop
|
||||||
|
default:
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add config to the blob store
|
||||||
|
m.Config, err = mb.bs.Put(ctx, MediaTypeConfig, mb.configJSON)
|
||||||
|
// Override MediaType, since Put always replaces the specified media
|
||||||
|
// type with application/octet-stream in the descriptor it returns.
|
||||||
|
m.Config.MediaType = MediaTypeConfig
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return FromStruct(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendReference adds a reference to the current ManifestBuilder.
|
||||||
|
func (mb *builder) AppendReference(d distribution.Describable) error {
|
||||||
|
mb.layers = append(mb.layers, d.Descriptor())
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// References returns the current references added to this builder.
|
||||||
|
func (mb *builder) References() []distribution.Descriptor {
|
||||||
|
return mb.layers
|
||||||
|
}
|
||||||
128
vendor/github.com/docker/distribution/manifest/schema2/manifest.go
generated
vendored
Normal file
128
vendor/github.com/docker/distribution/manifest/schema2/manifest.go
generated
vendored
Normal file
@ -0,0 +1,128 @@
|
|||||||
|
package schema2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/docker/distribution"
|
||||||
|
"github.com/docker/distribution/digest"
|
||||||
|
"github.com/docker/distribution/manifest"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// MediaTypeManifest specifies the mediaType for the current version.
|
||||||
|
MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json"
|
||||||
|
|
||||||
|
// MediaTypeConfig specifies the mediaType for the image configuration.
|
||||||
|
MediaTypeConfig = "application/vnd.docker.container.image.v1+json"
|
||||||
|
|
||||||
|
// MediaTypeLayer is the mediaType used for layers referenced by the
|
||||||
|
// manifest.
|
||||||
|
MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip"
|
||||||
|
|
||||||
|
// MediaTypeForeignLayer is the mediaType used for layers that must be
|
||||||
|
// downloaded from foreign URLs.
|
||||||
|
MediaTypeForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// SchemaVersion provides a pre-initialized version structure for this
|
||||||
|
// packages version of the manifest.
|
||||||
|
SchemaVersion = manifest.Versioned{
|
||||||
|
SchemaVersion: 2,
|
||||||
|
MediaType: MediaTypeManifest,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
schema2Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
|
||||||
|
m := new(DeserializedManifest)
|
||||||
|
err := m.UnmarshalJSON(b)
|
||||||
|
if err != nil {
|
||||||
|
return nil, distribution.Descriptor{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
dgst := digest.FromBytes(b)
|
||||||
|
return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifest}, err
|
||||||
|
}
|
||||||
|
err := distribution.RegisterManifestSchema(MediaTypeManifest, schema2Func)
|
||||||
|
if err != nil {
|
||||||
|
panic(fmt.Sprintf("Unable to register manifest: %s", err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Manifest defines a schema2 manifest.
|
||||||
|
type Manifest struct {
|
||||||
|
manifest.Versioned
|
||||||
|
|
||||||
|
// Config references the image configuration as a blob.
|
||||||
|
Config distribution.Descriptor `json:"config"`
|
||||||
|
|
||||||
|
// Layers lists descriptors for the layers referenced by the
|
||||||
|
// configuration.
|
||||||
|
Layers []distribution.Descriptor `json:"layers"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// References returnes the descriptors of this manifests references.
|
||||||
|
func (m Manifest) References() []distribution.Descriptor {
|
||||||
|
return m.Layers
|
||||||
|
}
|
||||||
|
|
||||||
|
// Target returns the target of this signed manifest.
|
||||||
|
func (m Manifest) Target() distribution.Descriptor {
|
||||||
|
return m.Config
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeserializedManifest wraps Manifest with a copy of the original JSON.
|
||||||
|
// It satisfies the distribution.Manifest interface.
|
||||||
|
type DeserializedManifest struct {
|
||||||
|
Manifest
|
||||||
|
|
||||||
|
// canonical is the canonical byte representation of the Manifest.
|
||||||
|
canonical []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromStruct takes a Manifest structure, marshals it to JSON, and returns a
|
||||||
|
// DeserializedManifest which contains the manifest and its JSON representation.
|
||||||
|
func FromStruct(m Manifest) (*DeserializedManifest, error) {
|
||||||
|
var deserialized DeserializedManifest
|
||||||
|
deserialized.Manifest = m
|
||||||
|
|
||||||
|
var err error
|
||||||
|
deserialized.canonical, err = json.MarshalIndent(&m, "", " ")
|
||||||
|
return &deserialized, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON populates a new Manifest struct from JSON data.
|
||||||
|
func (m *DeserializedManifest) UnmarshalJSON(b []byte) error {
|
||||||
|
m.canonical = make([]byte, len(b), len(b))
|
||||||
|
// store manifest in canonical
|
||||||
|
copy(m.canonical, b)
|
||||||
|
|
||||||
|
// Unmarshal canonical JSON into Manifest object
|
||||||
|
var manifest Manifest
|
||||||
|
if err := json.Unmarshal(m.canonical, &manifest); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
m.Manifest = manifest
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalJSON returns the contents of canonical. If canonical is empty,
|
||||||
|
// marshals the inner contents.
|
||||||
|
func (m *DeserializedManifest) MarshalJSON() ([]byte, error) {
|
||||||
|
if len(m.canonical) > 0 {
|
||||||
|
return m.canonical, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, errors.New("JSON representation not initialized in DeserializedManifest")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Payload returns the raw content of the manifest. The contents can be used to
|
||||||
|
// calculate the content identifier.
|
||||||
|
func (m DeserializedManifest) Payload() (string, []byte, error) {
|
||||||
|
return m.MediaType, m.canonical, nil
|
||||||
|
}
|
||||||
12
vendor/github.com/docker/distribution/manifest/versioned.go
generated
vendored
Normal file
12
vendor/github.com/docker/distribution/manifest/versioned.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
package manifest
|
||||||
|
|
||||||
|
// Versioned provides a struct with the manifest schemaVersion and . Incoming
|
||||||
|
// content with unknown schema version can be decoded against this struct to
|
||||||
|
// check the version.
|
||||||
|
type Versioned struct {
|
||||||
|
// SchemaVersion is the image manifest schema that this image follows
|
||||||
|
SchemaVersion int `json:"schemaVersion"`
|
||||||
|
|
||||||
|
// MediaType is the media type of this schema.
|
||||||
|
MediaType string `json:"mediaType,omitempty"`
|
||||||
|
}
|
||||||
13
vendor/github.com/docker/libtrust/CONTRIBUTING.md
generated
vendored
Normal file
13
vendor/github.com/docker/libtrust/CONTRIBUTING.md
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
# Contributing to libtrust
|
||||||
|
|
||||||
|
Want to hack on libtrust? Awesome! Here are instructions to get you
|
||||||
|
started.
|
||||||
|
|
||||||
|
libtrust is a part of the [Docker](https://www.docker.com) project, and follows
|
||||||
|
the same rules and principles. If you're already familiar with the way
|
||||||
|
Docker does things, you'll feel right at home.
|
||||||
|
|
||||||
|
Otherwise, go read
|
||||||
|
[Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md).
|
||||||
|
|
||||||
|
Happy hacking!
|
||||||
191
vendor/github.com/docker/libtrust/LICENSE
generated
vendored
Normal file
191
vendor/github.com/docker/libtrust/LICENSE
generated
vendored
Normal file
@ -0,0 +1,191 @@
|
|||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
Copyright 2014 Docker, Inc.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
3
vendor/github.com/docker/libtrust/MAINTAINERS
generated
vendored
Normal file
3
vendor/github.com/docker/libtrust/MAINTAINERS
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
Solomon Hykes <solomon@docker.com>
|
||||||
|
Josh Hawn <josh@docker.com> (github: jlhawn)
|
||||||
|
Derek McGowan <derek@docker.com> (github: dmcgowan)
|
||||||
18
vendor/github.com/docker/libtrust/README.md
generated
vendored
Normal file
18
vendor/github.com/docker/libtrust/README.md
generated
vendored
Normal file
@ -0,0 +1,18 @@
|
|||||||
|
# libtrust
|
||||||
|
|
||||||
|
Libtrust is library for managing authentication and authorization using public key cryptography.
|
||||||
|
|
||||||
|
Authentication is handled using the identity attached to the public key.
|
||||||
|
Libtrust provides multiple methods to prove possession of the private key associated with an identity.
|
||||||
|
- TLS x509 certificates
|
||||||
|
- Signature verification
|
||||||
|
- Key Challenge
|
||||||
|
|
||||||
|
Authorization and access control is managed through a distributed trust graph.
|
||||||
|
Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access.
|
||||||
|
|
||||||
|
## Copyright and license
|
||||||
|
|
||||||
|
Code and documentation copyright 2014 Docker, inc. Code released under the Apache 2.0 license.
|
||||||
|
Docs released under Creative commons.
|
||||||
|
|
||||||
175
vendor/github.com/docker/libtrust/certificates.go
generated
vendored
Normal file
175
vendor/github.com/docker/libtrust/certificates.go
generated
vendored
Normal file
@ -0,0 +1,175 @@
|
|||||||
|
package libtrust
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/rand"
|
||||||
|
"crypto/x509"
|
||||||
|
"crypto/x509/pkix"
|
||||||
|
"encoding/pem"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"math/big"
|
||||||
|
"net"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type certTemplateInfo struct {
|
||||||
|
commonName string
|
||||||
|
domains []string
|
||||||
|
ipAddresses []net.IP
|
||||||
|
isCA bool
|
||||||
|
clientAuth bool
|
||||||
|
serverAuth bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func generateCertTemplate(info *certTemplateInfo) *x509.Certificate {
|
||||||
|
// Generate a certificate template which is valid from the past week to
|
||||||
|
// 10 years from now. The usage of the certificate depends on the
|
||||||
|
// specified fields in the given certTempInfo object.
|
||||||
|
var (
|
||||||
|
keyUsage x509.KeyUsage
|
||||||
|
extKeyUsage []x509.ExtKeyUsage
|
||||||
|
)
|
||||||
|
|
||||||
|
if info.isCA {
|
||||||
|
keyUsage = x509.KeyUsageCertSign
|
||||||
|
}
|
||||||
|
|
||||||
|
if info.clientAuth {
|
||||||
|
extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth)
|
||||||
|
}
|
||||||
|
|
||||||
|
if info.serverAuth {
|
||||||
|
extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &x509.Certificate{
|
||||||
|
SerialNumber: big.NewInt(0),
|
||||||
|
Subject: pkix.Name{
|
||||||
|
CommonName: info.commonName,
|
||||||
|
},
|
||||||
|
NotBefore: time.Now().Add(-time.Hour * 24 * 7),
|
||||||
|
NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10),
|
||||||
|
DNSNames: info.domains,
|
||||||
|
IPAddresses: info.ipAddresses,
|
||||||
|
IsCA: info.isCA,
|
||||||
|
KeyUsage: keyUsage,
|
||||||
|
ExtKeyUsage: extKeyUsage,
|
||||||
|
BasicConstraintsValid: info.isCA,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) {
|
||||||
|
pubCertTemplate := generateCertTemplate(subInfo)
|
||||||
|
privCertTemplate := generateCertTemplate(issInfo)
|
||||||
|
|
||||||
|
certDER, err := x509.CreateCertificate(
|
||||||
|
rand.Reader, pubCertTemplate, privCertTemplate,
|
||||||
|
pub.CryptoPublicKey(), priv.CryptoPrivateKey(),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to create certificate: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
cert, err = x509.ParseCertificate(certDER)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to parse certificate: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenerateSelfSignedServerCert creates a self-signed certificate for the
|
||||||
|
// given key which is to be used for TLS servers with the given domains and
|
||||||
|
// IP addresses.
|
||||||
|
func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) {
|
||||||
|
info := &certTemplateInfo{
|
||||||
|
commonName: key.KeyID(),
|
||||||
|
domains: domains,
|
||||||
|
ipAddresses: ipAddresses,
|
||||||
|
serverAuth: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
return generateCert(key.PublicKey(), key, info, info)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenerateSelfSignedClientCert creates a self-signed certificate for the
|
||||||
|
// given key which is to be used for TLS clients.
|
||||||
|
func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) {
|
||||||
|
info := &certTemplateInfo{
|
||||||
|
commonName: key.KeyID(),
|
||||||
|
clientAuth: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
return generateCert(key.PublicKey(), key, info, info)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenerateCACert creates a certificate which can be used as a trusted
|
||||||
|
// certificate authority.
|
||||||
|
func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) {
|
||||||
|
subjectInfo := &certTemplateInfo{
|
||||||
|
commonName: trustedKey.KeyID(),
|
||||||
|
isCA: true,
|
||||||
|
}
|
||||||
|
issuerInfo := &certTemplateInfo{
|
||||||
|
commonName: signer.KeyID(),
|
||||||
|
}
|
||||||
|
|
||||||
|
return generateCert(trustedKey, signer, subjectInfo, issuerInfo)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenerateCACertPool creates a certificate authority pool to be used for a
|
||||||
|
// TLS configuration. Any self-signed certificates issued by the specified
|
||||||
|
// trusted keys will be verified during a TLS handshake
|
||||||
|
func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) {
|
||||||
|
certPool := x509.NewCertPool()
|
||||||
|
|
||||||
|
for _, trustedKey := range trustedKeys {
|
||||||
|
cert, err := GenerateCACert(signer, trustedKey)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to generate CA certificate: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
certPool.AddCert(cert)
|
||||||
|
}
|
||||||
|
|
||||||
|
return certPool, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadCertificateBundle loads certificates from the given file. The file should be pem encoded
|
||||||
|
// containing one or more certificates. The expected pem type is "CERTIFICATE".
|
||||||
|
func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) {
|
||||||
|
b, err := ioutil.ReadFile(filename)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
certificates := []*x509.Certificate{}
|
||||||
|
var block *pem.Block
|
||||||
|
block, b = pem.Decode(b)
|
||||||
|
for ; block != nil; block, b = pem.Decode(b) {
|
||||||
|
if block.Type == "CERTIFICATE" {
|
||||||
|
cert, err := x509.ParseCertificate(block.Bytes)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
certificates = append(certificates, cert)
|
||||||
|
} else {
|
||||||
|
return nil, fmt.Errorf("invalid pem block type: %s", block.Type)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return certificates, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadCertificatePool loads a CA pool from the given file. The file should be pem encoded
|
||||||
|
// containing one or more certificates. The expected pem type is "CERTIFICATE".
|
||||||
|
func LoadCertificatePool(filename string) (*x509.CertPool, error) {
|
||||||
|
certs, err := LoadCertificateBundle(filename)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
pool := x509.NewCertPool()
|
||||||
|
for _, cert := range certs {
|
||||||
|
pool.AddCert(cert)
|
||||||
|
}
|
||||||
|
return pool, nil
|
||||||
|
}
|
||||||
9
vendor/github.com/docker/libtrust/doc.go
generated
vendored
Normal file
9
vendor/github.com/docker/libtrust/doc.go
generated
vendored
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
/*
|
||||||
|
Package libtrust provides an interface for managing authentication and
|
||||||
|
authorization using public key cryptography. Authentication is handled
|
||||||
|
using the identity attached to the public key and verified through TLS
|
||||||
|
x509 certificates, a key challenge, or signature. Authorization and
|
||||||
|
access control is managed through a trust graph distributed between
|
||||||
|
both remote trust servers and locally cached and managed data.
|
||||||
|
*/
|
||||||
|
package libtrust
|
||||||
428
vendor/github.com/docker/libtrust/ec_key.go
generated
vendored
Normal file
428
vendor/github.com/docker/libtrust/ec_key.go
generated
vendored
Normal file
@ -0,0 +1,428 @@
|
|||||||
|
package libtrust
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto"
|
||||||
|
"crypto/ecdsa"
|
||||||
|
"crypto/elliptic"
|
||||||
|
"crypto/rand"
|
||||||
|
"crypto/x509"
|
||||||
|
"encoding/json"
|
||||||
|
"encoding/pem"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"math/big"
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* EC DSA PUBLIC KEY
|
||||||
|
*/
|
||||||
|
|
||||||
|
// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital
|
||||||
|
// signature algorithms.
|
||||||
|
type ecPublicKey struct {
|
||||||
|
*ecdsa.PublicKey
|
||||||
|
curveName string
|
||||||
|
signatureAlgorithm *signatureAlgorithm
|
||||||
|
extended map[string]interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) {
|
||||||
|
curve := cryptoPublicKey.Curve
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case curve == elliptic.P256():
|
||||||
|
return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil
|
||||||
|
case curve == elliptic.P384():
|
||||||
|
return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil
|
||||||
|
case curve == elliptic.P521():
|
||||||
|
return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil
|
||||||
|
default:
|
||||||
|
return nil, errors.New("unsupported elliptic curve")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyType returns the key type for elliptic curve keys, i.e., "EC".
|
||||||
|
func (k *ecPublicKey) KeyType() string {
|
||||||
|
return "EC"
|
||||||
|
}
|
||||||
|
|
||||||
|
// CurveName returns the elliptic curve identifier.
|
||||||
|
// Possible values are "P-256", "P-384", and "P-521".
|
||||||
|
func (k *ecPublicKey) CurveName() string {
|
||||||
|
return k.curveName
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyID returns a distinct identifier which is unique to this Public Key.
|
||||||
|
func (k *ecPublicKey) KeyID() string {
|
||||||
|
return keyIDFromCryptoKey(k)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (k *ecPublicKey) String() string {
|
||||||
|
return fmt.Sprintf("EC Public Key <%s>", k.KeyID())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify verifyies the signature of the data in the io.Reader using this
|
||||||
|
// PublicKey. The alg parameter should identify the digital signature
|
||||||
|
// algorithm which was used to produce the signature and should be supported
|
||||||
|
// by this public key. Returns a nil error if the signature is valid.
|
||||||
|
func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
|
||||||
|
// For EC keys there is only one supported signature algorithm depending
|
||||||
|
// on the curve parameters.
|
||||||
|
if k.signatureAlgorithm.HeaderParam() != alg {
|
||||||
|
return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg)
|
||||||
|
}
|
||||||
|
|
||||||
|
// signature is the concatenation of (r, s), base64Url encoded.
|
||||||
|
sigLength := len(signature)
|
||||||
|
expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3)
|
||||||
|
if sigLength != expectedOctetLength {
|
||||||
|
return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength)
|
||||||
|
}
|
||||||
|
|
||||||
|
rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:]
|
||||||
|
r := new(big.Int).SetBytes(rBytes)
|
||||||
|
s := new(big.Int).SetBytes(sBytes)
|
||||||
|
|
||||||
|
hasher := k.signatureAlgorithm.HashID().New()
|
||||||
|
_, err := io.Copy(hasher, data)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error reading data to sign: %s", err)
|
||||||
|
}
|
||||||
|
hash := hasher.Sum(nil)
|
||||||
|
|
||||||
|
if !ecdsa.Verify(k.PublicKey, hash, r, s) {
|
||||||
|
return errors.New("invalid signature")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CryptoPublicKey returns the internal object which can be used as a
|
||||||
|
// crypto.PublicKey for use with other standard library operations. The type
|
||||||
|
// is either *rsa.PublicKey or *ecdsa.PublicKey
|
||||||
|
func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey {
|
||||||
|
return k.PublicKey
|
||||||
|
}
|
||||||
|
|
||||||
|
func (k *ecPublicKey) toMap() map[string]interface{} {
|
||||||
|
jwk := make(map[string]interface{})
|
||||||
|
for k, v := range k.extended {
|
||||||
|
jwk[k] = v
|
||||||
|
}
|
||||||
|
jwk["kty"] = k.KeyType()
|
||||||
|
jwk["kid"] = k.KeyID()
|
||||||
|
jwk["crv"] = k.CurveName()
|
||||||
|
|
||||||
|
xBytes := k.X.Bytes()
|
||||||
|
yBytes := k.Y.Bytes()
|
||||||
|
octetLength := (k.Params().BitSize + 7) >> 3
|
||||||
|
// MUST include leading zeros in the output so that x, y are each
|
||||||
|
// *octetLength* bytes long.
|
||||||
|
xBuf := make([]byte, octetLength-len(xBytes), octetLength)
|
||||||
|
yBuf := make([]byte, octetLength-len(yBytes), octetLength)
|
||||||
|
xBuf = append(xBuf, xBytes...)
|
||||||
|
yBuf = append(yBuf, yBytes...)
|
||||||
|
|
||||||
|
jwk["x"] = joseBase64UrlEncode(xBuf)
|
||||||
|
jwk["y"] = joseBase64UrlEncode(yBuf)
|
||||||
|
|
||||||
|
return jwk
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalJSON serializes this Public Key using the JWK JSON serialization format for
|
||||||
|
// elliptic curve keys.
|
||||||
|
func (k *ecPublicKey) MarshalJSON() (data []byte, err error) {
|
||||||
|
return json.Marshal(k.toMap())
|
||||||
|
}
|
||||||
|
|
||||||
|
// PEMBlock serializes this Public Key to DER-encoded PKIX format.
|
||||||
|
func (k *ecPublicKey) PEMBlock() (*pem.Block, error) {
|
||||||
|
derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err)
|
||||||
|
}
|
||||||
|
k.extended["kid"] = k.KeyID() // For display purposes.
|
||||||
|
return createPemBlock("PUBLIC KEY", derBytes, k.extended)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (k *ecPublicKey) AddExtendedField(field string, value interface{}) {
|
||||||
|
k.extended[field] = value
|
||||||
|
}
|
||||||
|
|
||||||
|
func (k *ecPublicKey) GetExtendedField(field string) interface{} {
|
||||||
|
v, ok := k.extended[field]
|
||||||
|
if !ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// ecPublicKeyFromMap builds an *ecPublicKey from a decoded JWK JSON object.
// The caller has already determined the JWK key type (kty) to be "EC"; this
// function extracts 'crv', 'x', and 'y', checks the optional 'kid' against
// the derived key ID, and keeps the whole JWK map as the key's extended
// fields.
func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) {
	// JWK key type (kty) has already been determined to be "EC".
	// Need to extract 'crv', 'x', 'y', and 'kid' and check for
	// consistency.

	// Get the curve identifier value.
	crv, err := stringFromMap(jwk, "crv")
	if err != nil {
		return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err)
	}

	var (
		curve  elliptic.Curve
		sigAlg *signatureAlgorithm
	)

	// Only the NIST P-256/P-384/P-521 curves are supported; each one pins
	// the matching JWS signature algorithm.
	switch {
	case crv == "P-256":
		curve = elliptic.P256()
		sigAlg = es256
	case crv == "P-384":
		curve = elliptic.P384()
		sigAlg = es384
	case crv == "P-521":
		curve = elliptic.P521()
		sigAlg = es512
	default:
		return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q\n", crv)
	}

	// Get the X and Y coordinates for the public key point.
	xB64Url, err := stringFromMap(jwk, "x")
	if err != nil {
		return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err)
	}
	x, err := parseECCoordinate(xB64Url, curve)
	if err != nil {
		return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err)
	}

	yB64Url, err := stringFromMap(jwk, "y")
	if err != nil {
		return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err)
	}
	y, err := parseECCoordinate(yB64Url, curve)
	if err != nil {
		return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err)
	}

	key := &ecPublicKey{
		PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y},
		curveName: crv, signatureAlgorithm: sigAlg,
	}

	// Key ID is optional too, but if it exists, it should match the key.
	_, ok := jwk["kid"]
	if ok {
		kid, err := stringFromMap(jwk, "kid")
		if err != nil {
			return nil, fmt.Errorf("JWK EC Public Key ID: %s", err)
		}
		if kid != key.KeyID() {
			return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid)
		}
	}

	// Retain the original JWK map (including any extra fields) as the
	// extended fields of the key.
	key.extended = jwk

	return key, nil
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* EC DSA PRIVATE KEY
|
||||||
|
*/
|
||||||
|
|
||||||
|
// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature
// algorithms.
type ecPrivateKey struct {
	ecPublicKey       // embedded public half (curve name, coordinates, extended fields)
	*ecdsa.PrivateKey // underlying crypto/ecdsa key carrying the private scalar D
}
|
||||||
|
|
||||||
|
func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) {
|
||||||
|
publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// PublicKey returns the Public Key data associated with this Private Key.
|
||||||
|
func (k *ecPrivateKey) PublicKey() PublicKey {
|
||||||
|
return &k.ecPublicKey
|
||||||
|
}
|
||||||
|
|
||||||
|
func (k *ecPrivateKey) String() string {
|
||||||
|
return fmt.Sprintf("EC Private Key <%s>", k.KeyID())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sign signs the data read from the io.Reader using a signature algorithm supported
// by the elliptic curve private key. If the specified hashing algorithm is
// supported by this key, that hash function is used to generate the signature
// otherwise the default hashing algorithm for this key is used. Returns
// the signature and the name of the JWK signature algorithm used, e.g.,
// "ES256", "ES384", "ES512".
func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
	// Generate a signature of the data using the internal alg.
	// The given hashId is only a suggestion, and since EC keys only support
	// one signature/hash algorithm given the curve name, we disregard it for
	// the elliptic curve JWK signature implementation.
	hasher := k.signatureAlgorithm.HashID().New()
	_, err = io.Copy(hasher, data)
	if err != nil {
		return nil, "", fmt.Errorf("error reading data to sign: %s", err)
	}
	hash := hasher.Sum(nil)

	r, s, err := ecdsa.Sign(rand.Reader, k.PrivateKey, hash)
	if err != nil {
		return nil, "", fmt.Errorf("error producing signature: %s", err)
	}
	rBytes, sBytes := r.Bytes(), s.Bytes()
	octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3
	// MUST include leading zeros in the output so that r and s each occupy
	// exactly octetLength bytes (fixed-width R||S signature encoding).
	rBuf := make([]byte, octetLength-len(rBytes), octetLength)
	sBuf := make([]byte, octetLength-len(sBytes), octetLength)

	rBuf = append(rBuf, rBytes...)
	sBuf = append(sBuf, sBytes...)

	// Concatenate padded R and S; report the algorithm name from the
	// key's fixed signature algorithm.
	signature = append(rBuf, sBuf...)
	alg = k.signatureAlgorithm.HeaderParam()

	return
}
|
||||||
|
|
||||||
|
// CryptoPrivateKey returns the internal object which can be used as a
// crypto.PrivateKey for use with other standard library operations. For this
// implementation the concrete type is *ecdsa.PrivateKey.
func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
	return k.PrivateKey
}
|
||||||
|
|
||||||
|
// toMap serializes this Private Key as a JWK map: the public key fields plus
// the private parameter 'd', zero-padded to the octet length of the curve
// order.
func (k *ecPrivateKey) toMap() map[string]interface{} {
	jwk := k.ecPublicKey.toMap()

	dBytes := k.D.Bytes()
	// The length of this octet string MUST be ceiling(log-base-2(n)/8)
	// octets (where n is the order of the curve). This is because the private
	// key d must be in the interval [1, n-1] so the bitlength of d should be
	// no larger than the bitlength of n-1. The easiest way to find the octet
	// length is to take bitlength(n-1), add 7 to force a carry, and shift this
	// bit sequence right by 3, which is essentially dividing by 8 and adding
	// 1 if there is any remainder. Thus, the private key value d should be
	// output to (bitlength(n-1)+7)>>3 octets.
	n := k.ecPublicKey.Params().N
	octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
	// Create a buffer with the necessary zero-padding.
	dBuf := make([]byte, octetLength-len(dBytes), octetLength)
	dBuf = append(dBuf, dBytes...)

	jwk["d"] = joseBase64UrlEncode(dBuf)

	return jwk
}
|
||||||
|
|
||||||
|
// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
|
||||||
|
// elliptic curve keys.
|
||||||
|
func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) {
|
||||||
|
return json.Marshal(k.toMap())
|
||||||
|
}
|
||||||
|
|
||||||
|
// PEMBlock serializes this Private Key to DER-encoded PKIX format.
|
||||||
|
func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) {
|
||||||
|
derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded PKIX format: %s", err)
|
||||||
|
}
|
||||||
|
k.extended["keyID"] = k.KeyID() // For display purposes.
|
||||||
|
return createPemBlock("EC PRIVATE KEY", derBytes, k.extended)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ecPrivateKeyFromMap builds an *ecPrivateKey from a decoded JWK JSON object
// by reconstructing the public portion and then parsing the private
// parameter 'd'.
func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) {
	dB64Url, err := stringFromMap(jwk, "d")
	if err != nil {
		return nil, fmt.Errorf("JWK EC Private Key: %s", err)
	}

	// JWK key type (kty) has already been determined to be "EC".
	// Need to extract the public key information, then extract the private
	// key value 'd'.
	publicKey, err := ecPublicKeyFromMap(jwk)
	if err != nil {
		return nil, err
	}

	d, err := parseECPrivateParam(dB64Url, publicKey.Curve)
	if err != nil {
		return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err)
	}

	key := &ecPrivateKey{
		ecPublicKey: *publicKey,
		PrivateKey: &ecdsa.PrivateKey{
			PublicKey: *publicKey.PublicKey,
			D:         d,
		},
	}

	return key, nil
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Key Generation Functions.
|
||||||
|
*/
|
||||||
|
|
||||||
|
func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) {
|
||||||
|
k = new(ecPrivateKey)
|
||||||
|
k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey
|
||||||
|
k.extended = make(map[string]interface{})
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256.
|
||||||
|
func GenerateECP256PrivateKey() (PrivateKey, error) {
|
||||||
|
k, err := generateECPrivateKey(elliptic.P256())
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error generating EC P-256 key: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
k.curveName = "P-256"
|
||||||
|
k.signatureAlgorithm = es256
|
||||||
|
|
||||||
|
return k, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384.
|
||||||
|
func GenerateECP384PrivateKey() (PrivateKey, error) {
|
||||||
|
k, err := generateECPrivateKey(elliptic.P384())
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error generating EC P-384 key: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
k.curveName = "P-384"
|
||||||
|
k.signatureAlgorithm = es384
|
||||||
|
|
||||||
|
return k, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenerateECP521PrivateKey generates aß key pair using elliptic curve P-521.
|
||||||
|
func GenerateECP521PrivateKey() (PrivateKey, error) {
|
||||||
|
k, err := generateECPrivateKey(elliptic.P521())
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error generating EC P-521 key: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
k.curveName = "P-521"
|
||||||
|
k.signatureAlgorithm = es512
|
||||||
|
|
||||||
|
return k, nil
|
||||||
|
}
|
||||||
50
vendor/github.com/docker/libtrust/filter.go
generated
vendored
Normal file
50
vendor/github.com/docker/libtrust/filter.go
generated
vendored
Normal file
@ -0,0 +1,50 @@
|
|||||||
|
package libtrust
|
||||||
|
|
||||||
|
import (
|
||||||
|
"path/filepath"
|
||||||
|
)
|
||||||
|
|
||||||
|
// FilterByHosts filters the list of PublicKeys to only those which contain a
|
||||||
|
// 'hosts' pattern which matches the given host. If *includeEmpty* is true,
|
||||||
|
// then keys which do not specify any hosts are also returned.
|
||||||
|
func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) {
|
||||||
|
filtered := make([]PublicKey, 0, len(keys))
|
||||||
|
|
||||||
|
for _, pubKey := range keys {
|
||||||
|
var hosts []string
|
||||||
|
switch v := pubKey.GetExtendedField("hosts").(type) {
|
||||||
|
case []string:
|
||||||
|
hosts = v
|
||||||
|
case []interface{}:
|
||||||
|
for _, value := range v {
|
||||||
|
h, ok := value.(string)
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
hosts = append(hosts, h)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(hosts) == 0 {
|
||||||
|
if includeEmpty {
|
||||||
|
filtered = append(filtered, pubKey)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if any hosts match pattern
|
||||||
|
for _, hostPattern := range hosts {
|
||||||
|
match, err := filepath.Match(hostPattern, host)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if match {
|
||||||
|
filtered = append(filtered, pubKey)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return filtered, nil
|
||||||
|
}
|
||||||
56
vendor/github.com/docker/libtrust/hash.go
generated
vendored
Normal file
56
vendor/github.com/docker/libtrust/hash.go
generated
vendored
Normal file
@ -0,0 +1,56 @@
|
|||||||
|
package libtrust
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto"
|
||||||
|
	_ "crypto/sha256" // Register SHA224 and SHA256
	_ "crypto/sha512" // Register SHA384 and SHA512
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// signatureAlgorithm pairs a JWS "alg" header parameter value with the
// crypto.Hash it uses.
type signatureAlgorithm struct {
	algHeaderParam string      // JWS "alg" value, e.g. "RS256" or "ES256"
	hashID         crypto.Hash // hash function used by this algorithm
}
|
||||||
|
|
||||||
|
func (h *signatureAlgorithm) HeaderParam() string {
|
||||||
|
return h.algHeaderParam
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *signatureAlgorithm) HashID() crypto.Hash {
|
||||||
|
return h.hashID
|
||||||
|
}
|
||||||
|
|
||||||
|
// Shared signature algorithm descriptors: RS* are RSASSA-PKCS1-v1_5 and
// ES* are ECDSA, each paired with its JWS "alg" name and hash function.
var (
	rs256 = &signatureAlgorithm{"RS256", crypto.SHA256}
	rs384 = &signatureAlgorithm{"RS384", crypto.SHA384}
	rs512 = &signatureAlgorithm{"RS512", crypto.SHA512}
	es256 = &signatureAlgorithm{"ES256", crypto.SHA256}
	es384 = &signatureAlgorithm{"ES384", crypto.SHA384}
	es512 = &signatureAlgorithm{"ES512", crypto.SHA512}
)
|
||||||
|
|
||||||
|
func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) {
|
||||||
|
switch {
|
||||||
|
case alg == "RS256":
|
||||||
|
return rs256, nil
|
||||||
|
case alg == "RS384":
|
||||||
|
return rs384, nil
|
||||||
|
case alg == "RS512":
|
||||||
|
return rs512, nil
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm {
|
||||||
|
switch {
|
||||||
|
case hashID == crypto.SHA512:
|
||||||
|
return rs512
|
||||||
|
case hashID == crypto.SHA384:
|
||||||
|
return rs384
|
||||||
|
case hashID == crypto.SHA256:
|
||||||
|
fallthrough
|
||||||
|
default:
|
||||||
|
return rs256
|
||||||
|
}
|
||||||
|
}
|
||||||
657
vendor/github.com/docker/libtrust/jsonsign.go
generated
vendored
Normal file
657
vendor/github.com/docker/libtrust/jsonsign.go
generated
vendored
Normal file
@ -0,0 +1,657 @@
|
|||||||
|
package libtrust
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"crypto"
|
||||||
|
"crypto/x509"
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"sort"
|
||||||
|
"time"
|
||||||
|
"unicode"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Sentinel errors returned by the JSON signing helpers in this file.
var (
	// ErrInvalidSignContent is used when the content to be signed is invalid.
	ErrInvalidSignContent = errors.New("invalid sign content")

	// ErrInvalidJSONContent is used when invalid json is encountered.
	ErrInvalidJSONContent = errors.New("invalid json content")

	// ErrMissingSignatureKey is used when the specified signature key
	// does not exist in the JSON content.
	ErrMissingSignatureKey = errors.New("missing signature key")
)
|
||||||
|
|
||||||
|
// jsHeader is a JWS signature header: the signing key as a JWK, the "alg"
// parameter, and an optional x509 certificate chain (x5c).
type jsHeader struct {
	JWK       PublicKey `json:"jwk,omitempty"`
	Algorithm string    `json:"alg"`
	Chain     []string  `json:"x5c,omitempty"`
}
|
||||||
|
|
||||||
|
// jsSignature is one signature entry of a JWS: its header, the
// base64url-encoded signature bytes, and the base64url-encoded protected
// header it was computed over.
type jsSignature struct {
	Header    jsHeader `json:"header"`
	Signature string   `json:"signature"`
	Protected string   `json:"protected,omitempty"`
}
|
||||||
|
|
||||||
|
// jsSignaturesSorted implements sort.Interface over jsSignature slices,
// ordering by the signing key's KeyID and breaking ties with the signature
// value, so serialized JWS output is deterministic.
type jsSignaturesSorted []jsSignature

func (jsbkid jsSignaturesSorted) Swap(i, j int) { jsbkid[i], jsbkid[j] = jsbkid[j], jsbkid[i] }
func (jsbkid jsSignaturesSorted) Len() int      { return len(jsbkid) }

// Less orders primarily by key ID, then by signature string when the key IDs
// are equal.
func (jsbkid jsSignaturesSorted) Less(i, j int) bool {
	ki, kj := jsbkid[i].Header.JWK.KeyID(), jsbkid[j].Header.JWK.KeyID()
	si, sj := jsbkid[i].Signature, jsbkid[j].Signature

	if ki == kj {
		return si < sj
	}

	return ki < kj
}
|
||||||
|
|
||||||
|
// signKey couples a PrivateKey with an optional x509 certificate chain.
type signKey struct {
	PrivateKey
	Chain []*x509.Certificate
}
|
||||||
|
|
||||||
|
// JSONSignature represents a signature of a json object.
type JSONSignature struct {
	payload      string        // base64url-encoded JSON payload
	signatures   []jsSignature // accumulated signatures over the payload
	indent       string        // indentation detected in the original JSON
	formatLength int           // number of payload bytes preceding formatTail
	formatTail   []byte        // trailing bytes (closing brace and whitespace) of the payload
}
|
||||||
|
|
||||||
|
func newJSONSignature() *JSONSignature {
|
||||||
|
return &JSONSignature{
|
||||||
|
signatures: make([]jsSignature, 0, 1),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Payload returns the encoded payload of the signature. This
|
||||||
|
// payload should not be signed directly
|
||||||
|
func (js *JSONSignature) Payload() ([]byte, error) {
|
||||||
|
return joseBase64UrlDecode(js.payload)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (js *JSONSignature) protectedHeader() (string, error) {
|
||||||
|
protected := map[string]interface{}{
|
||||||
|
"formatLength": js.formatLength,
|
||||||
|
"formatTail": joseBase64UrlEncode(js.formatTail),
|
||||||
|
"time": time.Now().UTC().Format(time.RFC3339),
|
||||||
|
}
|
||||||
|
protectedBytes, err := json.Marshal(protected)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return joseBase64UrlEncode(protectedBytes), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) {
|
||||||
|
buf := make([]byte, len(js.payload)+len(protectedHeader)+1)
|
||||||
|
copy(buf, protectedHeader)
|
||||||
|
buf[len(protectedHeader)] = '.'
|
||||||
|
copy(buf[len(protectedHeader)+1:], js.payload)
|
||||||
|
return buf, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sign adds a signature using the given private key.
func (js *JSONSignature) Sign(key PrivateKey) error {
	protected, err := js.protectedHeader()
	if err != nil {
		return err
	}
	signBytes, err := js.signBytes(protected)
	if err != nil {
		return err
	}
	// SHA256 is passed as the suggested hash; the key implementation may
	// substitute its own (EC keys disregard the suggestion).
	sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256)
	if err != nil {
		return err
	}

	// Record the signature together with the signing key's public JWK.
	js.signatures = append(js.signatures, jsSignature{
		Header: jsHeader{
			JWK:       key.PublicKey(),
			Algorithm: algorithm,
		},
		Signature: joseBase64UrlEncode(sigBytes),
		Protected: protected,
	})

	return nil
}
|
||||||
|
|
||||||
|
// SignWithChain adds a signature using the given private key
// and setting the x509 chain. The public key of the first element
// in the chain must be the public key corresponding with the sign key.
func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error {
	// Ensure key.Chain[0] is public key for key
	//key.Chain.PublicKey
	//key.PublicKey().CryptoPublicKey()

	// Verify chain
	protected, err := js.protectedHeader()
	if err != nil {
		return err
	}
	signBytes, err := js.signBytes(protected)
	if err != nil {
		return err
	}
	sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256)
	if err != nil {
		return err
	}

	// The x5c header carries the chain as standard (non-URL) base64 DER.
	header := jsHeader{
		Chain:     make([]string, len(chain)),
		Algorithm: algorithm,
	}

	for i, cert := range chain {
		header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw)
	}

	js.signatures = append(js.signatures, jsSignature{
		Header:    header,
		Signature: joseBase64UrlEncode(sigBytes),
		Protected: protected,
	})

	return nil
}
|
||||||
|
|
||||||
|
// Verify verifies all the signatures and returns the list of
// public keys used to sign. Any x509 chains are not checked.
func (js *JSONSignature) Verify() ([]PublicKey, error) {
	keys := make([]PublicKey, len(js.signatures))
	for i, signature := range js.signatures {
		// Reconstruct the exact bytes that were signed: protected header,
		// '.', then the encoded payload.
		signBytes, err := js.signBytes(signature.Protected)
		if err != nil {
			return nil, err
		}
		// Prefer the leaf certificate of an x5c chain; otherwise fall back
		// to the embedded JWK; fail if neither is present.
		var publicKey PublicKey
		if len(signature.Header.Chain) > 0 {
			certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0])
			if err != nil {
				return nil, err
			}
			cert, err := x509.ParseCertificate(certBytes)
			if err != nil {
				return nil, err
			}
			publicKey, err = FromCryptoPublicKey(cert.PublicKey)
			if err != nil {
				return nil, err
			}
		} else if signature.Header.JWK != nil {
			publicKey = signature.Header.JWK
		} else {
			return nil, errors.New("missing public key")
		}

		sigBytes, err := joseBase64UrlDecode(signature.Signature)
		if err != nil {
			return nil, err
		}

		err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes)
		if err != nil {
			return nil, err
		}

		keys[i] = publicKey
	}
	return keys, nil
}
|
||||||
|
|
||||||
|
// VerifyChains verifies all the signatures and the chains associated
// with each signature and returns the list of verified chains.
// Signatures without an x509 chain are not checked.
func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) {
	chains := make([][]*x509.Certificate, 0, len(js.signatures))
	for _, signature := range js.signatures {
		signBytes, err := js.signBytes(signature.Protected)
		if err != nil {
			return nil, err
		}
		var publicKey PublicKey
		if len(signature.Header.Chain) > 0 {
			// The leaf certificate (first x5c entry) supplies the
			// verification key.
			certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0])
			if err != nil {
				return nil, err
			}
			cert, err := x509.ParseCertificate(certBytes)
			if err != nil {
				return nil, err
			}
			publicKey, err = FromCryptoPublicKey(cert.PublicKey)
			if err != nil {
				return nil, err
			}
			// Remaining x5c entries act as intermediates when building a
			// path from the leaf to the provided CA roots.
			intermediates := x509.NewCertPool()
			if len(signature.Header.Chain) > 1 {
				intermediateChain := signature.Header.Chain[1:]
				for i := range intermediateChain {
					certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i])
					if err != nil {
						return nil, err
					}
					intermediate, err := x509.ParseCertificate(certBytes)
					if err != nil {
						return nil, err
					}
					intermediates.AddCert(intermediate)
				}
			}

			verifyOptions := x509.VerifyOptions{
				Intermediates: intermediates,
				Roots:         ca,
			}

			verifiedChains, err := cert.Verify(verifyOptions)
			if err != nil {
				return nil, err
			}
			chains = append(chains, verifiedChains...)

			sigBytes, err := joseBase64UrlDecode(signature.Signature)
			if err != nil {
				return nil, err
			}

			// Verify the signature itself with the leaf's public key.
			err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes)
			if err != nil {
				return nil, err
			}
		}

	}
	return chains, nil
}
|
||||||
|
|
||||||
|
// JWS returns JSON serialized JWS according to
|
||||||
|
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2
|
||||||
|
func (js *JSONSignature) JWS() ([]byte, error) {
|
||||||
|
if len(js.signatures) == 0 {
|
||||||
|
return nil, errors.New("missing signature")
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.Sort(jsSignaturesSorted(js.signatures))
|
||||||
|
|
||||||
|
jsonMap := map[string]interface{}{
|
||||||
|
"payload": js.payload,
|
||||||
|
"signatures": js.signatures,
|
||||||
|
}
|
||||||
|
|
||||||
|
return json.MarshalIndent(jsonMap, "", " ")
|
||||||
|
}
|
||||||
|
|
||||||
|
// notSpace reports whether r is not a Unicode whitespace rune.
func notSpace(r rune) bool {
	if unicode.IsSpace(r) {
		return false
	}
	return true
}
|
||||||
|
|
||||||
|
// detectJSONIndent returns the indentation string used by a pretty-printed
// JSON object: the whitespace between the opening "{\n" and the first quote.
// It returns "" when the content is compact or too short to inspect.
func detectJSONIndent(jsonContent []byte) string {
	if len(jsonContent) <= 2 || jsonContent[0] != '{' || jsonContent[1] != '\n' {
		return ""
	}
	quoteIndex := bytes.IndexRune(jsonContent[1:], '"')
	if quoteIndex <= 0 {
		return ""
	}
	return string(jsonContent[2 : quoteIndex+1])
}
|
||||||
|
|
||||||
|
// jsParsedHeader mirrors jsHeader for decoding: the JWK is kept as raw JSON
// so it can be unmarshalled into a concrete PublicKey afterwards.
type jsParsedHeader struct {
	JWK       json.RawMessage `json:"jwk"`
	Algorithm string          `json:"alg"`
	Chain     []string        `json:"x5c"`
}
|
||||||
|
|
||||||
|
// jsParsedSignature mirrors jsSignature for decoding serialized JWS
// signature entries.
type jsParsedSignature struct {
	Header    jsParsedHeader `json:"header"`
	Signature string         `json:"signature"`
	Protected string         `json:"protected"`
}
|
||||||
|
|
||||||
|
// ParseJWS parses a JWS serialized JSON object into a Json Signature.
func ParseJWS(content []byte) (*JSONSignature, error) {
	type jsParsed struct {
		Payload    string              `json:"payload"`
		Signatures []jsParsedSignature `json:"signatures"`
	}
	parsed := &jsParsed{}
	err := json.Unmarshal(content, parsed)
	if err != nil {
		return nil, err
	}
	if len(parsed.Signatures) == 0 {
		return nil, errors.New("missing signatures")
	}
	payload, err := joseBase64UrlDecode(parsed.Payload)
	if err != nil {
		return nil, err
	}

	// NewJSONSignature re-derives the format length/tail from the decoded
	// payload; the parsed signatures then replace its (empty) signature list.
	js, err := NewJSONSignature(payload)
	if err != nil {
		return nil, err
	}
	js.signatures = make([]jsSignature, len(parsed.Signatures))
	for i, signature := range parsed.Signatures {
		header := jsHeader{
			Algorithm: signature.Header.Algorithm,
		}
		if signature.Header.Chain != nil {
			header.Chain = signature.Header.Chain
		}
		if signature.Header.JWK != nil {
			// The JWK arrives as raw JSON; decode it into a PublicKey.
			publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK))
			if err != nil {
				return nil, err
			}
			header.JWK = publicKey
		}
		js.signatures[i] = jsSignature{
			Header:    header,
			Signature: signature.Signature,
			Protected: signature.Protected,
		}
	}

	return js, nil
}
|
||||||
|
|
||||||
|
// NewJSONSignature returns a new unsigned JWS from a json byte array.
// JSONSignature will need to be signed before serializing or storing.
// Optionally, one or more signatures can be provided as byte buffers,
// containing serialized JWS signatures, to assemble a fully signed JWS
// package. It is the callers responsibility to ensure uniqueness of the
// provided signatures.
func NewJSONSignature(content []byte, signatures ...[]byte) (*JSONSignature, error) {
	// Validate that the content is well-formed JSON before accepting it.
	var dataMap map[string]interface{}
	err := json.Unmarshal(content, &dataMap)
	if err != nil {
		return nil, err
	}

	js := newJSONSignature()
	js.indent = detectJSONIndent(content)

	js.payload = joseBase64UrlEncode(content)

	// Find trailing } and whitespace, put in protected header
	closeIndex := bytes.LastIndexFunc(content, notSpace)
	if content[closeIndex] != '}' {
		return nil, ErrInvalidJSONContent
	}
	lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace)
	if content[lastRuneIndex] == ',' {
		return nil, ErrInvalidJSONContent
	}
	// formatLength covers everything before the tail; formatTail holds the
	// closing brace and surrounding whitespace so formatting can be restored.
	js.formatLength = lastRuneIndex + 1
	js.formatTail = content[js.formatLength:]

	if len(signatures) > 0 {
		for _, signature := range signatures {
			var parsedJSig jsParsedSignature

			if err := json.Unmarshal(signature, &parsedJSig); err != nil {
				return nil, err
			}

			// TODO(stevvooe): A lot of the code below is repeated in
			// ParseJWS. It will require more refactoring to fix that.
			jsig := jsSignature{
				Header: jsHeader{
					Algorithm: parsedJSig.Header.Algorithm,
				},
				Signature: parsedJSig.Signature,
				Protected: parsedJSig.Protected,
			}

			if parsedJSig.Header.Chain != nil {
				jsig.Header.Chain = parsedJSig.Header.Chain
			}

			if parsedJSig.Header.JWK != nil {
				publicKey, err := UnmarshalPublicKeyJWK([]byte(parsedJSig.Header.JWK))
				if err != nil {
					return nil, err
				}
				jsig.Header.JWK = publicKey
			}

			js.signatures = append(js.signatures, jsig)
		}
	}

	return js, nil
}
|
||||||
|
|
||||||
|
// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or
// struct. JWS will need to be signed before serializing or storing.
func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) {
	// NOTE(review): `case struct{}` matches only the empty struct type, not
	// arbitrary struct values — other structs fall through to the default
	// branch and are rejected. Presumably "any struct" was intended; confirm
	// against callers before relying on struct input.
	switch content.(type) {
	case map[string]interface{}:
	case struct{}:
	default:
		return nil, errors.New("invalid data type")
	}

	js := newJSONSignature()
	js.indent = "   "

	payload, err := json.MarshalIndent(content, "", js.indent)
	if err != nil {
		return nil, err
	}
	js.payload = joseBase64UrlEncode(payload)

	// Remove '\n}' from formatted section, put in protected header
	js.formatLength = len(payload) - 2
	js.formatTail = payload[js.formatLength:]

	return js, nil
}
|
||||||
|
|
||||||
|
// readIntFromMap reads an integer value from the map, accepting either an
// int or a float64 (the type JSON decoding produces for numbers); the second
// return value reports whether a usable value was found.
func readIntFromMap(key string, m map[string]interface{}) (int, bool) {
	raw, present := m[key]
	if !present {
		return 0, false
	}
	if asInt, ok := raw.(int); ok {
		return asInt, true
	}
	if asFloat, ok := raw.(float64); ok {
		return int(asFloat), true
	}
	return 0, false
}
|
||||||
|
|
||||||
|
// readStringFromMap reads a string value from the map; the second return
// value is false when the key is missing or the value is not a string.
func readStringFromMap(key string, m map[string]interface{}) (string, bool) {
	if raw, present := m[key]; present {
		s, isString := raw.(string)
		return s, isString
	}
	return "", false
}
|
||||||
|
|
||||||
|
// ParsePrettySignature parses a formatted signature into a
|
||||||
|
// JSON signature. If the signatures are missing the format information
|
||||||
|
// an error is thrown. The formatted signature must be created by
|
||||||
|
// the same method as format signature.
|
||||||
|
func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) {
|
||||||
|
var contentMap map[string]json.RawMessage
|
||||||
|
err := json.Unmarshal(content, &contentMap)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error unmarshalling content: %s", err)
|
||||||
|
}
|
||||||
|
sigMessage, ok := contentMap[signatureKey]
|
||||||
|
if !ok {
|
||||||
|
return nil, ErrMissingSignatureKey
|
||||||
|
}
|
||||||
|
|
||||||
|
var signatureBlocks []jsParsedSignature
|
||||||
|
err = json.Unmarshal([]byte(sigMessage), &signatureBlocks)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error unmarshalling signatures: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
js := newJSONSignature()
|
||||||
|
js.signatures = make([]jsSignature, len(signatureBlocks))
|
||||||
|
|
||||||
|
for i, signatureBlock := range signatureBlocks {
|
||||||
|
protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("base64 decode error: %s", err)
|
||||||
|
}
|
||||||
|
var protectedHeader map[string]interface{}
|
||||||
|
err = json.Unmarshal(protectedBytes, &protectedHeader)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error unmarshalling protected header: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
formatLength, ok := readIntFromMap("formatLength", protectedHeader)
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.New("missing formatted length")
|
||||||
|
}
|
||||||
|
encodedTail, ok := readStringFromMap("formatTail", protectedHeader)
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.New("missing formatted tail")
|
||||||
|
}
|
||||||
|
formatTail, err := joseBase64UrlDecode(encodedTail)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("base64 decode error on tail: %s", err)
|
||||||
|
}
|
||||||
|
if js.formatLength == 0 {
|
||||||
|
js.formatLength = formatLength
|
||||||
|
} else if js.formatLength != formatLength {
|
||||||
|
return nil, errors.New("conflicting format length")
|
||||||
|
}
|
||||||
|
if len(js.formatTail) == 0 {
|
||||||
|
js.formatTail = formatTail
|
||||||
|
} else if bytes.Compare(js.formatTail, formatTail) != 0 {
|
||||||
|
return nil, errors.New("conflicting format tail")
|
||||||
|
}
|
||||||
|
|
||||||
|
header := jsHeader{
|
||||||
|
Algorithm: signatureBlock.Header.Algorithm,
|
||||||
|
Chain: signatureBlock.Header.Chain,
|
||||||
|
}
|
||||||
|
if signatureBlock.Header.JWK != nil {
|
||||||
|
publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK))
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error unmarshalling public key: %s", err)
|
||||||
|
}
|
||||||
|
header.JWK = publicKey
|
||||||
|
}
|
||||||
|
js.signatures[i] = jsSignature{
|
||||||
|
Header: header,
|
||||||
|
Signature: signatureBlock.Signature,
|
||||||
|
Protected: signatureBlock.Protected,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if js.formatLength > len(content) {
|
||||||
|
return nil, errors.New("invalid format length")
|
||||||
|
}
|
||||||
|
formatted := make([]byte, js.formatLength+len(js.formatTail))
|
||||||
|
copy(formatted, content[:js.formatLength])
|
||||||
|
copy(formatted[js.formatLength:], js.formatTail)
|
||||||
|
js.indent = detectJSONIndent(formatted)
|
||||||
|
js.payload = joseBase64UrlEncode(formatted)
|
||||||
|
|
||||||
|
return js, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// PrettySignature formats a json signature into an easy to read
|
||||||
|
// single json serialized object.
|
||||||
|
func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) {
|
||||||
|
if len(js.signatures) == 0 {
|
||||||
|
return nil, errors.New("no signatures")
|
||||||
|
}
|
||||||
|
payload, err := joseBase64UrlDecode(js.payload)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
payload = payload[:js.formatLength]
|
||||||
|
|
||||||
|
sort.Sort(jsSignaturesSorted(js.signatures))
|
||||||
|
|
||||||
|
var marshalled []byte
|
||||||
|
var marshallErr error
|
||||||
|
if js.indent != "" {
|
||||||
|
marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent)
|
||||||
|
} else {
|
||||||
|
marshalled, marshallErr = json.Marshal(js.signatures)
|
||||||
|
}
|
||||||
|
if marshallErr != nil {
|
||||||
|
return nil, marshallErr
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34))
|
||||||
|
buf.Write(payload)
|
||||||
|
buf.WriteByte(',')
|
||||||
|
if js.indent != "" {
|
||||||
|
buf.WriteByte('\n')
|
||||||
|
buf.WriteString(js.indent)
|
||||||
|
buf.WriteByte('"')
|
||||||
|
buf.WriteString(signatureKey)
|
||||||
|
buf.WriteString("\": ")
|
||||||
|
buf.Write(marshalled)
|
||||||
|
buf.WriteByte('\n')
|
||||||
|
} else {
|
||||||
|
buf.WriteByte('"')
|
||||||
|
buf.WriteString(signatureKey)
|
||||||
|
buf.WriteString("\":")
|
||||||
|
buf.Write(marshalled)
|
||||||
|
}
|
||||||
|
buf.WriteByte('}')
|
||||||
|
|
||||||
|
return buf.Bytes(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Signatures provides the signatures on this JWS as opaque blobs, sorted by
|
||||||
|
// keyID. These blobs can be stored and reassembled with payloads. Internally,
|
||||||
|
// they are simply marshaled json web signatures but implementations should
|
||||||
|
// not rely on this.
|
||||||
|
func (js *JSONSignature) Signatures() ([][]byte, error) {
|
||||||
|
sort.Sort(jsSignaturesSorted(js.signatures))
|
||||||
|
|
||||||
|
var sb [][]byte
|
||||||
|
for _, jsig := range js.signatures {
|
||||||
|
p, err := json.Marshal(jsig)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
sb = append(sb, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
return sb, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Merge combines the signatures from one or more other signatures into the
|
||||||
|
// method receiver. If the payloads differ for any argument, an error will be
|
||||||
|
// returned and the receiver will not be modified.
|
||||||
|
func (js *JSONSignature) Merge(others ...*JSONSignature) error {
|
||||||
|
merged := js.signatures
|
||||||
|
for _, other := range others {
|
||||||
|
if js.payload != other.payload {
|
||||||
|
return fmt.Errorf("payloads differ from merge target")
|
||||||
|
}
|
||||||
|
merged = append(merged, other.signatures...)
|
||||||
|
}
|
||||||
|
|
||||||
|
js.signatures = merged
|
||||||
|
return nil
|
||||||
|
}
|
||||||
253
vendor/github.com/docker/libtrust/key.go
generated
vendored
Normal file
253
vendor/github.com/docker/libtrust/key.go
generated
vendored
Normal file
@ -0,0 +1,253 @@
|
|||||||
|
package libtrust
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto"
|
||||||
|
"crypto/ecdsa"
|
||||||
|
"crypto/rsa"
|
||||||
|
"crypto/x509"
|
||||||
|
"encoding/json"
|
||||||
|
"encoding/pem"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PublicKey is a generic interface for a Public Key.
|
||||||
|
type PublicKey interface {
|
||||||
|
// KeyType returns the key type for this key. For elliptic curve keys,
|
||||||
|
// this value should be "EC". For RSA keys, this value should be "RSA".
|
||||||
|
KeyType() string
|
||||||
|
// KeyID returns a distinct identifier which is unique to this Public Key.
|
||||||
|
// The format generated by this library is a base32 encoding of a 240 bit
|
||||||
|
// hash of the public key data divided into 12 groups like so:
|
||||||
|
// ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
|
||||||
|
KeyID() string
|
||||||
|
// Verify verifyies the signature of the data in the io.Reader using this
|
||||||
|
// Public Key. The alg parameter should identify the digital signature
|
||||||
|
// algorithm which was used to produce the signature and should be
|
||||||
|
// supported by this public key. Returns a nil error if the signature
|
||||||
|
// is valid.
|
||||||
|
Verify(data io.Reader, alg string, signature []byte) error
|
||||||
|
// CryptoPublicKey returns the internal object which can be used as a
|
||||||
|
// crypto.PublicKey for use with other standard library operations. The type
|
||||||
|
// is either *rsa.PublicKey or *ecdsa.PublicKey
|
||||||
|
CryptoPublicKey() crypto.PublicKey
|
||||||
|
// These public keys can be serialized to the standard JSON encoding for
|
||||||
|
// JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web
|
||||||
|
// Algorithms.
|
||||||
|
MarshalJSON() ([]byte, error)
|
||||||
|
// These keys can also be serialized to the standard PEM encoding.
|
||||||
|
PEMBlock() (*pem.Block, error)
|
||||||
|
// The string representation of a key is its key type and ID.
|
||||||
|
String() string
|
||||||
|
AddExtendedField(string, interface{})
|
||||||
|
GetExtendedField(string) interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// PrivateKey is a generic interface for a Private Key.
|
||||||
|
type PrivateKey interface {
|
||||||
|
// A PrivateKey contains all fields and methods of a PublicKey of the
|
||||||
|
// same type. The MarshalJSON method also outputs the private key as a
|
||||||
|
// JSON Web Key, and the PEMBlock method outputs the private key as a
|
||||||
|
// PEM block.
|
||||||
|
PublicKey
|
||||||
|
// PublicKey returns the PublicKey associated with this PrivateKey.
|
||||||
|
PublicKey() PublicKey
|
||||||
|
// Sign signs the data read from the io.Reader using a signature algorithm
|
||||||
|
// supported by the private key. If the specified hashing algorithm is
|
||||||
|
// supported by this key, that hash function is used to generate the
|
||||||
|
// signature otherwise the the default hashing algorithm for this key is
|
||||||
|
// used. Returns the signature and identifier of the algorithm used.
|
||||||
|
Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error)
|
||||||
|
// CryptoPrivateKey returns the internal object which can be used as a
|
||||||
|
// crypto.PublicKey for use with other standard library operations. The
|
||||||
|
// type is either *rsa.PublicKey or *ecdsa.PublicKey
|
||||||
|
CryptoPrivateKey() crypto.PrivateKey
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromCryptoPublicKey returns a libtrust PublicKey representation of the given
|
||||||
|
// *ecdsa.PublicKey or *rsa.PublicKey. Returns a non-nil error when the given
|
||||||
|
// key is of an unsupported type.
|
||||||
|
func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) {
|
||||||
|
switch cryptoPublicKey := cryptoPublicKey.(type) {
|
||||||
|
case *ecdsa.PublicKey:
|
||||||
|
return fromECPublicKey(cryptoPublicKey)
|
||||||
|
case *rsa.PublicKey:
|
||||||
|
return fromRSAPublicKey(cryptoPublicKey), nil
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given
|
||||||
|
// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given
|
||||||
|
// key is of an unsupported type.
|
||||||
|
func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) {
|
||||||
|
switch cryptoPrivateKey := cryptoPrivateKey.(type) {
|
||||||
|
case *ecdsa.PrivateKey:
|
||||||
|
return fromECPrivateKey(cryptoPrivateKey)
|
||||||
|
case *rsa.PrivateKey:
|
||||||
|
return fromRSAPrivateKey(cryptoPrivateKey), nil
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust
|
||||||
|
// PublicKey or an error if there is a problem with the encoding.
|
||||||
|
func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) {
|
||||||
|
pemBlock, _ := pem.Decode(data)
|
||||||
|
if pemBlock == nil {
|
||||||
|
return nil, errors.New("unable to find PEM encoded data")
|
||||||
|
} else if pemBlock.Type != "PUBLIC KEY" {
|
||||||
|
return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type)
|
||||||
|
}
|
||||||
|
|
||||||
|
return pubKeyFromPEMBlock(pemBlock)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of
|
||||||
|
// PEM blocks appended one after the other and returns a slice of PublicKey
|
||||||
|
// objects that it finds.
|
||||||
|
func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) {
|
||||||
|
pubKeys := []PublicKey{}
|
||||||
|
|
||||||
|
for {
|
||||||
|
var pemBlock *pem.Block
|
||||||
|
pemBlock, data = pem.Decode(data)
|
||||||
|
if pemBlock == nil {
|
||||||
|
break
|
||||||
|
} else if pemBlock.Type != "PUBLIC KEY" {
|
||||||
|
return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type)
|
||||||
|
}
|
||||||
|
|
||||||
|
pubKey, err := pubKeyFromPEMBlock(pemBlock)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
pubKeys = append(pubKeys, pubKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
return pubKeys, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust
|
||||||
|
// PrivateKey or an error if there is a problem with the encoding.
|
||||||
|
func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) {
|
||||||
|
pemBlock, _ := pem.Decode(data)
|
||||||
|
if pemBlock == nil {
|
||||||
|
return nil, errors.New("unable to find PEM encoded data")
|
||||||
|
}
|
||||||
|
|
||||||
|
var key PrivateKey
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case pemBlock.Type == "RSA PRIVATE KEY":
|
||||||
|
rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err)
|
||||||
|
}
|
||||||
|
key = fromRSAPrivateKey(rsaPrivateKey)
|
||||||
|
case pemBlock.Type == "EC PRIVATE KEY":
|
||||||
|
ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err)
|
||||||
|
}
|
||||||
|
key, err = fromECPrivateKey(ecPrivateKey)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type)
|
||||||
|
}
|
||||||
|
|
||||||
|
addPEMHeadersToKey(pemBlock, key.PublicKey())
|
||||||
|
|
||||||
|
return key, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic
|
||||||
|
// Public Key to be used with libtrust.
|
||||||
|
func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) {
|
||||||
|
jwk := make(map[string]interface{})
|
||||||
|
|
||||||
|
err := json.Unmarshal(data, &jwk)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf(
|
||||||
|
"decoding JWK Public Key JSON data: %s\n", err,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the Key Type value.
|
||||||
|
kty, err := stringFromMap(jwk, "kty")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("JWK Public Key type: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case kty == "EC":
|
||||||
|
// Call out to unmarshal EC public key.
|
||||||
|
return ecPublicKeyFromMap(jwk)
|
||||||
|
case kty == "RSA":
|
||||||
|
// Call out to unmarshal RSA public key.
|
||||||
|
return rsaPublicKeyFromMap(jwk)
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf(
|
||||||
|
"JWK Public Key type not supported: %q\n", kty,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set
|
||||||
|
// and returns a slice of Public Key objects.
|
||||||
|
func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) {
|
||||||
|
rawKeys, err := loadJSONKeySetRaw(data)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
pubKeys := make([]PublicKey, 0, len(rawKeys))
|
||||||
|
|
||||||
|
for _, rawKey := range rawKeys {
|
||||||
|
pubKey, err := UnmarshalPublicKeyJWK(rawKey)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
pubKeys = append(pubKeys, pubKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
return pubKeys, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic
|
||||||
|
// Private Key to be used with libtrust.
|
||||||
|
func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) {
|
||||||
|
jwk := make(map[string]interface{})
|
||||||
|
|
||||||
|
err := json.Unmarshal(data, &jwk)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf(
|
||||||
|
"decoding JWK Private Key JSON data: %s\n", err,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the Key Type value.
|
||||||
|
kty, err := stringFromMap(jwk, "kty")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("JWK Private Key type: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case kty == "EC":
|
||||||
|
// Call out to unmarshal EC private key.
|
||||||
|
return ecPrivateKeyFromMap(jwk)
|
||||||
|
case kty == "RSA":
|
||||||
|
// Call out to unmarshal RSA private key.
|
||||||
|
return rsaPrivateKeyFromMap(jwk)
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf(
|
||||||
|
"JWK Private Key type not supported: %q\n", kty,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
255
vendor/github.com/docker/libtrust/key_files.go
generated
vendored
Normal file
255
vendor/github.com/docker/libtrust/key_files.go
generated
vendored
Normal file
@ -0,0 +1,255 @@
|
|||||||
|
package libtrust
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"encoding/pem"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrKeyFileDoesNotExist indicates that the private key file does not exist.
|
||||||
|
ErrKeyFileDoesNotExist = errors.New("key file does not exist")
|
||||||
|
)
|
||||||
|
|
||||||
|
func readKeyFileBytes(filename string) ([]byte, error) {
|
||||||
|
data, err := ioutil.ReadFile(filename)
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
err = ErrKeyFileDoesNotExist
|
||||||
|
} else {
|
||||||
|
err = fmt.Errorf("unable to read key file %s: %s", filename, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Loading and Saving of Public and Private Keys in either PEM or JWK format.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// LoadKeyFile opens the given filename and attempts to read a Private Key
|
||||||
|
// encoded in either PEM or JWK format (if .json or .jwk file extension).
|
||||||
|
func LoadKeyFile(filename string) (PrivateKey, error) {
|
||||||
|
contents, err := readKeyFileBytes(filename)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var key PrivateKey
|
||||||
|
|
||||||
|
if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
|
||||||
|
key, err = UnmarshalPrivateKeyJWK(contents)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to decode private key JWK: %s", err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
key, err = UnmarshalPrivateKeyPEM(contents)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to decode private key PEM: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return key, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadPublicKeyFile opens the given filename and attempts to read a Public Key
|
||||||
|
// encoded in either PEM or JWK format (if .json or .jwk file extension).
|
||||||
|
func LoadPublicKeyFile(filename string) (PublicKey, error) {
|
||||||
|
contents, err := readKeyFileBytes(filename)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var key PublicKey
|
||||||
|
|
||||||
|
if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
|
||||||
|
key, err = UnmarshalPublicKeyJWK(contents)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to decode public key JWK: %s", err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
key, err = UnmarshalPublicKeyPEM(contents)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to decode public key PEM: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return key, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveKey saves the given key to a file using the provided filename.
|
||||||
|
// This process will overwrite any existing file at the provided location.
|
||||||
|
func SaveKey(filename string, key PrivateKey) error {
|
||||||
|
var encodedKey []byte
|
||||||
|
var err error
|
||||||
|
|
||||||
|
if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
|
||||||
|
// Encode in JSON Web Key format.
|
||||||
|
encodedKey, err = json.MarshalIndent(key, "", " ")
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to encode private key JWK: %s", err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Encode in PEM format.
|
||||||
|
pemBlock, err := key.PEMBlock()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to encode private key PEM: %s", err)
|
||||||
|
}
|
||||||
|
encodedKey = pem.EncodeToMemory(pemBlock)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to write private key file %s: %s", filename, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SavePublicKey saves the given public key to the file.
|
||||||
|
func SavePublicKey(filename string, key PublicKey) error {
|
||||||
|
var encodedKey []byte
|
||||||
|
var err error
|
||||||
|
|
||||||
|
if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
|
||||||
|
// Encode in JSON Web Key format.
|
||||||
|
encodedKey, err = json.MarshalIndent(key, "", " ")
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to encode public key JWK: %s", err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Encode in PEM format.
|
||||||
|
pemBlock, err := key.PEMBlock()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to encode public key PEM: %s", err)
|
||||||
|
}
|
||||||
|
encodedKey = pem.EncodeToMemory(pemBlock)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to write public key file %s: %s", filename, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Public Key Set files
|
||||||
|
|
||||||
|
type jwkSet struct {
|
||||||
|
Keys []json.RawMessage `json:"keys"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadKeySetFile loads a key set
|
||||||
|
func LoadKeySetFile(filename string) ([]PublicKey, error) {
|
||||||
|
if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
|
||||||
|
return loadJSONKeySetFile(filename)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Must be a PEM format file
|
||||||
|
return loadPEMKeySetFile(filename)
|
||||||
|
}
|
||||||
|
|
||||||
|
func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) {
|
||||||
|
if len(data) == 0 {
|
||||||
|
// This is okay, just return an empty slice.
|
||||||
|
return []json.RawMessage{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
keySet := jwkSet{}
|
||||||
|
|
||||||
|
err := json.Unmarshal(data, &keySet)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return keySet.Keys, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func loadJSONKeySetFile(filename string) ([]PublicKey, error) {
|
||||||
|
contents, err := readKeyFileBytes(filename)
|
||||||
|
if err != nil && err != ErrKeyFileDoesNotExist {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return UnmarshalPublicKeyJWKSet(contents)
|
||||||
|
}
|
||||||
|
|
||||||
|
func loadPEMKeySetFile(filename string) ([]PublicKey, error) {
|
||||||
|
data, err := readKeyFileBytes(filename)
|
||||||
|
if err != nil && err != ErrKeyFileDoesNotExist {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return UnmarshalPublicKeyPEMBundle(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddKeySetFile adds a key to a key set
|
||||||
|
func AddKeySetFile(filename string, key PublicKey) error {
|
||||||
|
if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
|
||||||
|
return addKeySetJSONFile(filename, key)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Must be a PEM format file
|
||||||
|
return addKeySetPEMFile(filename, key)
|
||||||
|
}
|
||||||
|
|
||||||
|
func addKeySetJSONFile(filename string, key PublicKey) error {
|
||||||
|
encodedKey, err := json.Marshal(key)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to encode trusted client key: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
contents, err := readKeyFileBytes(filename)
|
||||||
|
if err != nil && err != ErrKeyFileDoesNotExist {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
rawEntries, err := loadJSONKeySetRaw(contents)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
rawEntries = append(rawEntries, json.RawMessage(encodedKey))
|
||||||
|
entriesWrapper := jwkSet{Keys: rawEntries}
|
||||||
|
|
||||||
|
encodedEntries, err := json.MarshalIndent(entriesWrapper, "", " ")
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to encode trusted client keys: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func addKeySetPEMFile(filename string, key PublicKey) error {
|
||||||
|
// Encode to PEM, open file for appending, write PEM.
|
||||||
|
file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err)
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
pemBlock, err := key.PEMBlock()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to encoded trusted key: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = file.Write(pem.EncodeToMemory(pemBlock))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to write trusted keys file: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
175
vendor/github.com/docker/libtrust/key_manager.go
generated
vendored
Normal file
175
vendor/github.com/docker/libtrust/key_manager.go
generated
vendored
Normal file
@ -0,0 +1,175 @@
|
|||||||
|
package libtrust
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"crypto/x509"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ClientKeyManager manages client keys on the filesystem
|
||||||
|
type ClientKeyManager struct {
|
||||||
|
key PrivateKey
|
||||||
|
clientFile string
|
||||||
|
clientDir string
|
||||||
|
|
||||||
|
clientLock sync.RWMutex
|
||||||
|
clients []PublicKey
|
||||||
|
|
||||||
|
configLock sync.Mutex
|
||||||
|
configs []*tls.Config
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewClientKeyManager loads a new manager from a set of key files
|
||||||
|
// and managed by the given private key.
|
||||||
|
func NewClientKeyManager(trustKey PrivateKey, clientFile, clientDir string) (*ClientKeyManager, error) {
|
||||||
|
m := &ClientKeyManager{
|
||||||
|
key: trustKey,
|
||||||
|
clientFile: clientFile,
|
||||||
|
clientDir: clientDir,
|
||||||
|
}
|
||||||
|
if err := m.loadKeys(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// TODO Start watching file and directory
|
||||||
|
|
||||||
|
return m, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *ClientKeyManager) loadKeys() (err error) {
|
||||||
|
// Load authorized keys file
|
||||||
|
var clients []PublicKey
|
||||||
|
if c.clientFile != "" {
|
||||||
|
clients, err = LoadKeySetFile(c.clientFile)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to load authorized keys: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add clients from authorized keys directory
|
||||||
|
files, err := ioutil.ReadDir(c.clientDir)
|
||||||
|
if err != nil && !os.IsNotExist(err) {
|
||||||
|
return fmt.Errorf("unable to open authorized keys directory: %s", err)
|
||||||
|
}
|
||||||
|
for _, f := range files {
|
||||||
|
if !f.IsDir() {
|
||||||
|
publicKey, err := LoadPublicKeyFile(path.Join(c.clientDir, f.Name()))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to load authorized key file: %s", err)
|
||||||
|
}
|
||||||
|
clients = append(clients, publicKey)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
c.clientLock.Lock()
|
||||||
|
c.clients = clients
|
||||||
|
c.clientLock.Unlock()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RegisterTLSConfig registers a tls configuration to manager
|
||||||
|
// such that any changes to the keys may be reflected in
|
||||||
|
// the tls client CA pool
|
||||||
|
func (c *ClientKeyManager) RegisterTLSConfig(tlsConfig *tls.Config) error {
|
||||||
|
c.clientLock.RLock()
|
||||||
|
certPool, err := GenerateCACertPool(c.key, c.clients)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("CA pool generation error: %s", err)
|
||||||
|
}
|
||||||
|
c.clientLock.RUnlock()
|
||||||
|
|
||||||
|
tlsConfig.ClientCAs = certPool
|
||||||
|
|
||||||
|
c.configLock.Lock()
|
||||||
|
c.configs = append(c.configs, tlsConfig)
|
||||||
|
c.configLock.Unlock()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewIdentityAuthTLSConfig creates a tls.Config for the server to use for
|
||||||
|
// libtrust identity authentication for the domain specified
|
||||||
|
func NewIdentityAuthTLSConfig(trustKey PrivateKey, clients *ClientKeyManager, addr string, domain string) (*tls.Config, error) {
|
||||||
|
tlsConfig := newTLSConfig()
|
||||||
|
|
||||||
|
tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
|
||||||
|
if err := clients.RegisterTLSConfig(tlsConfig); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate cert
|
||||||
|
ips, domains, err := parseAddr(addr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// add domain that it expects clients to use
|
||||||
|
domains = append(domains, domain)
|
||||||
|
x509Cert, err := GenerateSelfSignedServerCert(trustKey, domains, ips)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("certificate generation error: %s", err)
|
||||||
|
}
|
||||||
|
tlsConfig.Certificates = []tls.Certificate{{
|
||||||
|
Certificate: [][]byte{x509Cert.Raw},
|
||||||
|
PrivateKey: trustKey.CryptoPrivateKey(),
|
||||||
|
Leaf: x509Cert,
|
||||||
|
}}
|
||||||
|
|
||||||
|
return tlsConfig, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCertAuthTLSConfig creates a tls.Config for the server to use for
|
||||||
|
// certificate authentication
|
||||||
|
func NewCertAuthTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) {
|
||||||
|
tlsConfig := newTLSConfig()
|
||||||
|
|
||||||
|
cert, err := tls.LoadX509KeyPair(certPath, keyPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", certPath, keyPath, err)
|
||||||
|
}
|
||||||
|
tlsConfig.Certificates = []tls.Certificate{cert}
|
||||||
|
|
||||||
|
// Verify client certificates against a CA?
|
||||||
|
if caPath != "" {
|
||||||
|
certPool := x509.NewCertPool()
|
||||||
|
file, err := ioutil.ReadFile(caPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Couldn't read CA certificate: %s", err)
|
||||||
|
}
|
||||||
|
certPool.AppendCertsFromPEM(file)
|
||||||
|
|
||||||
|
tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
|
||||||
|
tlsConfig.ClientCAs = certPool
|
||||||
|
}
|
||||||
|
|
||||||
|
return tlsConfig, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func newTLSConfig() *tls.Config {
|
||||||
|
return &tls.Config{
|
||||||
|
NextProtos: []string{"http/1.1"},
|
||||||
|
// Avoid fallback on insecure SSL protocols
|
||||||
|
MinVersion: tls.VersionTLS10,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseAddr parses an address into an array of IPs and domains
|
||||||
|
func parseAddr(addr string) ([]net.IP, []string, error) {
|
||||||
|
host, _, err := net.SplitHostPort(addr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
var domains []string
|
||||||
|
var ips []net.IP
|
||||||
|
ip := net.ParseIP(host)
|
||||||
|
if ip != nil {
|
||||||
|
ips = []net.IP{ip}
|
||||||
|
} else {
|
||||||
|
domains = []string{host}
|
||||||
|
}
|
||||||
|
return ips, domains, nil
|
||||||
|
}
|
||||||
427
vendor/github.com/docker/libtrust/rsa_key.go
generated
vendored
Normal file
427
vendor/github.com/docker/libtrust/rsa_key.go
generated
vendored
Normal file
@ -0,0 +1,427 @@
|
|||||||
|
package libtrust
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto"
|
||||||
|
"crypto/rand"
|
||||||
|
"crypto/rsa"
|
||||||
|
"crypto/x509"
|
||||||
|
"encoding/json"
|
||||||
|
"encoding/pem"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"math/big"
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* RSA DSA PUBLIC KEY
|
||||||
|
*/
|
||||||
|
|
||||||
|
// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms.
|
||||||
|
type rsaPublicKey struct {
|
||||||
|
*rsa.PublicKey
|
||||||
|
extended map[string]interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey {
|
||||||
|
return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}}
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyType returns the JWK key type for RSA keys, i.e., "RSA".
|
||||||
|
func (k *rsaPublicKey) KeyType() string {
|
||||||
|
return "RSA"
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyID returns a distinct identifier which is unique to this Public Key.
|
||||||
|
func (k *rsaPublicKey) KeyID() string {
|
||||||
|
return keyIDFromCryptoKey(k)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (k *rsaPublicKey) String() string {
|
||||||
|
return fmt.Sprintf("RSA Public Key <%s>", k.KeyID())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify verifyies the signature of the data in the io.Reader using this Public Key.
|
||||||
|
// The alg parameter should be the name of the JWA digital signature algorithm
|
||||||
|
// which was used to produce the signature and should be supported by this
|
||||||
|
// public key. Returns a nil error if the signature is valid.
|
||||||
|
func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
|
||||||
|
// Verify the signature of the given date, return non-nil error if valid.
|
||||||
|
sigAlg, err := rsaSignatureAlgorithmByName(alg)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to verify Signature: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
hasher := sigAlg.HashID().New()
|
||||||
|
_, err = io.Copy(hasher, data)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error reading data to sign: %s", err)
|
||||||
|
}
|
||||||
|
hash := hasher.Sum(nil)
|
||||||
|
|
||||||
|
err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CryptoPublicKey returns the internal object which can be used as a
|
||||||
|
// crypto.PublicKey for use with other standard library operations. The type
|
||||||
|
// is either *rsa.PublicKey or *ecdsa.PublicKey
|
||||||
|
func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey {
|
||||||
|
return k.PublicKey
|
||||||
|
}
|
||||||
|
|
||||||
|
func (k *rsaPublicKey) toMap() map[string]interface{} {
|
||||||
|
jwk := make(map[string]interface{})
|
||||||
|
for k, v := range k.extended {
|
||||||
|
jwk[k] = v
|
||||||
|
}
|
||||||
|
jwk["kty"] = k.KeyType()
|
||||||
|
jwk["kid"] = k.KeyID()
|
||||||
|
jwk["n"] = joseBase64UrlEncode(k.N.Bytes())
|
||||||
|
jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E))
|
||||||
|
|
||||||
|
return jwk
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalJSON serializes this Public Key using the JWK JSON serialization format for
|
||||||
|
// RSA keys.
|
||||||
|
func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) {
|
||||||
|
return json.Marshal(k.toMap())
|
||||||
|
}
|
||||||
|
|
||||||
|
// PEMBlock serializes this Public Key to DER-encoded PKIX format.
|
||||||
|
func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) {
|
||||||
|
derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err)
|
||||||
|
}
|
||||||
|
k.extended["kid"] = k.KeyID() // For display purposes.
|
||||||
|
return createPemBlock("PUBLIC KEY", derBytes, k.extended)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) {
|
||||||
|
k.extended[field] = value
|
||||||
|
}
|
||||||
|
|
||||||
|
func (k *rsaPublicKey) GetExtendedField(field string) interface{} {
|
||||||
|
v, ok := k.extended[field]
|
||||||
|
if !ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) {
|
||||||
|
// JWK key type (kty) has already been determined to be "RSA".
|
||||||
|
// Need to extract 'n', 'e', and 'kid' and check for
|
||||||
|
// consistency.
|
||||||
|
|
||||||
|
// Get the modulus parameter N.
|
||||||
|
nB64Url, err := stringFromMap(jwk, "n")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
n, err := parseRSAModulusParam(nB64Url)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the public exponent E.
|
||||||
|
eB64Url, err := stringFromMap(jwk, "e")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
e, err := parseRSAPublicExponentParam(eB64Url)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
key := &rsaPublicKey{
|
||||||
|
PublicKey: &rsa.PublicKey{N: n, E: e},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Key ID is optional, but if it exists, it should match the key.
|
||||||
|
_, ok := jwk["kid"]
|
||||||
|
if ok {
|
||||||
|
kid, err := stringFromMap(jwk, "kid")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err)
|
||||||
|
}
|
||||||
|
if kid != key.KeyID() {
|
||||||
|
return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := jwk["d"]; ok {
|
||||||
|
return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent")
|
||||||
|
}
|
||||||
|
|
||||||
|
key.extended = jwk
|
||||||
|
|
||||||
|
return key, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* RSA DSA PRIVATE KEY
|
||||||
|
*/
|
||||||
|
|
||||||
|
// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms.
|
||||||
|
type rsaPrivateKey struct {
|
||||||
|
rsaPublicKey
|
||||||
|
*rsa.PrivateKey
|
||||||
|
}
|
||||||
|
|
||||||
|
func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey {
|
||||||
|
return &rsaPrivateKey{
|
||||||
|
*fromRSAPublicKey(&cryptoPrivateKey.PublicKey),
|
||||||
|
cryptoPrivateKey,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// PublicKey returns the Public Key data associated with this Private Key.
|
||||||
|
func (k *rsaPrivateKey) PublicKey() PublicKey {
|
||||||
|
return &k.rsaPublicKey
|
||||||
|
}
|
||||||
|
|
||||||
|
func (k *rsaPrivateKey) String() string {
|
||||||
|
return fmt.Sprintf("RSA Private Key <%s>", k.KeyID())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sign signs the data read from the io.Reader using a signature algorithm supported
|
||||||
|
// by the RSA private key. If the specified hashing algorithm is supported by
|
||||||
|
// this key, that hash function is used to generate the signature otherwise the
|
||||||
|
// the default hashing algorithm for this key is used. Returns the signature
|
||||||
|
// and the name of the JWK signature algorithm used, e.g., "RS256", "RS384",
|
||||||
|
// "RS512".
|
||||||
|
func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
|
||||||
|
// Generate a signature of the data using the internal alg.
|
||||||
|
sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID)
|
||||||
|
hasher := sigAlg.HashID().New()
|
||||||
|
|
||||||
|
_, err = io.Copy(hasher, data)
|
||||||
|
if err != nil {
|
||||||
|
return nil, "", fmt.Errorf("error reading data to sign: %s", err)
|
||||||
|
}
|
||||||
|
hash := hasher.Sum(nil)
|
||||||
|
|
||||||
|
signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash)
|
||||||
|
if err != nil {
|
||||||
|
return nil, "", fmt.Errorf("error producing signature: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
alg = sigAlg.HeaderParam()
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// CryptoPrivateKey returns the internal object which can be used as a
|
||||||
|
// crypto.PublicKey for use with other standard library operations. The type
|
||||||
|
// is either *rsa.PublicKey or *ecdsa.PublicKey
|
||||||
|
func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
|
||||||
|
return k.PrivateKey
|
||||||
|
}
|
||||||
|
|
||||||
|
func (k *rsaPrivateKey) toMap() map[string]interface{} {
|
||||||
|
k.Precompute() // Make sure the precomputed values are stored.
|
||||||
|
jwk := k.rsaPublicKey.toMap()
|
||||||
|
|
||||||
|
jwk["d"] = joseBase64UrlEncode(k.D.Bytes())
|
||||||
|
jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes())
|
||||||
|
jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes())
|
||||||
|
jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes())
|
||||||
|
jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes())
|
||||||
|
jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes())
|
||||||
|
|
||||||
|
otherPrimes := k.Primes[2:]
|
||||||
|
|
||||||
|
if len(otherPrimes) > 0 {
|
||||||
|
otherPrimesInfo := make([]interface{}, len(otherPrimes))
|
||||||
|
for i, r := range otherPrimes {
|
||||||
|
otherPrimeInfo := make(map[string]string, 3)
|
||||||
|
otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes())
|
||||||
|
crtVal := k.Precomputed.CRTValues[i]
|
||||||
|
otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes())
|
||||||
|
otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes())
|
||||||
|
otherPrimesInfo[i] = otherPrimeInfo
|
||||||
|
}
|
||||||
|
jwk["oth"] = otherPrimesInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
return jwk
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
|
||||||
|
// RSA keys.
|
||||||
|
func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) {
|
||||||
|
return json.Marshal(k.toMap())
|
||||||
|
}
|
||||||
|
|
||||||
|
// PEMBlock serializes this Private Key to DER-encoded PKIX format.
|
||||||
|
func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) {
|
||||||
|
derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey)
|
||||||
|
k.extended["keyID"] = k.KeyID() // For display purposes.
|
||||||
|
return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended)
|
||||||
|
}
|
||||||
|
|
||||||
|
func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) {
|
||||||
|
// The JWA spec for RSA Private Keys (draft rfc section 5.3.2) states that
|
||||||
|
// only the private key exponent 'd' is REQUIRED, the others are just for
|
||||||
|
// signature/decryption optimizations and SHOULD be included when the JWK
|
||||||
|
// is produced. We MAY choose to accept a JWK which only includes 'd', but
|
||||||
|
// we're going to go ahead and not choose to accept it without the extra
|
||||||
|
// fields. Only the 'oth' field will be optional (for multi-prime keys).
|
||||||
|
privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err)
|
||||||
|
}
|
||||||
|
firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
|
||||||
|
}
|
||||||
|
secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
|
||||||
|
}
|
||||||
|
firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
|
||||||
|
}
|
||||||
|
secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
|
||||||
|
}
|
||||||
|
crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var oth interface{}
|
||||||
|
if _, ok := jwk["oth"]; ok {
|
||||||
|
oth = jwk["oth"]
|
||||||
|
delete(jwk, "oth")
|
||||||
|
}
|
||||||
|
|
||||||
|
// JWK key type (kty) has already been determined to be "RSA".
|
||||||
|
// Need to extract the public key information, then extract the private
|
||||||
|
// key values.
|
||||||
|
publicKey, err := rsaPublicKeyFromMap(jwk)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
privateKey := &rsa.PrivateKey{
|
||||||
|
PublicKey: *publicKey.PublicKey,
|
||||||
|
D: privateExponent,
|
||||||
|
Primes: []*big.Int{firstPrimeFactor, secondPrimeFactor},
|
||||||
|
Precomputed: rsa.PrecomputedValues{
|
||||||
|
Dp: firstFactorCRT,
|
||||||
|
Dq: secondFactorCRT,
|
||||||
|
Qinv: crtCoeff,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
if oth != nil {
|
||||||
|
// Should be an array of more JSON objects.
|
||||||
|
otherPrimesInfo, ok := oth.([]interface{})
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array")
|
||||||
|
}
|
||||||
|
numOtherPrimeFactors := len(otherPrimesInfo)
|
||||||
|
if numOtherPrimeFactors == 0 {
|
||||||
|
return nil, errors.New("JWK RSA Privake Key: Invalid other primes info: must be absent or non-empty")
|
||||||
|
}
|
||||||
|
otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors)
|
||||||
|
productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor)
|
||||||
|
crtValues := make([]rsa.CRTValue, numOtherPrimeFactors)
|
||||||
|
|
||||||
|
for i, val := range otherPrimesInfo {
|
||||||
|
otherPrimeinfo, ok := val.(map[string]interface{})
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object")
|
||||||
|
}
|
||||||
|
|
||||||
|
otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
|
||||||
|
}
|
||||||
|
otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
|
||||||
|
}
|
||||||
|
otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
crtValue := crtValues[i]
|
||||||
|
crtValue.Exp = otherFactorCRT
|
||||||
|
crtValue.Coeff = otherCrtCoeff
|
||||||
|
crtValue.R = productOfPrimes
|
||||||
|
otherPrimeFactors[i] = otherPrimeFactor
|
||||||
|
productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor)
|
||||||
|
}
|
||||||
|
|
||||||
|
privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...)
|
||||||
|
privateKey.Precomputed.CRTValues = crtValues
|
||||||
|
}
|
||||||
|
|
||||||
|
key := &rsaPrivateKey{
|
||||||
|
rsaPublicKey: *publicKey,
|
||||||
|
PrivateKey: privateKey,
|
||||||
|
}
|
||||||
|
|
||||||
|
return key, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Key Generation Functions.
|
||||||
|
*/
|
||||||
|
|
||||||
|
func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) {
|
||||||
|
k = new(rsaPrivateKey)
|
||||||
|
k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey
|
||||||
|
k.extended = make(map[string]interface{})
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA.
|
||||||
|
func GenerateRSA2048PrivateKey() (PrivateKey, error) {
|
||||||
|
k, err := generateRSAPrivateKey(2048)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return k, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA.
|
||||||
|
func GenerateRSA3072PrivateKey() (PrivateKey, error) {
|
||||||
|
k, err := generateRSAPrivateKey(3072)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return k, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA.
|
||||||
|
func GenerateRSA4096PrivateKey() (PrivateKey, error) {
|
||||||
|
k, err := generateRSAPrivateKey(4096)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return k, nil
|
||||||
|
}
|
||||||
363
vendor/github.com/docker/libtrust/util.go
generated
vendored
Normal file
363
vendor/github.com/docker/libtrust/util.go
generated
vendored
Normal file
@ -0,0 +1,363 @@
|
|||||||
|
package libtrust
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"crypto"
|
||||||
|
"crypto/elliptic"
|
||||||
|
"crypto/tls"
|
||||||
|
"crypto/x509"
|
||||||
|
"encoding/base32"
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/binary"
|
||||||
|
"encoding/pem"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"math/big"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// LoadOrCreateTrustKey will load a PrivateKey from the specified path
|
||||||
|
func LoadOrCreateTrustKey(trustKeyPath string) (PrivateKey, error) {
|
||||||
|
if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
trustKey, err := LoadKeyFile(trustKeyPath)
|
||||||
|
if err == ErrKeyFileDoesNotExist {
|
||||||
|
trustKey, err = GenerateECP256PrivateKey()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error generating key: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := SaveKey(trustKeyPath, trustKey); err != nil {
|
||||||
|
return nil, fmt.Errorf("error saving key file: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
dir, file := filepath.Split(trustKeyPath)
|
||||||
|
if err := SavePublicKey(filepath.Join(dir, "public-"+file), trustKey.PublicKey()); err != nil {
|
||||||
|
return nil, fmt.Errorf("error saving public key file: %s", err)
|
||||||
|
}
|
||||||
|
} else if err != nil {
|
||||||
|
return nil, fmt.Errorf("error loading key file: %s", err)
|
||||||
|
}
|
||||||
|
return trustKey, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewIdentityAuthTLSClientConfig returns a tls.Config configured to use identity
|
||||||
|
// based authentication from the specified dockerUrl, the rootConfigPath and
|
||||||
|
// the server name to which it is connecting.
|
||||||
|
// If trustUnknownHosts is true it will automatically add the host to the
|
||||||
|
// known-hosts.json in rootConfigPath.
|
||||||
|
func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, rootConfigPath string, serverName string) (*tls.Config, error) {
|
||||||
|
tlsConfig := newTLSConfig()
|
||||||
|
|
||||||
|
trustKeyPath := filepath.Join(rootConfigPath, "key.json")
|
||||||
|
knownHostsPath := filepath.Join(rootConfigPath, "known-hosts.json")
|
||||||
|
|
||||||
|
u, err := url.Parse(dockerUrl)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to parse machine url")
|
||||||
|
}
|
||||||
|
|
||||||
|
if u.Scheme == "unix" {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
addr := u.Host
|
||||||
|
proto := "tcp"
|
||||||
|
|
||||||
|
trustKey, err := LoadOrCreateTrustKey(trustKeyPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to load trust key: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
knownHosts, err := LoadKeySetFile(knownHostsPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("could not load trusted hosts file: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
allowedHosts, err := FilterByHosts(knownHosts, addr, false)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error filtering hosts: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
certPool, err := GenerateCACertPool(trustKey, allowedHosts)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Could not create CA pool: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
tlsConfig.ServerName = serverName
|
||||||
|
tlsConfig.RootCAs = certPool
|
||||||
|
|
||||||
|
x509Cert, err := GenerateSelfSignedClientCert(trustKey)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("certificate generation error: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
tlsConfig.Certificates = []tls.Certificate{{
|
||||||
|
Certificate: [][]byte{x509Cert.Raw},
|
||||||
|
PrivateKey: trustKey.CryptoPrivateKey(),
|
||||||
|
Leaf: x509Cert,
|
||||||
|
}}
|
||||||
|
|
||||||
|
tlsConfig.InsecureSkipVerify = true
|
||||||
|
|
||||||
|
testConn, err := tls.Dial(proto, addr, tlsConfig)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("tls Handshake error: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := x509.VerifyOptions{
|
||||||
|
Roots: tlsConfig.RootCAs,
|
||||||
|
CurrentTime: time.Now(),
|
||||||
|
DNSName: tlsConfig.ServerName,
|
||||||
|
Intermediates: x509.NewCertPool(),
|
||||||
|
}
|
||||||
|
|
||||||
|
certs := testConn.ConnectionState().PeerCertificates
|
||||||
|
for i, cert := range certs {
|
||||||
|
if i == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
opts.Intermediates.AddCert(cert)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := certs[0].Verify(opts); err != nil {
|
||||||
|
if _, ok := err.(x509.UnknownAuthorityError); ok {
|
||||||
|
if trustUnknownHosts {
|
||||||
|
pubKey, err := FromCryptoPublicKey(certs[0].PublicKey)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error extracting public key from cert: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
pubKey.AddExtendedField("hosts", []string{addr})
|
||||||
|
|
||||||
|
if err := AddKeySetFile(knownHostsPath, pubKey); err != nil {
|
||||||
|
return nil, fmt.Errorf("error adding machine to known hosts: %s", err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return nil, fmt.Errorf("unable to connect. unknown host: %s", addr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
testConn.Close()
|
||||||
|
tlsConfig.InsecureSkipVerify = false
|
||||||
|
|
||||||
|
return tlsConfig, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// joseBase64UrlEncode encodes the given data using the standard base64 url
|
||||||
|
// encoding format but with all trailing '=' characters ommitted in accordance
|
||||||
|
// with the jose specification.
|
||||||
|
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
|
||||||
|
func joseBase64UrlEncode(b []byte) string {
|
||||||
|
return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
|
||||||
|
}
|
||||||
|
|
||||||
|
// joseBase64UrlDecode decodes the given string using the standard base64 url
|
||||||
|
// decoder but first adds the appropriate number of trailing '=' characters in
|
||||||
|
// accordance with the jose specification.
|
||||||
|
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
|
||||||
|
func joseBase64UrlDecode(s string) ([]byte, error) {
|
||||||
|
s = strings.Replace(s, "\n", "", -1)
|
||||||
|
s = strings.Replace(s, " ", "", -1)
|
||||||
|
switch len(s) % 4 {
|
||||||
|
case 0:
|
||||||
|
case 2:
|
||||||
|
s += "=="
|
||||||
|
case 3:
|
||||||
|
s += "="
|
||||||
|
default:
|
||||||
|
return nil, errors.New("illegal base64url string")
|
||||||
|
}
|
||||||
|
return base64.URLEncoding.DecodeString(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
func keyIDEncode(b []byte) string {
|
||||||
|
s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=")
|
||||||
|
var buf bytes.Buffer
|
||||||
|
var i int
|
||||||
|
for i = 0; i < len(s)/4-1; i++ {
|
||||||
|
start := i * 4
|
||||||
|
end := start + 4
|
||||||
|
buf.WriteString(s[start:end] + ":")
|
||||||
|
}
|
||||||
|
buf.WriteString(s[i*4:])
|
||||||
|
return buf.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func keyIDFromCryptoKey(pubKey PublicKey) string {
|
||||||
|
// Generate and return a 'libtrust' fingerprint of the public key.
|
||||||
|
// For an RSA key this should be:
|
||||||
|
// SHA256(DER encoded ASN1)
|
||||||
|
// Then truncated to 240 bits and encoded into 12 base32 groups like so:
|
||||||
|
// ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
|
||||||
|
derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey())
|
||||||
|
if err != nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
hasher := crypto.SHA256.New()
|
||||||
|
hasher.Write(derBytes)
|
||||||
|
return keyIDEncode(hasher.Sum(nil)[:30])
|
||||||
|
}
|
||||||
|
|
||||||
|
func stringFromMap(m map[string]interface{}, key string) (string, error) {
|
||||||
|
val, ok := m[key]
|
||||||
|
if !ok {
|
||||||
|
return "", fmt.Errorf("%q value not specified", key)
|
||||||
|
}
|
||||||
|
|
||||||
|
str, ok := val.(string)
|
||||||
|
if !ok {
|
||||||
|
return "", fmt.Errorf("%q value must be a string", key)
|
||||||
|
}
|
||||||
|
delete(m, key)
|
||||||
|
|
||||||
|
return str, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) {
|
||||||
|
curveByteLen := (curve.Params().BitSize + 7) >> 3
|
||||||
|
|
||||||
|
cBytes, err := joseBase64UrlDecode(cB64Url)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
|
||||||
|
}
|
||||||
|
cByteLength := len(cBytes)
|
||||||
|
if cByteLength != curveByteLen {
|
||||||
|
return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen)
|
||||||
|
}
|
||||||
|
return new(big.Int).SetBytes(cBytes), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) {
|
||||||
|
dBytes, err := joseBase64UrlDecode(dB64Url)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The length of this octet string MUST be ceiling(log-base-2(n)/8)
|
||||||
|
// octets (where n is the order of the curve). This is because the private
|
||||||
|
// key d must be in the interval [1, n-1] so the bitlength of d should be
|
||||||
|
// no larger than the bitlength of n-1. The easiest way to find the octet
|
||||||
|
// length is to take bitlength(n-1), add 7 to force a carry, and shift this
|
||||||
|
// bit sequence right by 3, which is essentially dividing by 8 and adding
|
||||||
|
// 1 if there is any remainder. Thus, the private key value d should be
|
||||||
|
// output to (bitlength(n-1)+7)>>3 octets.
|
||||||
|
n := curve.Params().N
|
||||||
|
octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
|
||||||
|
dByteLength := len(dBytes)
|
||||||
|
|
||||||
|
if dByteLength != octetLength {
|
||||||
|
return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength)
|
||||||
|
}
|
||||||
|
|
||||||
|
return new(big.Int).SetBytes(dBytes), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseRSAModulusParam(nB64Url string) (*big.Int, error) {
|
||||||
|
nBytes, err := joseBase64UrlDecode(nB64Url)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return new(big.Int).SetBytes(nBytes), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func serializeRSAPublicExponentParam(e int) []byte {
|
||||||
|
// We MUST use the minimum number of octets to represent E.
|
||||||
|
// E is supposed to be 65537 for performance and security reasons
|
||||||
|
// and is what golang's rsa package generates, but it might be
|
||||||
|
// different if imported from some other generator.
|
||||||
|
buf := make([]byte, 4)
|
||||||
|
binary.BigEndian.PutUint32(buf, uint32(e))
|
||||||
|
var i int
|
||||||
|
for i = 0; i < 8; i++ {
|
||||||
|
if buf[i] != 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return buf[i:]
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseRSAPublicExponentParam(eB64Url string) (int, error) {
|
||||||
|
eBytes, err := joseBase64UrlDecode(eB64Url)
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("invalid base64 URL encoding: %s", err)
|
||||||
|
}
|
||||||
|
// Only the minimum number of bytes were used to represent E, but
|
||||||
|
// binary.BigEndian.Uint32 expects at least 4 bytes, so we need
|
||||||
|
// to add zero padding if necassary.
|
||||||
|
byteLen := len(eBytes)
|
||||||
|
buf := make([]byte, 4-byteLen, 4)
|
||||||
|
eBytes = append(buf, eBytes...)
|
||||||
|
|
||||||
|
return int(binary.BigEndian.Uint32(eBytes)), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) {
|
||||||
|
b64Url, err := stringFromMap(m, key)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
paramBytes, err := joseBase64UrlDecode(b64Url)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("invaled base64 URL encoding: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return new(big.Int).SetBytes(paramBytes), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) {
|
||||||
|
pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}}
|
||||||
|
for k, v := range headers {
|
||||||
|
switch val := v.(type) {
|
||||||
|
case string:
|
||||||
|
pemBlock.Headers[k] = val
|
||||||
|
case []string:
|
||||||
|
if k == "hosts" {
|
||||||
|
pemBlock.Headers[k] = strings.Join(val, ",")
|
||||||
|
} else {
|
||||||
|
// Return error, non-encodable type
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
// Return error, non-encodable type
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return pemBlock, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) {
|
||||||
|
cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
pubKey, err := FromCryptoPublicKey(cryptoPublicKey)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
addPEMHeadersToKey(pemBlock, pubKey)
|
||||||
|
|
||||||
|
return pubKey, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func addPEMHeadersToKey(pemBlock *pem.Block, pubKey PublicKey) {
|
||||||
|
for key, value := range pemBlock.Headers {
|
||||||
|
var safeVal interface{}
|
||||||
|
if key == "hosts" {
|
||||||
|
safeVal = strings.Split(value, ",")
|
||||||
|
} else {
|
||||||
|
safeVal = value
|
||||||
|
}
|
||||||
|
pubKey.AddExtendedField(key, safeVal)
|
||||||
|
}
|
||||||
|
}
|
||||||
2
vendor/github.com/fsouza/go-dockerclient/.gitignore
generated
vendored
Normal file
2
vendor/github.com/fsouza/go-dockerclient/.gitignore
generated
vendored
Normal file
@ -0,0 +1,2 @@
|
|||||||
|
# temporary symlink for testing
|
||||||
|
testing/data/symlink
|
||||||
27
vendor/github.com/fsouza/go-dockerclient/.travis.yml
generated
vendored
Normal file
27
vendor/github.com/fsouza/go-dockerclient/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
language: go
|
||||||
|
sudo: required
|
||||||
|
go:
|
||||||
|
- 1.4.2
|
||||||
|
- 1.5.3
|
||||||
|
- 1.6
|
||||||
|
- tip
|
||||||
|
os:
|
||||||
|
- linux
|
||||||
|
- osx
|
||||||
|
env:
|
||||||
|
- GOARCH=amd64 DOCKER_VERSION=1.8.3
|
||||||
|
- GOARCH=386 DOCKER_VERSION=1.8.3
|
||||||
|
- GOARCH=amd64 DOCKER_VERSION=1.9.1
|
||||||
|
- GOARCH=386 DOCKER_VERSION=1.9.1
|
||||||
|
- GOARCH=amd64 DOCKER_VERSION=1.10.3
|
||||||
|
- GOARCH=386 DOCKER_VERSION=1.10.3
|
||||||
|
install:
|
||||||
|
- travis_retry travis-scripts/install.bash
|
||||||
|
script:
|
||||||
|
- travis-scripts/run-tests.bash
|
||||||
|
services:
|
||||||
|
- docker
|
||||||
|
matrix:
|
||||||
|
fast_finish: true
|
||||||
|
allow_failures:
|
||||||
|
- go: tip
|
||||||
132
vendor/github.com/fsouza/go-dockerclient/AUTHORS
generated
vendored
Normal file
132
vendor/github.com/fsouza/go-dockerclient/AUTHORS
generated
vendored
Normal file
@ -0,0 +1,132 @@
|
|||||||
|
# This is the official list of go-dockerclient authors for copyright purposes.
|
||||||
|
|
||||||
|
Abhishek Chanda <abhishek.becs@gmail.com>
|
||||||
|
Adam Bell-Hanssen <adamb@aller.no>
|
||||||
|
Adrien Kohlbecker <adrien.kohlbecker@gmail.com>
|
||||||
|
Aldrin Leal <aldrin@leal.eng.br>
|
||||||
|
Andreas Jaekle <andreas@jaekle.net>
|
||||||
|
Andrews Medina <andrewsmedina@gmail.com>
|
||||||
|
Andrey Sibiryov <kobolog@uber.com>
|
||||||
|
Andy Goldstein <andy.goldstein@redhat.com>
|
||||||
|
Antonio Murdaca <runcom@redhat.com>
|
||||||
|
Artem Sidorenko <artem@2realities.com>
|
||||||
|
Ben Marini <ben@remind101.com>
|
||||||
|
Ben McCann <benmccann.com>
|
||||||
|
Ben Parees <bparees@redhat.com>
|
||||||
|
Benno van den Berg <bennovandenberg@gmail.com>
|
||||||
|
Bradley Cicenas <bradley.cicenas@gmail.com>
|
||||||
|
Brendan Fosberry <brendan@codeship.com>
|
||||||
|
Brian Lalor <blalor@bravo5.org>
|
||||||
|
Brian P. Hamachek <brian@brianhama.com>
|
||||||
|
Brian Palmer <brianp@instructure.com>
|
||||||
|
Bryan Boreham <bjboreham@gmail.com>
|
||||||
|
Burke Libbey <burke@libbey.me>
|
||||||
|
Carlos Diaz-Padron <cpadron@mozilla.com>
|
||||||
|
Cesar Wong <cewong@redhat.com>
|
||||||
|
Cezar Sa Espinola <cezar.sa@corp.globo.com>
|
||||||
|
Cheah Chu Yeow <chuyeow@gmail.com>
|
||||||
|
cheneydeng <cheneydeng@qq.com>
|
||||||
|
Chris Bednarski <banzaimonkey@gmail.com>
|
||||||
|
CMGS <ilskdw@gmail.com>
|
||||||
|
Colin Hebert <hebert.colin@gmail.com>
|
||||||
|
Craig Jellick <craig@rancher.com>
|
||||||
|
Dan Williams <dcbw@redhat.com>
|
||||||
|
Daniel, Dao Quang Minh <dqminh89@gmail.com>
|
||||||
|
Daniel Garcia <daniel@danielgarcia.info>
|
||||||
|
Daniel Hiltgen <daniel.hiltgen@docker.com>
|
||||||
|
Darren Shepherd <darren@rancher.com>
|
||||||
|
Dave Choi <dave.choi@daumkakao.com>
|
||||||
|
David Huie <dahuie@gmail.com>
|
||||||
|
Dawn Chen <dawnchen@google.com>
|
||||||
|
Dinesh Subhraveti <dinesh@gemini-systems.net>
|
||||||
|
Drew Wells <drew.wells00@gmail.com>
|
||||||
|
Ed <edrocksit@gmail.com>
|
||||||
|
Elias G. Schneevoigt <eliasgs@gmail.com>
|
||||||
|
Erez Horev <erez.horev@elastifile.com>
|
||||||
|
Eric Anderson <anderson@copperegg.com>
|
||||||
|
Ewout Prangsma <ewout@prangsma.net>
|
||||||
|
Fabio Rehm <fgrehm@gmail.com>
|
||||||
|
Fatih Arslan <ftharsln@gmail.com>
|
||||||
|
Flavia Missi <flaviamissi@gmail.com>
|
||||||
|
Francisco Souza <f@souza.cc>
|
||||||
|
Frank Groeneveld <frank@frankgroeneveld.nl>
|
||||||
|
George Moura <gwmoura@gmail.com>
|
||||||
|
Grégoire Delattre <gregoire.delattre@gmail.com>
|
||||||
|
Guillermo Álvarez Fernández <guillermo@cientifico.net>
|
||||||
|
Harry Zhang <harryzhang@zju.edu.cn>
|
||||||
|
He Simei <hesimei@zju.edu.cn>
|
||||||
|
Ivan Mikushin <i.mikushin@gmail.com>
|
||||||
|
James Bardin <jbardin@litl.com>
|
||||||
|
James Nugent <james@jen20.com>
|
||||||
|
Jari Kolehmainen <jari.kolehmainen@digia.com>
|
||||||
|
Jason Wilder <jwilder@litl.com>
|
||||||
|
Jawher Moussa <jawher.moussa@gmail.com>
|
||||||
|
Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
|
||||||
|
Jeff Mitchell <jeffrey.mitchell@gmail.com>
|
||||||
|
Jeffrey Hulten <jhulten@gmail.com>
|
||||||
|
Jen Andre <jandre@gmail.com>
|
||||||
|
Jérôme Laurens <jeromelaurens@gmail.com>
|
||||||
|
Johan Euphrosine <proppy@google.com>
|
||||||
|
John Hughes <hughesj@visa.com>
|
||||||
|
Kamil Domanski <kamil@domanski.co>
|
||||||
|
Karan Misra <kidoman@gmail.com>
|
||||||
|
Ken Herner <chosenken@gmail.com>
|
||||||
|
Kim, Hirokuni <hirokuni.kim@kvh.co.jp>
|
||||||
|
Kyle Allan <kallan357@gmail.com>
|
||||||
|
Liron Levin <levinlir@gmail.com>
|
||||||
|
Lior Yankovich <lior@twistlock.com>
|
||||||
|
Liu Peng <vslene@gmail.com>
|
||||||
|
Lorenz Leutgeb <lorenz.leutgeb@gmail.com>
|
||||||
|
Lucas Clemente <lucas@clemente.io>
|
||||||
|
Lucas Weiblen <lucasweiblen@gmail.com>
|
||||||
|
Lyon Hill <lyondhill@gmail.com>
|
||||||
|
Mantas Matelis <mmatelis@coursera.org>
|
||||||
|
Martin Sweeney <martin@sweeney.io>
|
||||||
|
Máximo Cuadros Ortiz <mcuadros@gmail.com>
|
||||||
|
Michael Schmatz <michaelschmatz@gmail.com>
|
||||||
|
Michal Fojtik <mfojtik@redhat.com>
|
||||||
|
Mike Dillon <mike.dillon@synctree.com>
|
||||||
|
Mrunal Patel <mrunalp@gmail.com>
|
||||||
|
Nate Jones <nate@endot.org>
|
||||||
|
Nguyen Sy Thanh Son <sonnst@sigma-solutions.eu>
|
||||||
|
Nicholas Van Wiggeren <nvanwiggeren@digitalocean.com>
|
||||||
|
Nick Ethier <ncethier@gmail.com>
|
||||||
|
Omeid Matten <public@omeid.me>
|
||||||
|
Orivej Desh <orivej@gmx.fr>
|
||||||
|
Paul Bellamy <paul.a.bellamy@gmail.com>
|
||||||
|
Paul Morie <pmorie@gmail.com>
|
||||||
|
Paul Weil <pweil@redhat.com>
|
||||||
|
Peter Edge <peter.edge@gmail.com>
|
||||||
|
Peter Jihoon Kim <raingrove@gmail.com>
|
||||||
|
Phil Lu <lu@stackengine.com>
|
||||||
|
Philippe Lafoucrière <philippe.lafoucriere@tech-angels.com>
|
||||||
|
Rafe Colton <rafael.colton@gmail.com>
|
||||||
|
Rob Miller <rob@kalistra.com>
|
||||||
|
Robert Williamson <williamson.robert@gmail.com>
|
||||||
|
Roman Khlystik <roman.khlystik@gmail.com>
|
||||||
|
Salvador Gironès <salvadorgirones@gmail.com>
|
||||||
|
Sam Rijs <srijs@airpost.net>
|
||||||
|
Sami Wagiaalla <swagiaal@redhat.com>
|
||||||
|
Samuel Archambault <sarchambault@lapresse.ca>
|
||||||
|
Samuel Karp <skarp@amazon.com>
|
||||||
|
Silas Sewell <silas@sewell.org>
|
||||||
|
Simon Eskildsen <sirup@sirupsen.com>
|
||||||
|
Simon Menke <simon.menke@gmail.com>
|
||||||
|
Skolos <skolos@gopherlab.com>
|
||||||
|
Soulou <leo@unbekandt.eu>
|
||||||
|
Sridhar Ratnakumar <sridharr@activestate.com>
|
||||||
|
Summer Mousa <smousa@zenoss.com>
|
||||||
|
Sunjin Lee <styner32@gmail.com>
|
||||||
|
Tarsis Azevedo <tarsis@corp.globo.com>
|
||||||
|
Tim Schindler <tim@catalyst-zero.com>
|
||||||
|
Timothy St. Clair <tstclair@redhat.com>
|
||||||
|
Tobi Knaup <tobi@mesosphere.io>
|
||||||
|
Tom Wilkie <tom.wilkie@gmail.com>
|
||||||
|
Tonic <tonicbupt@gmail.com>
|
||||||
|
ttyh061 <ttyh061@gmail.com>
|
||||||
|
Victor Marmol <vmarmol@google.com>
|
||||||
|
Vincenzo Prignano <vincenzo.prignano@gmail.com>
|
||||||
|
Wiliam Souza <wiliamsouza83@gmail.com>
|
||||||
|
Ye Yin <eyniy@qq.com>
|
||||||
|
Yu, Zou <zouyu7@huawei.com>
|
||||||
|
Yuriy Bogdanov <chinsay@gmail.com>
|
||||||
6
vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE
generated
vendored
Normal file
6
vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE
generated
vendored
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
You can find the Docker license at the following link:
|
||||||
|
https://raw.githubusercontent.com/docker/docker/master/LICENSE
|
||||||
22
vendor/github.com/fsouza/go-dockerclient/LICENSE
generated
vendored
Normal file
22
vendor/github.com/fsouza/go-dockerclient/LICENSE
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
Copyright (c) 2016, go-dockerclient authors
|
||||||
|
All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright notice,
|
||||||
|
this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
this list of conditions and the following disclaimer in the documentation
|
||||||
|
and/or other materials provided with the distribution.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||||
|
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||||
|
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||||
|
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||||
|
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||||
|
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||||
|
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||||
|
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||||
|
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
57
vendor/github.com/fsouza/go-dockerclient/Makefile
generated
vendored
Normal file
57
vendor/github.com/fsouza/go-dockerclient/Makefile
generated
vendored
Normal file
@ -0,0 +1,57 @@
|
|||||||
|
.PHONY: \
|
||||||
|
all \
|
||||||
|
vendor \
|
||||||
|
lint \
|
||||||
|
vet \
|
||||||
|
fmt \
|
||||||
|
fmtcheck \
|
||||||
|
pretest \
|
||||||
|
test \
|
||||||
|
integration \
|
||||||
|
cov \
|
||||||
|
clean
|
||||||
|
|
||||||
|
PKGS = . ./testing
|
||||||
|
|
||||||
|
all: test
|
||||||
|
|
||||||
|
vendor:
|
||||||
|
@ go get -v github.com/mjibson/party
|
||||||
|
party -d external -c -u
|
||||||
|
|
||||||
|
lint:
|
||||||
|
@ go get -v github.com/golang/lint/golint
|
||||||
|
@for file in $$(git ls-files '*.go' | grep -v 'external/'); do \
|
||||||
|
export output="$$(golint $${file} | grep -v 'type name will be used as docker.DockerInfo')"; \
|
||||||
|
[ -n "$${output}" ] && echo "$${output}" && export status=1; \
|
||||||
|
done; \
|
||||||
|
exit $${status:-0}
|
||||||
|
|
||||||
|
vet:
|
||||||
|
$(foreach pkg,$(PKGS),go vet $(pkg);)
|
||||||
|
|
||||||
|
fmt:
|
||||||
|
gofmt -s -w $(PKGS)
|
||||||
|
|
||||||
|
fmtcheck:
|
||||||
|
@ export output=$$(gofmt -s -d $(PKGS)); \
|
||||||
|
[ -n "$${output}" ] && echo "$${output}" && export status=1; \
|
||||||
|
exit $${status:-0}
|
||||||
|
|
||||||
|
pretest: lint vet fmtcheck
|
||||||
|
|
||||||
|
gotest:
|
||||||
|
$(foreach pkg,$(PKGS),go test $(pkg) || exit;)
|
||||||
|
|
||||||
|
test: pretest gotest
|
||||||
|
|
||||||
|
integration:
|
||||||
|
go test -tags docker_integration -run TestIntegration -v
|
||||||
|
|
||||||
|
cov:
|
||||||
|
@ go get -v github.com/axw/gocov/gocov
|
||||||
|
@ go get golang.org/x/tools/cmd/cover
|
||||||
|
gocov test | gocov report
|
||||||
|
|
||||||
|
clean:
|
||||||
|
$(foreach pkg,$(PKGS),go clean $(pkg) || exit;)
|
||||||
105
vendor/github.com/fsouza/go-dockerclient/README.markdown
generated
vendored
Normal file
105
vendor/github.com/fsouza/go-dockerclient/README.markdown
generated
vendored
Normal file
@ -0,0 +1,105 @@
|
|||||||
|
# go-dockerclient
|
||||||
|
|
||||||
|
[](https://travis-ci.org/fsouza/go-dockerclient)
|
||||||
|
[](https://godoc.org/github.com/fsouza/go-dockerclient)
|
||||||
|
|
||||||
|
This package presents a client for the Docker remote API. It also provides
|
||||||
|
support for the extensions in the [Swarm API](https://docs.docker.com/swarm/swarm-api/).
|
||||||
|
|
||||||
|
This package also provides support for docker's network API, which is a simple
|
||||||
|
passthrough to the libnetwork remote API. Note that docker's network API is
|
||||||
|
only available in docker 1.8 and above, and only enabled in docker if
|
||||||
|
DOCKER_EXPERIMENTAL is defined during the docker build process.
|
||||||
|
|
||||||
|
For more details, check the [remote API documentation](http://docs.docker.com/engine/reference/api/docker_remote_api/).
|
||||||
|
|
||||||
|
## Vendoring
|
||||||
|
|
||||||
|
If you are having issues with Go 1.5 and have `GO15VENDOREXPERIMENT` set with an application that has go-dockerclient vendored,
|
||||||
|
please update your vendoring of go-dockerclient :) We recently moved the `vendor` directory to `external` so that go-dockerclient
|
||||||
|
is compatible with this configuration. See [338](https://github.com/fsouza/go-dockerclient/issues/338) and [339](https://github.com/fsouza/go-dockerclient/pull/339)
|
||||||
|
for details.
|
||||||
|
|
||||||
|
## Example
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/fsouza/go-dockerclient"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
endpoint := "unix:///var/run/docker.sock"
|
||||||
|
client, _ := docker.NewClient(endpoint)
|
||||||
|
imgs, _ := client.ListImages(docker.ListImagesOptions{All: false})
|
||||||
|
for _, img := range imgs {
|
||||||
|
fmt.Println("ID: ", img.ID)
|
||||||
|
fmt.Println("RepoTags: ", img.RepoTags)
|
||||||
|
fmt.Println("Created: ", img.Created)
|
||||||
|
fmt.Println("Size: ", img.Size)
|
||||||
|
fmt.Println("VirtualSize: ", img.VirtualSize)
|
||||||
|
fmt.Println("ParentId: ", img.ParentID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Using with TLS
|
||||||
|
|
||||||
|
In order to instantiate the client for a TLS-enabled daemon, you should use NewTLSClient, passing the endpoint and path for key and certificates as parameters.
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/fsouza/go-dockerclient"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
endpoint := "tcp://[ip]:[port]"
|
||||||
|
path := os.Getenv("DOCKER_CERT_PATH")
|
||||||
|
ca := fmt.Sprintf("%s/ca.pem", path)
|
||||||
|
cert := fmt.Sprintf("%s/cert.pem", path)
|
||||||
|
key := fmt.Sprintf("%s/key.pem", path)
|
||||||
|
client, _ := docker.NewTLSClient(endpoint, cert, key, ca)
|
||||||
|
// use client
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
If using [docker-machine](https://docs.docker.com/machine/), or another application that exports environment variables
|
||||||
|
`DOCKER_HOST, DOCKER_TLS_VERIFY, DOCKER_CERT_PATH`, you can use NewClientFromEnv.
|
||||||
|
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/fsouza/go-dockerclient"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
client, _ := docker.NewClientFromEnv()
|
||||||
|
// use client
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
See the documentation for more details.
|
||||||
|
|
||||||
|
## Developing
|
||||||
|
|
||||||
|
All development commands can be seen in the [Makefile](Makefile).
|
||||||
|
|
||||||
|
Committed code must pass:
|
||||||
|
|
||||||
|
* [golint](https://github.com/golang/lint)
|
||||||
|
* [go vet](https://godoc.org/golang.org/x/tools/cmd/vet)
|
||||||
|
* [gofmt](https://golang.org/cmd/gofmt)
|
||||||
|
* [go test](https://golang.org/cmd/go/#hdr-Test_packages)
|
||||||
|
|
||||||
|
Running `make test` will check all of these. If your editor does not automatically call gofmt, `make fmt` will format all go files in this repository.
|
||||||
138
vendor/github.com/fsouza/go-dockerclient/auth.go
generated
vendored
Normal file
138
vendor/github.com/fsouza/go-dockerclient/auth.go
generated
vendored
Normal file
@ -0,0 +1,138 @@
|
|||||||
|
// Copyright 2015 go-dockerclient authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package docker
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ErrCannotParseDockercfg is the error returned by NewAuthConfigurations when the dockercfg cannot be parsed.
|
||||||
|
var ErrCannotParseDockercfg = errors.New("Failed to read authentication from dockercfg")
|
||||||
|
|
||||||
|
// AuthConfiguration represents authentication options to use in the PushImage
|
||||||
|
// method. It represents the authentication in the Docker index server.
|
||||||
|
type AuthConfiguration struct {
|
||||||
|
Username string `json:"username,omitempty"`
|
||||||
|
Password string `json:"password,omitempty"`
|
||||||
|
Email string `json:"email,omitempty"`
|
||||||
|
ServerAddress string `json:"serveraddress,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// AuthConfigurations represents authentication options to use for the
|
||||||
|
// PushImage method accommodating the new X-Registry-Config header
|
||||||
|
type AuthConfigurations struct {
|
||||||
|
Configs map[string]AuthConfiguration `json:"configs"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// AuthConfigurations119 is used to serialize a set of AuthConfigurations
|
||||||
|
// for Docker API >= 1.19.
|
||||||
|
type AuthConfigurations119 map[string]AuthConfiguration
|
||||||
|
|
||||||
|
// dockerConfig represents a registry authentation configuration from the
|
||||||
|
// .dockercfg file.
|
||||||
|
type dockerConfig struct {
|
||||||
|
Auth string `json:"auth"`
|
||||||
|
Email string `json:"email"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewAuthConfigurationsFromDockerCfg returns AuthConfigurations from the
|
||||||
|
// ~/.dockercfg file.
|
||||||
|
func NewAuthConfigurationsFromDockerCfg() (*AuthConfigurations, error) {
|
||||||
|
var r io.Reader
|
||||||
|
var err error
|
||||||
|
p := path.Join(os.Getenv("HOME"), ".docker", "config.json")
|
||||||
|
r, err = os.Open(p)
|
||||||
|
if err != nil {
|
||||||
|
p := path.Join(os.Getenv("HOME"), ".dockercfg")
|
||||||
|
r, err = os.Open(p)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return NewAuthConfigurations(r)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewAuthConfigurations returns AuthConfigurations from a JSON encoded string in the
|
||||||
|
// same format as the .dockercfg file.
|
||||||
|
func NewAuthConfigurations(r io.Reader) (*AuthConfigurations, error) {
|
||||||
|
var auth *AuthConfigurations
|
||||||
|
confs, err := parseDockerConfig(r)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
auth, err = authConfigs(confs)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return auth, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseDockerConfig(r io.Reader) (map[string]dockerConfig, error) {
|
||||||
|
buf := new(bytes.Buffer)
|
||||||
|
buf.ReadFrom(r)
|
||||||
|
byteData := buf.Bytes()
|
||||||
|
|
||||||
|
confsWrapper := struct {
|
||||||
|
Auths map[string]dockerConfig `json:"auths"`
|
||||||
|
}{}
|
||||||
|
if err := json.Unmarshal(byteData, &confsWrapper); err == nil {
|
||||||
|
if len(confsWrapper.Auths) > 0 {
|
||||||
|
return confsWrapper.Auths, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var confs map[string]dockerConfig
|
||||||
|
if err := json.Unmarshal(byteData, &confs); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return confs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// authConfigs converts a dockerConfigs map to a AuthConfigurations object.
|
||||||
|
func authConfigs(confs map[string]dockerConfig) (*AuthConfigurations, error) {
|
||||||
|
c := &AuthConfigurations{
|
||||||
|
Configs: make(map[string]AuthConfiguration),
|
||||||
|
}
|
||||||
|
for reg, conf := range confs {
|
||||||
|
data, err := base64.StdEncoding.DecodeString(conf.Auth)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
userpass := strings.SplitN(string(data), ":", 2)
|
||||||
|
if len(userpass) != 2 {
|
||||||
|
return nil, ErrCannotParseDockercfg
|
||||||
|
}
|
||||||
|
c.Configs[reg] = AuthConfiguration{
|
||||||
|
Email: conf.Email,
|
||||||
|
Username: userpass[0],
|
||||||
|
Password: userpass[1],
|
||||||
|
ServerAddress: reg,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return c, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// AuthCheck validates the given credentials. It returns nil if successful.
|
||||||
|
//
|
||||||
|
// See https://goo.gl/m2SleN for more details.
|
||||||
|
func (c *Client) AuthCheck(conf *AuthConfiguration) error {
|
||||||
|
if conf == nil {
|
||||||
|
return fmt.Errorf("conf is nil")
|
||||||
|
}
|
||||||
|
resp, err := c.do("POST", "/auth", doOptions{data: conf})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
43
vendor/github.com/fsouza/go-dockerclient/change.go
generated
vendored
Normal file
43
vendor/github.com/fsouza/go-dockerclient/change.go
generated
vendored
Normal file
@ -0,0 +1,43 @@
|
|||||||
|
// Copyright 2014 go-dockerclient authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package docker
|
||||||
|
|
||||||
|
import "fmt"
|
||||||
|
|
||||||
|
// ChangeType is a type for constants indicating the type of change
// in a container
type ChangeType int

const (
	// ChangeModify is the ChangeType for container modifications
	ChangeModify ChangeType = iota

	// ChangeAdd is the ChangeType for additions to a container
	ChangeAdd

	// ChangeDelete is the ChangeType for deletions from a container
	ChangeDelete
)

// Change represents a change in a container.
//
// See https://goo.gl/9GsTIF for more details.
type Change struct {
	Path string
	Kind ChangeType
}

// String renders the change in docker's "diff" notation: a one-letter
// kind label ("C" modified, "A" added, "D" deleted) followed by the path.
func (change *Change) String() string {
	var label string
	switch change.Kind {
	case ChangeAdd:
		label = "A"
	case ChangeDelete:
		label = "D"
	case ChangeModify:
		label = "C"
	}
	return fmt.Sprintf("%s %s", label, change.Path)
}
|
||||||
930
vendor/github.com/fsouza/go-dockerclient/client.go
generated
vendored
Normal file
930
vendor/github.com/fsouza/go-dockerclient/client.go
generated
vendored
Normal file
@ -0,0 +1,930 @@
|
|||||||
|
// Copyright 2015 go-dockerclient authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package docker provides a client for the Docker remote API.
|
||||||
|
//
|
||||||
|
// See https://goo.gl/G3plxW for more details on the remote API.
|
||||||
|
package docker
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"crypto/tls"
|
||||||
|
"crypto/x509"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httputil"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"reflect"
|
||||||
|
"runtime"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts"
|
||||||
|
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir"
|
||||||
|
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy"
|
||||||
|
"github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp"
|
||||||
|
)
|
||||||
|
|
||||||
|
const userAgent = "go-dockerclient"
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrInvalidEndpoint is returned when the endpoint is not a valid HTTP URL.
|
||||||
|
ErrInvalidEndpoint = errors.New("invalid endpoint")
|
||||||
|
|
||||||
|
// ErrConnectionRefused is returned when the client cannot connect to the given endpoint.
|
||||||
|
ErrConnectionRefused = errors.New("cannot connect to Docker endpoint")
|
||||||
|
|
||||||
|
apiVersion112, _ = NewAPIVersion("1.12")
|
||||||
|
|
||||||
|
apiVersion119, _ = NewAPIVersion("1.19")
|
||||||
|
)
|
||||||
|
|
||||||
|
// APIVersion is an internal representation of a version of the Remote API.
type APIVersion []int

// NewAPIVersion returns an instance of APIVersion for the given string.
//
// The given string must be in the form <major>.<minor>.<patch>, where <major>,
// <minor> and <patch> are integer numbers.
func NewAPIVersion(input string) (APIVersion, error) {
	if !strings.Contains(input, ".") {
		return nil, fmt.Errorf("Unable to parse version %q", input)
	}
	// Drop any suffix after the first "-" (e.g. "1.10.3-rc1" -> "1.10.3").
	base := strings.Split(input, "-")[0]
	parts := strings.Split(base, ".")
	version := make(APIVersion, len(parts))
	for idx, part := range parts {
		num, convErr := strconv.Atoi(part)
		if convErr != nil {
			return nil, fmt.Errorf("Unable to parse version %q: %q is not an integer", input, part)
		}
		version[idx] = num
	}
	return version, nil
}

// String renders the version as dot-separated integers, e.g. "1.19".
func (version APIVersion) String() string {
	parts := make([]string, len(version))
	for i, val := range version {
		parts[i] = strconv.Itoa(val)
	}
	return strings.Join(parts, ".")
}

// LessThan is a function for comparing APIVersion structs
func (version APIVersion) LessThan(other APIVersion) bool {
	return version.compare(other) < 0
}

// LessThanOrEqualTo is a function for comparing APIVersion structs
func (version APIVersion) LessThanOrEqualTo(other APIVersion) bool {
	return version.compare(other) <= 0
}

// GreaterThan is a function for comparing APIVersion structs
func (version APIVersion) GreaterThan(other APIVersion) bool {
	return version.compare(other) > 0
}

// GreaterThanOrEqualTo is a function for comparing APIVersion structs
func (version APIVersion) GreaterThanOrEqualTo(other APIVersion) bool {
	return version.compare(other) >= 0
}

// compare returns -1, 0, or 1 as version is less than, equal to, or greater
// than other. Components are compared left to right; when one version is a
// prefix of the other, the longer one is considered greater.
func (version APIVersion) compare(other APIVersion) int {
	for i, v := range version {
		if i >= len(other) {
			break
		}
		switch {
		case v < other[i]:
			return -1
		case v > other[i]:
			return 1
		}
	}
	switch {
	case len(version) > len(other):
		return 1
	case len(version) < len(other):
		return -1
	}
	return 0
}
|
||||||
|
|
||||||
|
// Client is the basic type of this package. It provides methods for
|
||||||
|
// interaction with the API.
|
||||||
|
type Client struct {
|
||||||
|
SkipServerVersionCheck bool
|
||||||
|
HTTPClient *http.Client
|
||||||
|
TLSConfig *tls.Config
|
||||||
|
Dialer *net.Dialer
|
||||||
|
|
||||||
|
endpoint string
|
||||||
|
endpointURL *url.URL
|
||||||
|
eventMonitor *eventMonitoringState
|
||||||
|
requestedAPIVersion APIVersion
|
||||||
|
serverAPIVersion APIVersion
|
||||||
|
expectedAPIVersion APIVersion
|
||||||
|
unixHTTPClient *http.Client
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewClient returns a Client instance ready for communication with the given
|
||||||
|
// server endpoint. It will use the latest remote API version available in the
|
||||||
|
// server.
|
||||||
|
func NewClient(endpoint string) (*Client, error) {
|
||||||
|
client, err := NewVersionedClient(endpoint, "")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
client.SkipServerVersionCheck = true
|
||||||
|
return client, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTLSClient returns a Client instance ready for TLS communications with the givens
|
||||||
|
// server endpoint, key and certificates . It will use the latest remote API version
|
||||||
|
// available in the server.
|
||||||
|
func NewTLSClient(endpoint string, cert, key, ca string) (*Client, error) {
|
||||||
|
client, err := NewVersionedTLSClient(endpoint, cert, key, ca, "")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
client.SkipServerVersionCheck = true
|
||||||
|
return client, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTLSClientFromBytes returns a Client instance ready for TLS communications with the givens
|
||||||
|
// server endpoint, key and certificates (passed inline to the function as opposed to being
|
||||||
|
// read from a local file). It will use the latest remote API version available in the server.
|
||||||
|
func NewTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte) (*Client, error) {
|
||||||
|
client, err := NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, "")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
client.SkipServerVersionCheck = true
|
||||||
|
return client, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewVersionedClient returns a Client instance ready for communication with
|
||||||
|
// the given server endpoint, using a specific remote API version.
|
||||||
|
func NewVersionedClient(endpoint string, apiVersionString string) (*Client, error) {
|
||||||
|
u, err := parseEndpoint(endpoint, false)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var requestedAPIVersion APIVersion
|
||||||
|
if strings.Contains(apiVersionString, ".") {
|
||||||
|
requestedAPIVersion, err = NewAPIVersion(apiVersionString)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return &Client{
|
||||||
|
HTTPClient: cleanhttp.DefaultClient(),
|
||||||
|
Dialer: &net.Dialer{},
|
||||||
|
endpoint: endpoint,
|
||||||
|
endpointURL: u,
|
||||||
|
eventMonitor: new(eventMonitoringState),
|
||||||
|
requestedAPIVersion: requestedAPIVersion,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewVersionnedTLSClient returns a TLS-enabled Client for the given endpoint.
//
// Deprecated: use NewVersionedTLSClient instead.
func NewVersionnedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) {
	return NewVersionedTLSClient(endpoint, cert, key, ca, apiVersionString)
}
|
||||||
|
|
||||||
|
// NewVersionedTLSClient returns a Client instance ready for TLS communications with the givens
|
||||||
|
// server endpoint, key and certificates, using a specific remote API version.
|
||||||
|
func NewVersionedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) {
|
||||||
|
certPEMBlock, err := ioutil.ReadFile(cert)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
keyPEMBlock, err := ioutil.ReadFile(key)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
caPEMCert, err := ioutil.ReadFile(ca)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, apiVersionString)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewClientFromEnv returns a Client instance ready for communication created from
|
||||||
|
// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH.
|
||||||
|
//
|
||||||
|
// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68.
|
||||||
|
// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7.
|
||||||
|
func NewClientFromEnv() (*Client, error) {
|
||||||
|
client, err := NewVersionedClientFromEnv("")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
client.SkipServerVersionCheck = true
|
||||||
|
return client, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewVersionedClientFromEnv returns a Client instance ready for TLS communications created from
|
||||||
|
// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH,
|
||||||
|
// and using a specific remote API version.
|
||||||
|
//
|
||||||
|
// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68.
|
||||||
|
// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7.
|
||||||
|
func NewVersionedClientFromEnv(apiVersionString string) (*Client, error) {
|
||||||
|
dockerEnv, err := getDockerEnv()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
dockerHost := dockerEnv.dockerHost
|
||||||
|
if dockerEnv.dockerTLSVerify {
|
||||||
|
parts := strings.SplitN(dockerEnv.dockerHost, "://", 2)
|
||||||
|
if len(parts) != 2 {
|
||||||
|
return nil, fmt.Errorf("could not split %s into two parts by ://", dockerHost)
|
||||||
|
}
|
||||||
|
cert := filepath.Join(dockerEnv.dockerCertPath, "cert.pem")
|
||||||
|
key := filepath.Join(dockerEnv.dockerCertPath, "key.pem")
|
||||||
|
ca := filepath.Join(dockerEnv.dockerCertPath, "ca.pem")
|
||||||
|
return NewVersionedTLSClient(dockerEnv.dockerHost, cert, key, ca, apiVersionString)
|
||||||
|
}
|
||||||
|
return NewVersionedClient(dockerEnv.dockerHost, apiVersionString)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewVersionedTLSClientFromBytes returns a Client instance ready for TLS communications with the givens
|
||||||
|
// server endpoint, key and certificates (passed inline to the function as opposed to being
|
||||||
|
// read from a local file), using a specific remote API version.
|
||||||
|
func NewVersionedTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte, apiVersionString string) (*Client, error) {
|
||||||
|
u, err := parseEndpoint(endpoint, true)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var requestedAPIVersion APIVersion
|
||||||
|
if strings.Contains(apiVersionString, ".") {
|
||||||
|
requestedAPIVersion, err = NewAPIVersion(apiVersionString)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if certPEMBlock == nil || keyPEMBlock == nil {
|
||||||
|
return nil, errors.New("Both cert and key are required")
|
||||||
|
}
|
||||||
|
tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
tlsConfig := &tls.Config{Certificates: []tls.Certificate{tlsCert}}
|
||||||
|
if caPEMCert == nil {
|
||||||
|
tlsConfig.InsecureSkipVerify = true
|
||||||
|
} else {
|
||||||
|
caPool := x509.NewCertPool()
|
||||||
|
if !caPool.AppendCertsFromPEM(caPEMCert) {
|
||||||
|
return nil, errors.New("Could not add RootCA pem")
|
||||||
|
}
|
||||||
|
tlsConfig.RootCAs = caPool
|
||||||
|
}
|
||||||
|
tr := cleanhttp.DefaultTransport()
|
||||||
|
tr.TLSClientConfig = tlsConfig
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &Client{
|
||||||
|
HTTPClient: &http.Client{Transport: tr},
|
||||||
|
TLSConfig: tlsConfig,
|
||||||
|
Dialer: &net.Dialer{},
|
||||||
|
endpoint: endpoint,
|
||||||
|
endpointURL: u,
|
||||||
|
eventMonitor: new(eventMonitoringState),
|
||||||
|
requestedAPIVersion: requestedAPIVersion,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) checkAPIVersion() error {
|
||||||
|
serverAPIVersionString, err := c.getServerAPIVersionString()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
c.serverAPIVersion, err = NewAPIVersion(serverAPIVersionString)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if c.requestedAPIVersion == nil {
|
||||||
|
c.expectedAPIVersion = c.serverAPIVersion
|
||||||
|
} else {
|
||||||
|
c.expectedAPIVersion = c.requestedAPIVersion
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Endpoint returns the current endpoint. It's useful for getting the endpoint
// when using functions that get this data from the environment (like
// NewClientFromEnv).
func (c *Client) Endpoint() string {
	return c.endpoint
}
|
||||||
|
|
||||||
|
// Ping pings the docker server
|
||||||
|
//
|
||||||
|
// See https://goo.gl/kQCfJj for more details.
|
||||||
|
func (c *Client) Ping() error {
|
||||||
|
path := "/_ping"
|
||||||
|
resp, err := c.do("GET", path, doOptions{})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
return newError(resp)
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getServerAPIVersionString queries GET /version on the daemon and returns the
// string value of the "ApiVersion" field from the JSON response.
//
// NOTE(review): when the response decodes but has no string "ApiVersion"
// field, this returns ("", nil) — an empty version with no error. Confirm
// that callers (checkAPIVersion via NewAPIVersion) handle the empty string.
func (c *Client) getServerAPIVersionString() (version string, err error) {
	resp, err := c.do("GET", "/version", doOptions{})
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("Received unexpected status %d while trying to retrieve the server version", resp.StatusCode)
	}
	var versionResponse map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&versionResponse); err != nil {
		return "", err
	}
	if version, ok := (versionResponse["ApiVersion"]).(string); ok {
		return version, nil
	}
	return "", nil
}
|
||||||
|
|
||||||
|
// doOptions carries the optional parts of a plain (non-streaming) API request.
type doOptions struct {
	data      interface{}       // request payload, marshaled to JSON when non-nil
	forceJSON bool              // marshal data even when it is nil (sends "null")
	headers   map[string]string // extra headers set on the request
}
|
||||||
|
|
||||||
|
// do performs a plain (request/response) call against the daemon. It marshals
// doOptions.data to JSON for the body, lazily resolves the server API version,
// routes unix-socket endpoints through the fake-URL client, and returns the
// response for any 2xx/3xx status.
func (c *Client) do(method, path string, doOptions doOptions) (*http.Response, error) {
	var params io.Reader
	if doOptions.data != nil || doOptions.forceJSON {
		buf, err := json.Marshal(doOptions.data)
		if err != nil {
			return nil, err
		}
		params = bytes.NewBuffer(buf)
	}
	// Resolve the server API version once, unless the caller opted out or this
	// request is the version probe itself.
	if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
		err := c.checkAPIVersion()
		if err != nil {
			return nil, err
		}
	}
	httpClient := c.HTTPClient
	protocol := c.endpointURL.Scheme
	var u string
	if protocol == "unix" {
		// UNIX sockets need a transport that ignores the URL's host part.
		httpClient = c.unixClient()
		u = c.getFakeUnixURL(path)
	} else {
		u = c.getURL(path)
	}
	req, err := http.NewRequest(method, u, params)
	if err != nil {
		return nil, err
	}
	req.Header.Set("User-Agent", userAgent)
	if doOptions.data != nil {
		req.Header.Set("Content-Type", "application/json")
	} else if method == "POST" {
		req.Header.Set("Content-Type", "plain/text")
	}

	for k, v := range doOptions.headers {
		req.Header.Set(k, v)
	}
	resp, err := httpClient.Do(req)
	if err != nil {
		if strings.Contains(err.Error(), "connection refused") {
			return nil, ErrConnectionRefused
		}
		return nil, err
	}
	// Non-2xx/3xx responses become *Error; newError consumes and closes the body.
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return nil, newError(resp)
	}
	return resp, nil
}
|
||||||
|
|
||||||
|
// streamOptions carries the optional parts of a streaming API request.
type streamOptions struct {
	setRawTerminal bool              // copy the body verbatim instead of demultiplexing stdout/stderr
	rawJSONStream  bool              // with JSON responses: copy the raw stream without decoding
	useJSONDecoder bool              // force decoding the response as a stream of JSON messages
	headers        map[string]string // extra headers set on the request
	in             io.Reader         // request body
	stdout         io.Writer         // destination for stream/status output (defaults to ioutil.Discard)
	stderr         io.Writer         // destination for demultiplexed stderr (defaults to ioutil.Discard)
	// timeout is the initial connection timeout
	timeout time.Duration
}
|
||||||
|
|
||||||
|
// stream performs a streaming call against the daemon and copies the response
// to streamOptions.stdout/stderr as it arrives. JSON responses are decoded
// message-by-message (unless rawJSONStream asks for a verbatim copy); other
// responses are either copied verbatim (setRawTerminal) or demultiplexed with
// stdcopy. For unix-socket endpoints the request is written on a raw dialed
// connection so a read deadline can bound the initial response.
func (c *Client) stream(method, path string, streamOptions streamOptions) error {
	// POST/PUT must carry a body, even an empty one.
	if (method == "POST" || method == "PUT") && streamOptions.in == nil {
		streamOptions.in = bytes.NewReader(nil)
	}
	// Resolve the server API version once, unless the caller opted out or this
	// request is the version probe itself.
	if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
		err := c.checkAPIVersion()
		if err != nil {
			return err
		}
	}
	req, err := http.NewRequest(method, c.getURL(path), streamOptions.in)
	if err != nil {
		return err
	}
	req.Header.Set("User-Agent", userAgent)
	if method == "POST" {
		req.Header.Set("Content-Type", "plain/text")
	}
	for key, val := range streamOptions.headers {
		req.Header.Set(key, val)
	}
	var resp *http.Response
	protocol := c.endpointURL.Scheme
	address := c.endpointURL.Path
	if streamOptions.stdout == nil {
		streamOptions.stdout = ioutil.Discard
	}
	if streamOptions.stderr == nil {
		streamOptions.stderr = ioutil.Discard
	}
	if protocol == "unix" {
		dial, err := c.Dialer.Dial(protocol, address)
		if err != nil {
			return err
		}
		defer dial.Close()
		breader := bufio.NewReader(dial)
		err = req.Write(dial)
		if err != nil {
			return err
		}

		// ReadResponse may hang if server does not reply
		if streamOptions.timeout > 0 {
			dial.SetDeadline(time.Now().Add(streamOptions.timeout))
		}

		if resp, err = http.ReadResponse(breader, req); err != nil {
			// Cancel timeout for future I/O operations
			if streamOptions.timeout > 0 {
				dial.SetDeadline(time.Time{})
			}
			if strings.Contains(err.Error(), "connection refused") {
				return ErrConnectionRefused
			}
			return err
		}
	} else {
		if resp, err = c.HTTPClient.Do(req); err != nil {
			if strings.Contains(err.Error(), "connection refused") {
				return ErrConnectionRefused
			}
			return err
		}
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return newError(resp)
	}
	if streamOptions.useJSONDecoder || resp.Header.Get("Content-Type") == "application/json" {
		// if we want to get raw json stream, just copy it back to output
		// without decoding it
		if streamOptions.rawJSONStream {
			_, err = io.Copy(streamOptions.stdout, resp.Body)
			return err
		}
		// Decode the body as a sequence of jsonMessage values, echoing each
		// one to stdout; a message carrying Error aborts the stream.
		dec := json.NewDecoder(resp.Body)
		for {
			var m jsonMessage
			if err := dec.Decode(&m); err == io.EOF {
				break
			} else if err != nil {
				return err
			}
			if m.Stream != "" {
				fmt.Fprint(streamOptions.stdout, m.Stream)
			} else if m.Progress != "" {
				fmt.Fprintf(streamOptions.stdout, "%s %s\r", m.Status, m.Progress)
			} else if m.Error != "" {
				return errors.New(m.Error)
			}
			if m.Status != "" {
				fmt.Fprintln(streamOptions.stdout, m.Status)
			}
		}
	} else {
		if streamOptions.setRawTerminal {
			_, err = io.Copy(streamOptions.stdout, resp.Body)
		} else {
			_, err = stdcopy.StdCopy(streamOptions.stdout, streamOptions.stderr, resp.Body)
		}
		return err
	}
	return nil
}
|
||||||
|
|
||||||
|
// hijackOptions carries the optional parts of a hijacked (raw TCP) API
// request, used for attach/exec style endpoints.
type hijackOptions struct {
	success        chan struct{} // when non-nil, signaled after the request is sent; hijack then waits for a reply on it
	setRawTerminal bool          // copy output verbatim instead of demultiplexing stdout/stderr
	in             io.Reader     // stdin stream copied to the server
	stdout         io.Writer     // destination for stdout (nil means discard)
	stderr         io.Writer     // destination for stderr (nil means discard)
	data           interface{}   // request payload, marshaled to JSON when non-nil
}
|
||||||
|
|
||||||
|
// CloseWaiter is an interface with methods for closing the underlying resource
// and then waiting for it to finish processing.
type CloseWaiter interface {
	io.Closer
	// Wait blocks until the resource's background work has finished,
	// returning its terminal error, if any.
	Wait() error
}
|
||||||
|
|
||||||
|
// waiterFunc adapts a plain function to the Wait half of CloseWaiter.
type waiterFunc func() error

// Wait invokes the wrapped function.
func (w waiterFunc) Wait() error { return w() }
|
||||||
|
|
||||||
|
// closerFunc adapts a plain function to the io.Closer half of CloseWaiter.
type closerFunc func() error

// Close invokes the wrapped function.
func (c closerFunc) Close() error { return c() }
|
||||||
|
|
||||||
|
// hijack sends the request, takes over the underlying connection as a raw TCP
// stream, and wires hijackOptions.in/stdout/stderr to it in background
// goroutines. It returns a CloseWaiter whose Close abandons those goroutines
// (via the quit channel) and whose Wait blocks for their combined result.
//
// NOTE(review): the error from clientconn.Do(req) is discarded below and the
// connection is hijacked regardless — confirm this is intentional before
// changing it (httputil.ClientConn typically reports ErrPersistEOF here).
func (c *Client) hijack(method, path string, hijackOptions hijackOptions) (CloseWaiter, error) {
	// Resolve the server API version once, unless the caller opted out or this
	// request is the version probe itself.
	if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
		err := c.checkAPIVersion()
		if err != nil {
			return nil, err
		}
	}
	var params io.Reader
	if hijackOptions.data != nil {
		buf, err := json.Marshal(hijackOptions.data)
		if err != nil {
			return nil, err
		}
		params = bytes.NewBuffer(buf)
	}
	req, err := http.NewRequest(method, c.getURL(path), params)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Connection", "Upgrade")
	req.Header.Set("Upgrade", "tcp")
	protocol := c.endpointURL.Scheme
	address := c.endpointURL.Path
	if protocol != "unix" {
		protocol = "tcp"
		address = c.endpointURL.Host
	}
	var dial net.Conn
	if c.TLSConfig != nil && protocol != "unix" {
		dial, err = tlsDialWithDialer(c.Dialer, protocol, address, c.TLSConfig)
		if err != nil {
			return nil, err
		}
	} else {
		dial, err = c.Dialer.Dial(protocol, address)
		if err != nil {
			return nil, err
		}
	}

	errs := make(chan error)
	quit := make(chan struct{})
	go func() {
		clientconn := httputil.NewClientConn(dial, nil)
		defer clientconn.Close()
		clientconn.Do(req)
		if hijackOptions.success != nil {
			hijackOptions.success <- struct{}{}
			<-hijackOptions.success
		}
		rwc, br := clientconn.Hijack()
		defer rwc.Close()

		errChanOut := make(chan error, 1)
		errChanIn := make(chan error, 1)
		if hijackOptions.stdout == nil && hijackOptions.stderr == nil {
			close(errChanOut)
		} else {
			// Only copy if hijackOptions.stdout and/or hijackOptions.stderr is actually set.
			// Otherwise, if the only stream you care about is stdin, your attach session
			// will "hang" until the container terminates, even though you're not reading
			// stdout/stderr
			if hijackOptions.stdout == nil {
				hijackOptions.stdout = ioutil.Discard
			}
			if hijackOptions.stderr == nil {
				hijackOptions.stderr = ioutil.Discard
			}

			go func() {
				defer func() {
					// Output finished: close stdin (if closable) and unblock
					// the stdin wait below even when no input copy ran.
					if hijackOptions.in != nil {
						if closer, ok := hijackOptions.in.(io.Closer); ok {
							closer.Close()
						}
						errChanIn <- nil
					}
				}()

				var err error
				if hijackOptions.setRawTerminal {
					_, err = io.Copy(hijackOptions.stdout, br)
				} else {
					_, err = stdcopy.StdCopy(hijackOptions.stdout, hijackOptions.stderr, br)
				}
				errChanOut <- err
			}()
		}

		go func() {
			var err error
			if hijackOptions.in != nil {
				_, err = io.Copy(rwc, hijackOptions.in)
			}
			errChanIn <- err
			// Half-close the write side so the server sees EOF on stdin.
			rwc.(interface {
				CloseWrite() error
			}).CloseWrite()
		}()

		var errIn error
		select {
		case errIn = <-errChanIn:
		case <-quit:
			return
		}

		var errOut error
		select {
		case errOut = <-errChanOut:
		case <-quit:
			return
		}

		// Report the stdin error first, the output error otherwise.
		if errIn != nil {
			errs <- errIn
		} else {
			errs <- errOut
		}
	}()

	return struct {
		closerFunc
		waiterFunc
	}{
		closerFunc(func() error { close(quit); return nil }),
		waiterFunc(func() error { return <-errs }),
	}, nil
}
|
||||||
|
|
||||||
|
func (c *Client) getURL(path string) string {
|
||||||
|
urlStr := strings.TrimRight(c.endpointURL.String(), "/")
|
||||||
|
if c.endpointURL.Scheme == "unix" {
|
||||||
|
urlStr = ""
|
||||||
|
}
|
||||||
|
if c.requestedAPIVersion != nil {
|
||||||
|
return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%s%s", urlStr, path)
|
||||||
|
}
|
||||||
|
|
||||||
|
// getFakeUnixURL returns the URL needed to make an HTTP request over a UNIX
|
||||||
|
// domain socket to the given path.
|
||||||
|
func (c *Client) getFakeUnixURL(path string) string {
|
||||||
|
u := *c.endpointURL // Copy.
|
||||||
|
|
||||||
|
// Override URL so that net/http will not complain.
|
||||||
|
u.Scheme = "http"
|
||||||
|
u.Host = "unix.sock" // Doesn't matter what this is - it's not used.
|
||||||
|
u.Path = ""
|
||||||
|
urlStr := strings.TrimRight(u.String(), "/")
|
||||||
|
if c.requestedAPIVersion != nil {
|
||||||
|
return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%s%s", urlStr, path)
|
||||||
|
}
|
||||||
|
|
||||||
|
// unixClient lazily builds and caches an HTTP client whose transport always
// dials the UNIX socket at c.endpointURL.Path, ignoring the request's host.
//
// NOTE(review): the cache check and assignment are not synchronized —
// concurrent first calls may each build a transport; confirm callers
// serialize or that a duplicate transient transport is acceptable.
func (c *Client) unixClient() *http.Client {
	if c.unixHTTPClient != nil {
		return c.unixHTTPClient
	}
	socketPath := c.endpointURL.Path
	tr := &http.Transport{
		Dial: func(network, addr string) (net.Conn, error) {
			return c.Dialer.Dial("unix", socketPath)
		},
	}
	cleanhttp.SetTransportFinalizer(tr)
	c.unixHTTPClient = &http.Client{Transport: tr}
	return c.unixHTTPClient
}
|
||||||
|
|
||||||
|
// jsonMessage is one decoded element of the JSON stream the daemon emits for
// long-running operations; stream() echoes these to the caller's stdout.
type jsonMessage struct {
	Status   string `json:"status,omitempty"`
	Progress string `json:"progress,omitempty"`
	Error    string `json:"error,omitempty"`
	Stream   string `json:"stream,omitempty"`
}
|
||||||
|
|
||||||
|
func queryString(opts interface{}) string {
|
||||||
|
if opts == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
value := reflect.ValueOf(opts)
|
||||||
|
if value.Kind() == reflect.Ptr {
|
||||||
|
value = value.Elem()
|
||||||
|
}
|
||||||
|
if value.Kind() != reflect.Struct {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
items := url.Values(map[string][]string{})
|
||||||
|
for i := 0; i < value.NumField(); i++ {
|
||||||
|
field := value.Type().Field(i)
|
||||||
|
if field.PkgPath != "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
key := field.Tag.Get("qs")
|
||||||
|
if key == "" {
|
||||||
|
key = strings.ToLower(field.Name)
|
||||||
|
} else if key == "-" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
addQueryStringValue(items, key, value.Field(i))
|
||||||
|
}
|
||||||
|
return items.Encode()
|
||||||
|
}
|
||||||
|
|
||||||
|
func addQueryStringValue(items url.Values, key string, v reflect.Value) {
|
||||||
|
switch v.Kind() {
|
||||||
|
case reflect.Bool:
|
||||||
|
if v.Bool() {
|
||||||
|
items.Add(key, "1")
|
||||||
|
}
|
||||||
|
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||||
|
if v.Int() > 0 {
|
||||||
|
items.Add(key, strconv.FormatInt(v.Int(), 10))
|
||||||
|
}
|
||||||
|
case reflect.Float32, reflect.Float64:
|
||||||
|
if v.Float() > 0 {
|
||||||
|
items.Add(key, strconv.FormatFloat(v.Float(), 'f', -1, 64))
|
||||||
|
}
|
||||||
|
case reflect.String:
|
||||||
|
if v.String() != "" {
|
||||||
|
items.Add(key, v.String())
|
||||||
|
}
|
||||||
|
case reflect.Ptr:
|
||||||
|
if !v.IsNil() {
|
||||||
|
if b, err := json.Marshal(v.Interface()); err == nil {
|
||||||
|
items.Add(key, string(b))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case reflect.Map:
|
||||||
|
if len(v.MapKeys()) > 0 {
|
||||||
|
if b, err := json.Marshal(v.Interface()); err == nil {
|
||||||
|
items.Add(key, string(b))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case reflect.Array, reflect.Slice:
|
||||||
|
vLen := v.Len()
|
||||||
|
if vLen > 0 {
|
||||||
|
for i := 0; i < vLen; i++ {
|
||||||
|
addQueryStringValue(items, key, v.Index(i))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error represents failures in the API. It represents a failure from the API.
|
||||||
|
type Error struct {
|
||||||
|
Status int
|
||||||
|
Message string
|
||||||
|
}
|
||||||
|
|
||||||
|
func newError(resp *http.Response) *Error {
|
||||||
|
defer resp.Body.Close()
|
||||||
|
data, err := ioutil.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return &Error{Status: resp.StatusCode, Message: fmt.Sprintf("cannot read body, err: %v", err)}
|
||||||
|
}
|
||||||
|
return &Error{Status: resp.StatusCode, Message: string(data)}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Error) Error() string {
|
||||||
|
return fmt.Sprintf("API error (%d): %s", e.Status, e.Message)
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseEndpoint normalizes and validates a daemon endpoint. A bare host gets a
// tcp:// prefix; tls forces the https scheme; tcp endpoints with a valid port
// (1-65535) are rewritten to http or https. Anything else yields
// ErrInvalidEndpoint.
func parseEndpoint(endpoint string, tls bool) (*url.URL, error) {
	if endpoint != "" && !strings.Contains(endpoint, "://") {
		endpoint = "tcp://" + endpoint
	}
	u, err := url.Parse(endpoint)
	if err != nil {
		return nil, ErrInvalidEndpoint
	}
	if tls {
		u.Scheme = "https"
	}
	switch u.Scheme {
	case "unix":
		return u, nil
	case "http", "https", "tcp":
		_, port, err := net.SplitHostPort(u.Host)
		if err != nil {
			// A host without a port is acceptable; any other split failure
			// makes the endpoint invalid.
			if e, ok := err.(*net.AddrError); ok {
				if e.Err == "missing port in address" {
					return u, nil
				}
			}
			return nil, ErrInvalidEndpoint
		}
		number, err := strconv.ParseInt(port, 10, 64)
		if err == nil && number > 0 && number < 65536 {
			// Rewrite the pseudo-scheme tcp to the actual HTTP scheme.
			if u.Scheme == "tcp" {
				if tls {
					u.Scheme = "https"
				} else {
					u.Scheme = "http"
				}
			}
			return u, nil
		}
		return nil, ErrInvalidEndpoint
	default:
		return nil, ErrInvalidEndpoint
	}
}
|
||||||
|
|
||||||
|
// dockerEnv aggregates the connection settings read from the DOCKER_*
// environment variables (see getDockerEnv).
type dockerEnv struct {
	dockerHost      string // DOCKER_HOST, or the platform default when unset
	dockerTLSVerify bool   // true when DOCKER_TLS_VERIFY is non-empty
	dockerCertPath  string // DOCKER_CERT_PATH, or $HOME/.docker when TLS is on and it is unset
}
|
||||||
|
|
||||||
|
// getDockerEnv reads DOCKER_HOST, DOCKER_TLS_VERIFY and DOCKER_CERT_PATH,
// filling in the platform default host when DOCKER_HOST is unset and, when TLS
// verification is requested without a cert path, defaulting the cert
// directory to $HOME/.docker.
func getDockerEnv() (*dockerEnv, error) {
	dockerHost := os.Getenv("DOCKER_HOST")
	var err error
	if dockerHost == "" {
		dockerHost, err = DefaultDockerHost()
		if err != nil {
			return nil, err
		}
	}
	dockerTLSVerify := os.Getenv("DOCKER_TLS_VERIFY") != ""
	var dockerCertPath string
	if dockerTLSVerify {
		dockerCertPath = os.Getenv("DOCKER_CERT_PATH")
		if dockerCertPath == "" {
			home := homedir.Get()
			if home == "" {
				return nil, errors.New("environment variable HOME must be set if DOCKER_CERT_PATH is not set")
			}
			dockerCertPath = filepath.Join(home, ".docker")
			dockerCertPath, err = filepath.Abs(dockerCertPath)
			if err != nil {
				return nil, err
			}
		}
	}
	return &dockerEnv{
		dockerHost:      dockerHost,
		dockerTLSVerify: dockerTLSVerify,
		dockerCertPath:  dockerCertPath,
	}, nil
}
|
||||||
|
|
||||||
|
// DefaultDockerHost returns the default docker socket for the current OS
|
||||||
|
func DefaultDockerHost() (string, error) {
|
||||||
|
var defaultHost string
|
||||||
|
if runtime.GOOS == "windows" {
|
||||||
|
// If we do not have a host, default to TCP socket on Windows
|
||||||
|
defaultHost = fmt.Sprintf("tcp://%s:%d", opts.DefaultHTTPHost, opts.DefaultHTTPPort)
|
||||||
|
} else {
|
||||||
|
// If we do not have a host, default to unix socket
|
||||||
|
defaultHost = fmt.Sprintf("unix://%s", opts.DefaultUnixSocket)
|
||||||
|
}
|
||||||
|
return opts.ValidateHost(defaultHost)
|
||||||
|
}
|
||||||
1288
vendor/github.com/fsouza/go-dockerclient/container.go
generated
vendored
Normal file
1288
vendor/github.com/fsouza/go-dockerclient/container.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
168
vendor/github.com/fsouza/go-dockerclient/env.go
generated
vendored
Normal file
168
vendor/github.com/fsouza/go-dockerclient/env.go
generated
vendored
Normal file
@ -0,0 +1,168 @@
|
|||||||
|
// Copyright 2014 Docker authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the DOCKER-LICENSE file.
|
||||||
|
|
||||||
|
package docker
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Env represents a list of key-pair represented in the form KEY=VALUE.
type Env []string

// Get returns the string value of the given key.
func (env *Env) Get(key string) (value string) {
	return env.Map()[key]
}

// Exists checks whether the given key is defined in the internal Env
// representation.
func (env *Env) Exists(key string) bool {
	_, exists := env.Map()[key]
	return exists
}

// GetBool returns a boolean representation of the given key. The key is false
// whenever its value is 0, no, false, none or an empty string. Any other value
// will be interpreted as true.
func (env *Env) GetBool(key string) (value bool) {
	s := strings.ToLower(strings.Trim(env.Get(key), " \t"))
	if s == "" || s == "0" || s == "no" || s == "false" || s == "none" {
		return false
	}
	return true
}

// SetBool defines a boolean value to the given key.
func (env *Env) SetBool(key string, value bool) {
	if value {
		env.Set(key, "1")
	} else {
		env.Set(key, "0")
	}
}

// GetInt returns the value of the provided key, converted to int.
//
// If the value cannot be represented as an integer, it returns -1.
func (env *Env) GetInt(key string) int {
	return int(env.GetInt64(key))
}

// SetInt defines an integer value to the given key.
func (env *Env) SetInt(key string, value int) {
	env.Set(key, strconv.Itoa(value))
}

// GetInt64 returns the value of the provided key, converted to int64.
//
// If the value cannot be represented as an integer, it returns -1.
func (env *Env) GetInt64(key string) int64 {
	s := strings.Trim(env.Get(key), " \t")
	val, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return -1
	}
	return val
}

// SetInt64 defines an integer (64-bit wide) value to the given key.
func (env *Env) SetInt64(key string, value int64) {
	env.Set(key, strconv.FormatInt(value, 10))
}

// GetJSON unmarshals the value of the provided key in the provided iface.
//
// iface is a value that can be provided to the json.Unmarshal function.
func (env *Env) GetJSON(key string, iface interface{}) error {
	sval := env.Get(key)
	if sval == "" {
		return nil
	}
	return json.Unmarshal([]byte(sval), iface)
}

// SetJSON marshals the given value to JSON format and stores it using the
// provided key.
func (env *Env) SetJSON(key string, value interface{}) error {
	sval, err := json.Marshal(value)
	if err != nil {
		return err
	}
	env.Set(key, string(sval))
	return nil
}

// GetList returns a list of strings matching the provided key. It handles the
// list as a JSON representation of a list of strings.
//
// If the given key matches to a single string, it will return a list
// containing only the value that matches the key.
func (env *Env) GetList(key string) []string {
	sval := env.Get(key)
	if sval == "" {
		return nil
	}
	var l []string
	if err := json.Unmarshal([]byte(sval), &l); err != nil {
		l = append(l, sval)
	}
	return l
}

// SetList stores the given list in the provided key, after serializing it to
// JSON format.
func (env *Env) SetList(key string, value []string) error {
	return env.SetJSON(key, value)
}

// Set defines the value of a key to the given string.
func (env *Env) Set(key, value string) {
	*env = append(*env, key+"="+value)
}

// Decode decodes `src` as a json dictionary, and adds each decoded key-value
// pair to the environment.
//
// If `src` cannot be decoded as a json dictionary, an error is returned.
func (env *Env) Decode(src io.Reader) error {
	m := make(map[string]interface{})
	if err := json.NewDecoder(src).Decode(&m); err != nil {
		return err
	}
	for k, v := range m {
		env.SetAuto(k, v)
	}
	return nil
}

// SetAuto will try to define the Set* method to call based on the given value.
func (env *Env) SetAuto(key string, value interface{}) {
	if fval, ok := value.(float64); ok {
		env.SetInt64(key, int64(fval))
	} else if sval, ok := value.(string); ok {
		env.Set(key, sval)
	} else if val, err := json.Marshal(value); err == nil {
		env.Set(key, string(val))
	} else {
		env.Set(key, fmt.Sprintf("%v", value))
	}
}

// Map returns the map representation of the env.
func (env *Env) Map() map[string]string {
	if len(*env) == 0 {
		return nil
	}
	m := make(map[string]string)
	for _, kv := range *env {
		parts := strings.SplitN(kv, "=", 2)
		if len(parts) < 2 {
			// Fix: an entry without "=" used to panic here (index out of
			// range on parts[1]); treat it as a key with an empty value.
			m[parts[0]] = ""
			continue
		}
		m[parts[0]] = parts[1]
	}
	return m
}
|
||||||
370
vendor/github.com/fsouza/go-dockerclient/event.go
generated
vendored
Normal file
370
vendor/github.com/fsouza/go-dockerclient/event.go
generated
vendored
Normal file
@ -0,0 +1,370 @@
|
|||||||
|
// Copyright 2015 go-dockerclient authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package docker
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"math"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httputil"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// APIEvents represents events coming from the Docker API
// The fields in the Docker API changed in API version 1.22, and
// events for more than images and containers are now fired off.
// To maintain forward and backward compatibility, go-dockerclient
// replicates the event in both the new and old format as faithfully as possible.
//
// For events that only exist in 1.22 and later, `Status` is filled in as
// `"Type:Action"` instead of just `Action` to allow for older clients to
// differentiate and not break if they rely on the pre-1.22 Status types.
//
// The transformEvent method can be consulted for more information about how
// events are translated from new/old API formats
type APIEvents struct {
	// New API Fields in 1.22
	Action string   `json:"action,omitempty"`
	Type   string   `json:"type,omitempty"`
	Actor  APIActor `json:"actor,omitempty"`

	// Old API fields for < 1.22
	Status string `json:"status,omitempty"`
	ID     string `json:"id,omitempty"`
	From   string `json:"from,omitempty"`

	// Fields in both
	Time     int64 `json:"time,omitempty"`
	TimeNano int64 `json:"timeNano,omitempty"`
}

// APIActor represents an actor that accomplishes something for an event
type APIActor struct {
	ID         string            `json:"id,omitempty"`
	Attributes map[string]string `json:"attributes,omitempty"`
}

// eventMonitoringState holds the shared state of the background event
// monitor: whether it is running, the listener channels events are fanned
// out to, and the timestamp of the last event seen (used to resume the
// stream after a reconnect).
type eventMonitoringState struct {
	sync.RWMutex   // guards the fields below
	sync.WaitGroup // incremented per registered listener and per in-flight sendEvent
	enabled   bool
	lastSeen  *int64 // last event timestamp observed; accessed atomically
	C         chan *APIEvents
	errC      chan error
	listeners []chan<- *APIEvents
}

const (
	// maxMonitorConnRetries is the maximum number of reconnection attempts
	// made by connectWithRetry before giving up.
	maxMonitorConnRetries = 5
	// retryInitialWaitTime is the initial backoff in milliseconds, doubled
	// on each retry (untyped float constant used in math.Pow arithmetic).
	retryInitialWaitTime = 10.
)

var (
	// ErrNoListeners is the error returned when no listeners are available
	// to receive an event.
	ErrNoListeners = errors.New("no listeners present to receive event")

	// ErrListenerAlreadyExists is the error returned when the listener already
	// exists.
	ErrListenerAlreadyExists = errors.New("listener already exists for docker events")

	// EOFEvent is sent when the event listener receives an EOF error.
	EOFEvent = &APIEvents{
		Type:   "EOF",
		Status: "EOF",
	}
)
|
||||||
|
|
||||||
|
// AddEventListener adds a new listener to container events in the Docker API.
|
||||||
|
//
|
||||||
|
// The parameter is a channel through which events will be sent.
|
||||||
|
func (c *Client) AddEventListener(listener chan<- *APIEvents) error {
|
||||||
|
var err error
|
||||||
|
if !c.eventMonitor.isEnabled() {
|
||||||
|
err = c.eventMonitor.enableEventMonitoring(c)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
err = c.eventMonitor.addListener(listener)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveEventListener removes a listener from the monitor.
|
||||||
|
func (c *Client) RemoveEventListener(listener chan *APIEvents) error {
|
||||||
|
err := c.eventMonitor.removeListener(listener)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if len(c.eventMonitor.listeners) == 0 {
|
||||||
|
c.eventMonitor.disableEventMonitoring()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (eventState *eventMonitoringState) addListener(listener chan<- *APIEvents) error {
|
||||||
|
eventState.Lock()
|
||||||
|
defer eventState.Unlock()
|
||||||
|
if listenerExists(listener, &eventState.listeners) {
|
||||||
|
return ErrListenerAlreadyExists
|
||||||
|
}
|
||||||
|
eventState.Add(1)
|
||||||
|
eventState.listeners = append(eventState.listeners, listener)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (eventState *eventMonitoringState) removeListener(listener chan<- *APIEvents) error {
|
||||||
|
eventState.Lock()
|
||||||
|
defer eventState.Unlock()
|
||||||
|
if listenerExists(listener, &eventState.listeners) {
|
||||||
|
var newListeners []chan<- *APIEvents
|
||||||
|
for _, l := range eventState.listeners {
|
||||||
|
if l != listener {
|
||||||
|
newListeners = append(newListeners, l)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
eventState.listeners = newListeners
|
||||||
|
eventState.Add(-1)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (eventState *eventMonitoringState) closeListeners() {
|
||||||
|
for _, l := range eventState.listeners {
|
||||||
|
close(l)
|
||||||
|
eventState.Add(-1)
|
||||||
|
}
|
||||||
|
eventState.listeners = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func listenerExists(a chan<- *APIEvents, list *[]chan<- *APIEvents) bool {
|
||||||
|
for _, b := range *list {
|
||||||
|
if b == a {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// enableEventMonitoring starts the background event monitor goroutine if it
// is not already running, initializing the event/error channels and the
// last-seen timestamp. It always returns nil.
func (eventState *eventMonitoringState) enableEventMonitoring(c *Client) error {
	eventState.Lock()
	defer eventState.Unlock()
	if !eventState.enabled {
		eventState.enabled = true
		var lastSeenDefault = int64(0)
		eventState.lastSeen = &lastSeenDefault
		// Buffered so the stream decoder can get ahead of consumers briefly.
		eventState.C = make(chan *APIEvents, 100)
		eventState.errC = make(chan error, 1)
		go eventState.monitorEvents(c)
	}
	return nil
}
|
||||||
|
|
||||||
|
// disableEventMonitoring shuts monitoring down: it closes all listener
// channels, waits for the WaitGroup to drain, then closes the internal
// event and error channels. It always returns nil.
func (eventState *eventMonitoringState) disableEventMonitoring() error {
	eventState.Lock()
	defer eventState.Unlock()

	// Close and unregister every listener first so nothing new is delivered.
	eventState.closeListeners()

	// Wait for the WaitGroup counter (listener registrations and in-flight
	// sendEvent calls) to reach zero before tearing down the channels.
	eventState.Wait()

	if eventState.enabled {
		eventState.enabled = false
		close(eventState.C)
		close(eventState.errC)
	}
	return nil
}
|
||||||
|
|
||||||
|
// monitorEvents is the long-running goroutine that pumps events from the
// daemon to registered listeners. It waits for at least one listener,
// connects (with retries), then loops dispatching events until monitoring
// is disabled, the channel is closed, or an EOF event arrives.
func (eventState *eventMonitoringState) monitorEvents(c *Client) {
	var err error
	// Block until at least one listener has registered.
	for eventState.noListeners() {
		time.Sleep(10 * time.Millisecond)
	}
	if err = eventState.connectWithRetry(c); err != nil {
		// terminate if connect failed
		eventState.disableEventMonitoring()
		return
	}
	for eventState.isEnabled() {
		// Re-check isEnabled at least every 100ms even when idle.
		timeout := time.After(100 * time.Millisecond)
		select {
		case ev, ok := <-eventState.C:
			if !ok {
				// Channel closed: monitoring was disabled elsewhere.
				return
			}
			if ev == EOFEvent {
				// Stream decoder reached EOF; shut the monitor down.
				eventState.disableEventMonitoring()
				return
			}
			eventState.updateLastSeen(ev)
			// Deliver asynchronously so a slow listener does not stall the loop.
			go eventState.sendEvent(ev)
		case err = <-eventState.errC:
			if err == ErrNoListeners {
				// Nobody left to deliver to; stop monitoring.
				eventState.disableEventMonitoring()
				return
			} else if err != nil {
				// Any other error: restart the monitor after this frame returns.
				defer func() { go eventState.monitorEvents(c) }()
				return
			}
		case <-timeout:
			continue
		}
	}
}
|
||||||
|
|
||||||
|
// connectWithRetry attempts to hijack the daemon's /events stream, retrying
// up to maxMonitorConnRetries times with exponential backoff (starting at
// retryInitialWaitTime milliseconds, doubling per attempt). It returns the
// last error, or nil on success.
func (eventState *eventMonitoringState) connectWithRetry(c *Client) error {
	var retries int
	var err error
	// The loop header performs the first attempt; the body retries with backoff.
	for err = c.eventHijack(atomic.LoadInt64(eventState.lastSeen), eventState.C, eventState.errC); err != nil && retries < maxMonitorConnRetries; retries++ {
		waitTime := int64(retryInitialWaitTime * math.Pow(2, float64(retries)))
		time.Sleep(time.Duration(waitTime) * time.Millisecond)
		err = c.eventHijack(atomic.LoadInt64(eventState.lastSeen), eventState.C, eventState.errC)
	}
	return err
}
|
||||||
|
|
||||||
|
// noListeners reports whether no listeners are currently registered.
func (eventState *eventMonitoringState) noListeners() bool {
	eventState.RLock()
	defer eventState.RUnlock()
	return len(eventState.listeners) == 0
}

// isEnabled reports whether event monitoring is currently active.
func (eventState *eventMonitoringState) isEnabled() bool {
	eventState.RLock()
	defer eventState.RUnlock()
	return eventState.enabled
}
|
||||||
|
|
||||||
|
// sendEvent delivers event to every registered listener. If no listeners
// remain, ErrNoListeners is pushed onto errC so the monitor loop can shut
// down. Delivery blocks until each listener channel accepts the event.
func (eventState *eventMonitoringState) sendEvent(event *APIEvents) {
	eventState.RLock()
	defer eventState.RUnlock()
	// Track this delivery in the WaitGroup so disableEventMonitoring can
	// wait for it to finish before closing channels.
	eventState.Add(1)
	defer eventState.Done()
	if eventState.enabled {
		if len(eventState.listeners) == 0 {
			eventState.errC <- ErrNoListeners
			return
		}

		for _, listener := range eventState.listeners {
			listener <- event
		}
	}
}
|
||||||
|
|
||||||
|
// updateLastSeen advances the last-seen timestamp to e.Time if it is newer.
// The value is read/written atomically; the write lock additionally makes
// the compare-then-store pair exclusive against concurrent updaters.
func (eventState *eventMonitoringState) updateLastSeen(e *APIEvents) {
	eventState.Lock()
	defer eventState.Unlock()
	if atomic.LoadInt64(eventState.lastSeen) < e.Time {
		atomic.StoreInt64(eventState.lastSeen, e.Time)
	}
}
|
||||||
|
|
||||||
|
// eventHijack opens a raw streaming connection to the daemon's /events
// endpoint (resuming from startTime via ?since= when nonzero) and starts a
// goroutine that decodes the JSON event stream, pushing transformed events
// onto eventChan and decode errors onto errChan. EOF on the stream is
// signaled by sending EOFEvent (while monitoring is still enabled).
func (c *Client) eventHijack(startTime int64, eventChan chan *APIEvents, errChan chan error) error {
	uri := "/events"
	if startTime != 0 {
		uri += fmt.Sprintf("?since=%d", startTime)
	}
	// Dial either the unix socket (endpoint path) or a TCP address.
	protocol := c.endpointURL.Scheme
	address := c.endpointURL.Path
	if protocol != "unix" {
		protocol = "tcp"
		address = c.endpointURL.Host
	}
	var dial net.Conn
	var err error
	if c.TLSConfig == nil {
		dial, err = c.Dialer.Dial(protocol, address)
	} else {
		dial, err = tlsDialWithDialer(c.Dialer, protocol, address, c.TLSConfig)
	}
	if err != nil {
		return err
	}
	conn := httputil.NewClientConn(dial, nil)
	req, err := http.NewRequest("GET", uri, nil)
	if err != nil {
		return err
	}
	res, err := conn.Do(req)
	if err != nil {
		return err
	}
	// Decode the stream in the background; the caller returns immediately.
	go func(res *http.Response, conn *httputil.ClientConn) {
		defer conn.Close()
		defer res.Body.Close()
		decoder := json.NewDecoder(res.Body)
		for {
			var event APIEvents
			if err = decoder.Decode(&event); err != nil {
				if err == io.EOF || err == io.ErrUnexpectedEOF {
					if c.eventMonitor.isEnabled() {
						// Signal that we're exiting.
						eventChan <- EOFEvent
					}
					break
				}
				errChan <- err
			}
			// Skip events with no timestamp.
			if event.Time == 0 {
				continue
			}
			if !c.eventMonitor.isEnabled() {
				return
			}
			// Populate both the pre- and post-1.22 field sets.
			transformEvent(&event)
			eventChan <- &event
		}
	}(res, conn)
	return nil
}
|
||||||
|
|
||||||
|
// transformEvent takes an event and determines what version it is from
|
||||||
|
// then populates both versions of the event
|
||||||
|
func transformEvent(event *APIEvents) {
|
||||||
|
// if event version is <= 1.21 there will be no Action and no Type
|
||||||
|
if event.Action == "" && event.Type == "" {
|
||||||
|
event.Action = event.Status
|
||||||
|
event.Actor.ID = event.ID
|
||||||
|
event.Actor.Attributes = map[string]string{}
|
||||||
|
switch event.Status {
|
||||||
|
case "delete", "import", "pull", "push", "tag", "untag":
|
||||||
|
event.Type = "image"
|
||||||
|
default:
|
||||||
|
event.Type = "container"
|
||||||
|
if event.From != "" {
|
||||||
|
event.Actor.Attributes["image"] = event.From
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if event.Status == "" {
|
||||||
|
if event.Type == "image" || event.Type == "container" {
|
||||||
|
event.Status = event.Action
|
||||||
|
} else {
|
||||||
|
// Because just the Status has been overloaded with different Types
|
||||||
|
// if an event is not for an image or a container, we prepend the type
|
||||||
|
// to avoid problems for people relying on actions being only for
|
||||||
|
// images and containers
|
||||||
|
event.Status = event.Type + ":" + event.Action
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if event.ID == "" {
|
||||||
|
event.ID = event.Actor.ID
|
||||||
|
}
|
||||||
|
if event.From == "" {
|
||||||
|
event.From = event.Actor.Attributes["image"]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
202
vendor/github.com/fsouza/go-dockerclient/exec.go
generated
vendored
Normal file
202
vendor/github.com/fsouza/go-dockerclient/exec.go
generated
vendored
Normal file
@ -0,0 +1,202 @@
|
|||||||
|
// Copyright 2015 go-dockerclient authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package docker
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Exec is the type representing a `docker exec` instance and containing the
// instance ID
type Exec struct {
	ID string `json:"Id,omitempty" yaml:"Id,omitempty"`
}

// CreateExecOptions specify parameters to the CreateExecContainer function.
//
// See https://goo.gl/1KSIb7 for more details
type CreateExecOptions struct {
	AttachStdin  bool     `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"`
	AttachStdout bool     `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"`
	AttachStderr bool     `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"`
	Tty          bool     `json:"Tty,omitempty" yaml:"Tty,omitempty"`
	Cmd          []string `json:"Cmd,omitempty" yaml:"Cmd,omitempty"`
	// Container is the ID of the running container the exec is created in.
	Container string `json:"Container,omitempty" yaml:"Container,omitempty"`
	User      string `json:"User,omitempty" yaml:"User,omitempty"`
}
|
||||||
|
|
||||||
|
// CreateExec sets up an exec instance in a running container `id`, returning the exec
|
||||||
|
// instance, or an error in case of failure.
|
||||||
|
//
|
||||||
|
// See https://goo.gl/1KSIb7 for more details
|
||||||
|
func (c *Client) CreateExec(opts CreateExecOptions) (*Exec, error) {
|
||||||
|
path := fmt.Sprintf("/containers/%s/exec", opts.Container)
|
||||||
|
resp, err := c.do("POST", path, doOptions{data: opts})
|
||||||
|
if err != nil {
|
||||||
|
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
|
||||||
|
return nil, &NoSuchContainer{ID: opts.Container}
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
var exec Exec
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(&exec); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &exec, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartExecOptions specify parameters to the StartExecContainer function.
//
// See https://goo.gl/iQCnto for more details
type StartExecOptions struct {
	// Detach, when true, runs the exec command without attaching streams.
	Detach bool `json:"Detach,omitempty" yaml:"Detach,omitempty"`

	Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"`

	// Streams attached to the exec session; excluded from query-string encoding.
	InputStream  io.Reader `qs:"-"`
	OutputStream io.Writer `qs:"-"`
	ErrorStream  io.Writer `qs:"-"`

	// Use raw terminal? Usually true when the container contains a TTY.
	RawTerminal bool `qs:"-"`

	// If set, after a successful connect, a sentinel will be sent and then the
	// client will block on receive before continuing.
	//
	// It must be an unbuffered channel. Using a buffered channel can lead
	// to unexpected behavior.
	Success chan struct{} `json:"-"`
}
|
||||||
|
|
||||||
|
// StartExec starts a previously set up exec instance id. If opts.Detach is
|
||||||
|
// true, it returns after starting the exec command. Otherwise, it sets up an
|
||||||
|
// interactive session with the exec command.
|
||||||
|
//
|
||||||
|
// See https://goo.gl/iQCnto for more details
|
||||||
|
func (c *Client) StartExec(id string, opts StartExecOptions) error {
|
||||||
|
cw, err := c.StartExecNonBlocking(id, opts)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if cw != nil {
|
||||||
|
return cw.Wait()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartExecNonBlocking starts a previously set up exec instance id. If opts.Detach is
// true, it returns after starting the exec command (with a nil CloseWaiter).
// Otherwise, it sets up an interactive session with the exec command and
// returns a CloseWaiter for the hijacked connection.
//
// See https://goo.gl/iQCnto for more details
func (c *Client) StartExecNonBlocking(id string, opts StartExecOptions) (CloseWaiter, error) {
	if id == "" {
		return nil, &NoSuchExec{ID: id}
	}

	path := fmt.Sprintf("/exec/%s/start", id)

	if opts.Detach {
		// Fire-and-forget: plain POST, no stream hijacking.
		resp, err := c.do("POST", path, doOptions{data: opts})
		if err != nil {
			if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
				return nil, &NoSuchExec{ID: id}
			}
			return nil, err
		}
		defer resp.Body.Close()
		return nil, nil
	}

	// Interactive session: hijack the connection to stream stdin/stdout/stderr.
	return c.hijack("POST", path, hijackOptions{
		success:        opts.Success,
		setRawTerminal: opts.RawTerminal,
		in:             opts.InputStream,
		stdout:         opts.OutputStream,
		stderr:         opts.ErrorStream,
		data:           opts,
	})
}
|
||||||
|
|
||||||
|
// ResizeExecTTY resizes the tty session used by the exec command id. This API
|
||||||
|
// is valid only if Tty was specified as part of creating and starting the exec
|
||||||
|
// command.
|
||||||
|
//
|
||||||
|
// See https://goo.gl/e1JpsA for more details
|
||||||
|
func (c *Client) ResizeExecTTY(id string, height, width int) error {
|
||||||
|
params := make(url.Values)
|
||||||
|
params.Set("h", strconv.Itoa(height))
|
||||||
|
params.Set("w", strconv.Itoa(width))
|
||||||
|
|
||||||
|
path := fmt.Sprintf("/exec/%s/resize?%s", id, params.Encode())
|
||||||
|
resp, err := c.do("POST", path, doOptions{})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecProcessConfig is a type describing the command associated to a Exec
// instance. It's used in the ExecInspect type.
type ExecProcessConfig struct {
	Privileged bool     `json:"privileged,omitempty" yaml:"privileged,omitempty"`
	User       string   `json:"user,omitempty" yaml:"user,omitempty"`
	Tty        bool     `json:"tty,omitempty" yaml:"tty,omitempty"`
	EntryPoint string   `json:"entrypoint,omitempty" yaml:"entrypoint,omitempty"`
	Arguments  []string `json:"arguments,omitempty" yaml:"arguments,omitempty"`
}

// ExecInspect is a type with details about a exec instance, including the
// exit code if the command has finished running. It's returned by a api
// call to /exec/(id)/json
//
// See https://goo.gl/gPtX9R for more details
type ExecInspect struct {
	ID            string            `json:"ID,omitempty" yaml:"ID,omitempty"`
	Running       bool              `json:"Running,omitempty" yaml:"Running,omitempty"`
	ExitCode      int               `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty"`
	OpenStdin     bool              `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"`
	OpenStderr    bool              `json:"OpenStderr,omitempty" yaml:"OpenStderr,omitempty"`
	OpenStdout    bool              `json:"OpenStdout,omitempty" yaml:"OpenStdout,omitempty"`
	ProcessConfig ExecProcessConfig `json:"ProcessConfig,omitempty" yaml:"ProcessConfig,omitempty"`
	Container     Container         `json:"Container,omitempty" yaml:"Container,omitempty"`
}
|
||||||
|
|
||||||
|
// InspectExec returns low-level information about the exec command id.
|
||||||
|
//
|
||||||
|
// See https://goo.gl/gPtX9R for more details
|
||||||
|
func (c *Client) InspectExec(id string) (*ExecInspect, error) {
|
||||||
|
path := fmt.Sprintf("/exec/%s/json", id)
|
||||||
|
resp, err := c.do("GET", path, doOptions{})
|
||||||
|
if err != nil {
|
||||||
|
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
|
||||||
|
return nil, &NoSuchExec{ID: id}
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
var exec ExecInspect
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(&exec); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &exec, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NoSuchExec is the error returned when a given exec instance does not exist.
type NoSuchExec struct {
	ID string
}

// Error implements the error interface, identifying the missing exec instance.
func (err *NoSuchExec) Error() string {
	return "No such exec instance: " + err.ID
}
|
||||||
55
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/CHANGELOG.md
generated
vendored
Normal file
55
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/CHANGELOG.md
generated
vendored
Normal file
@ -0,0 +1,55 @@
|
|||||||
|
# 0.9.0 (Unreleased)
|
||||||
|
|
||||||
|
* logrus/text_formatter: don't emit empty msg
|
||||||
|
* logrus/hooks/airbrake: move out of main repository
|
||||||
|
* logrus/hooks/sentry: move out of main repository
|
||||||
|
* logrus/hooks/papertrail: move out of main repository
|
||||||
|
* logrus/hooks/bugsnag: move out of main repository
|
||||||
|
|
||||||
|
# 0.8.7
|
||||||
|
|
||||||
|
* logrus/core: fix possible race (#216)
|
||||||
|
* logrus/doc: small typo fixes and doc improvements
|
||||||
|
|
||||||
|
|
||||||
|
# 0.8.6
|
||||||
|
|
||||||
|
* hooks/raven: allow passing an initialized client
|
||||||
|
|
||||||
|
# 0.8.5
|
||||||
|
|
||||||
|
* logrus/core: revert #208
|
||||||
|
|
||||||
|
# 0.8.4
|
||||||
|
|
||||||
|
* formatter/text: fix data race (#218)
|
||||||
|
|
||||||
|
# 0.8.3
|
||||||
|
|
||||||
|
* logrus/core: fix entry log level (#208)
|
||||||
|
* logrus/core: improve performance of text formatter by 40%
|
||||||
|
* logrus/core: expose `LevelHooks` type
|
||||||
|
* logrus/core: add support for DragonflyBSD and NetBSD
|
||||||
|
* formatter/text: print structs more verbosely
|
||||||
|
|
||||||
|
# 0.8.2
|
||||||
|
|
||||||
|
* logrus: fix more Fatal family functions
|
||||||
|
|
||||||
|
# 0.8.1
|
||||||
|
|
||||||
|
* logrus: fix not exiting on `Fatalf` and `Fatalln`
|
||||||
|
|
||||||
|
# 0.8.0
|
||||||
|
|
||||||
|
* logrus: defaults to stderr instead of stdout
|
||||||
|
* hooks/sentry: add special field for `*http.Request`
|
||||||
|
* formatter/text: ignore Windows for colors
|
||||||
|
|
||||||
|
# 0.7.3
|
||||||
|
|
||||||
|
* formatter/\*: allow configuration of timestamp layout
|
||||||
|
|
||||||
|
# 0.7.2
|
||||||
|
|
||||||
|
* formatter/text: Add configuration option for time format (#158)
|
||||||
21
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/LICENSE
generated
vendored
Normal file
21
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/LICENSE
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
The MIT License (MIT)
|
||||||
|
|
||||||
|
Copyright (c) 2014 Simon Eskildsen
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in
|
||||||
|
all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
THE SOFTWARE.
|
||||||
365
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md
generated
vendored
Normal file
365
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md
generated
vendored
Normal file
@ -0,0 +1,365 @@
|
|||||||
|
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [](https://travis-ci.org/Sirupsen/logrus) [][godoc]
|
||||||
|
|
||||||
|
Logrus is a structured logger for Go (golang), completely API compatible with
|
||||||
|
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
|
||||||
|
yet stable (pre 1.0). Logrus itself is completely stable and has been used in
|
||||||
|
many large deployments. The core API is unlikely to change much but please
|
||||||
|
version control your Logrus to make sure you aren't fetching latest `master` on
|
||||||
|
every build.**
|
||||||
|
|
||||||
|
Nicely color-coded in development (when a TTY is attached, otherwise just
|
||||||
|
plain text):
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
|
||||||
|
or Splunk:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
|
||||||
|
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
|
||||||
|
|
||||||
|
{"level":"warning","msg":"The group's number increased tremendously!",
|
||||||
|
"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
|
||||||
|
|
||||||
|
{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
|
||||||
|
"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
|
||||||
|
|
||||||
|
{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
|
||||||
|
"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
|
||||||
|
|
||||||
|
{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
|
||||||
|
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
|
||||||
|
```
|
||||||
|
|
||||||
|
With the default `log.Formatter = new(&log.TextFormatter{})` when a TTY is not
|
||||||
|
attached, the output is compatible with the
|
||||||
|
[logfmt](http://godoc.org/github.com/kr/logfmt) format:
|
||||||
|
|
||||||
|
```text
|
||||||
|
time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
|
||||||
|
time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
|
||||||
|
time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
|
||||||
|
time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
|
||||||
|
time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
|
||||||
|
time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
|
||||||
|
exit status 1
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Example
|
||||||
|
|
||||||
|
The simplest way to use Logrus is simply the package-level exported logger:
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
log "github.com/Sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
log.WithFields(log.Fields{
|
||||||
|
"animal": "walrus",
|
||||||
|
}).Info("A walrus appears")
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Note that it's completely api-compatible with the stdlib logger, so you can
|
||||||
|
replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
|
||||||
|
and you'll now have the flexibility of Logrus. You can customize it all you
|
||||||
|
want:
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
log "github.com/Sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// Log as JSON instead of the default ASCII formatter.
|
||||||
|
log.SetFormatter(&log.JSONFormatter{})
|
||||||
|
|
||||||
|
// Output to stderr instead of stdout, could also be a file.
|
||||||
|
log.SetOutput(os.Stderr)
|
||||||
|
|
||||||
|
// Only log the warning severity or above.
|
||||||
|
log.SetLevel(log.WarnLevel)
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
log.WithFields(log.Fields{
|
||||||
|
"animal": "walrus",
|
||||||
|
"size": 10,
|
||||||
|
}).Info("A group of walrus emerges from the ocean")
|
||||||
|
|
||||||
|
log.WithFields(log.Fields{
|
||||||
|
"omg": true,
|
||||||
|
"number": 122,
|
||||||
|
}).Warn("The group's number increased tremendously!")
|
||||||
|
|
||||||
|
log.WithFields(log.Fields{
|
||||||
|
"omg": true,
|
||||||
|
"number": 100,
|
||||||
|
}).Fatal("The ice breaks!")
|
||||||
|
|
||||||
|
// A common pattern is to re-use fields between logging statements by re-using
|
||||||
|
// the logrus.Entry returned from WithFields()
|
||||||
|
contextLogger := log.WithFields(log.Fields{
|
||||||
|
"common": "this is a common field",
|
||||||
|
"other": "I also should be logged always",
|
||||||
|
})
|
||||||
|
|
||||||
|
contextLogger.Info("I'll be logged with common and other field")
|
||||||
|
contextLogger.Info("Me too")
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
For more advanced usage such as logging to multiple locations from the same
|
||||||
|
application, you can also create an instance of the `logrus` Logger:
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/Sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Create a new instance of the logger. You can have any number of instances.
|
||||||
|
var log = logrus.New()
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
// The API for setting attributes is a little different than the package level
|
||||||
|
// exported logger. See Godoc.
|
||||||
|
log.Out = os.Stderr
|
||||||
|
|
||||||
|
log.WithFields(logrus.Fields{
|
||||||
|
"animal": "walrus",
|
||||||
|
"size": 10,
|
||||||
|
}).Info("A group of walrus emerges from the ocean")
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Fields
|
||||||
|
|
||||||
|
Logrus encourages careful, structured logging through logging fields instead of
|
||||||
|
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
|
||||||
|
to send event %s to topic %s with key %d")`, you should log the much more
|
||||||
|
discoverable:
|
||||||
|
|
||||||
|
```go
|
||||||
|
log.WithFields(log.Fields{
|
||||||
|
"event": event,
|
||||||
|
"topic": topic,
|
||||||
|
"key": key,
|
||||||
|
}).Fatal("Failed to send event")
|
||||||
|
```
|
||||||
|
|
||||||
|
We've found this API forces you to think about logging in a way that produces
|
||||||
|
much more useful logging messages. We've been in countless situations where just
|
||||||
|
a single added field to a log statement that was already there would've saved us
|
||||||
|
hours. The `WithFields` call is optional.
|
||||||
|
|
||||||
|
In general, with Logrus using any of the `printf`-family functions should be
|
||||||
|
seen as a hint you should add a field, however, you can still use the
|
||||||
|
`printf`-family functions with Logrus.
|
||||||
|
|
||||||
|
#### Hooks
|
||||||
|
|
||||||
|
You can add hooks for logging levels. For example to send errors to an exception
|
||||||
|
tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
|
||||||
|
multiple places simultaneously, e.g. syslog.
|
||||||
|
|
||||||
|
Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
|
||||||
|
`init`:
|
||||||
|
|
||||||
|
```go
|
||||||
|
import (
|
||||||
|
log "github.com/Sirupsen/logrus"
|
||||||
|
  "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
|
||||||
|
logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
|
||||||
|
"log/syslog"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
|
||||||
|
// Use the Airbrake hook to report errors that have Error severity or above to
|
||||||
|
// an exception tracker. You can create custom hooks, see the Hooks section.
|
||||||
|
log.AddHook(airbrake.NewHook(123, "xyz", "production"))
|
||||||
|
|
||||||
|
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
|
||||||
|
if err != nil {
|
||||||
|
log.Error("Unable to connect to local syslog daemon")
|
||||||
|
} else {
|
||||||
|
log.AddHook(hook)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md).
|
||||||
|
|
||||||
|
| Hook | Description |
|
||||||
|
| ----- | ----------- |
|
||||||
|
| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
|
||||||
|
| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
|
||||||
|
| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
|
||||||
|
| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
|
||||||
|
| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
|
||||||
|
| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
|
||||||
|
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
|
||||||
|
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
|
||||||
|
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
|
||||||
|
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
|
||||||
|
| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
|
||||||
|
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
|
||||||
|
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
|
||||||
|
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
|
||||||
|
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
|
||||||
|
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
|
||||||
|
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
|
||||||
|
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
|
||||||
|
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
|
||||||
|
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
|
||||||
|
| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
|
||||||
|
|
||||||
|
#### Level logging
|
||||||
|
|
||||||
|
Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
|
||||||
|
|
||||||
|
```go
|
||||||
|
log.Debug("Useful debugging information.")
|
||||||
|
log.Info("Something noteworthy happened!")
|
||||||
|
log.Warn("You should probably take a look at this.")
|
||||||
|
log.Error("Something failed but I'm not quitting.")
|
||||||
|
// Calls os.Exit(1) after logging
|
||||||
|
log.Fatal("Bye.")
|
||||||
|
// Calls panic() after logging
|
||||||
|
log.Panic("I'm bailing.")
|
||||||
|
```
|
||||||
|
|
||||||
|
You can set the logging level on a `Logger`, then it will only log entries with
|
||||||
|
that severity or anything above it:
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Will log anything that is info or above (warn, error, fatal, panic). Default.
|
||||||
|
log.SetLevel(log.InfoLevel)
|
||||||
|
```
|
||||||
|
|
||||||
|
It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
|
||||||
|
environment if your application has that.
|
||||||
|
|
||||||
|
#### Entries
|
||||||
|
|
||||||
|
Besides the fields added with `WithField` or `WithFields` some fields are
|
||||||
|
automatically added to all logging events:
|
||||||
|
|
||||||
|
1. `time`. The timestamp when the entry was created.
|
||||||
|
2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
|
||||||
|
   the `WithFields` call. E.g. `Failed to send event.`
|
||||||
|
3. `level`. The logging level. E.g. `info`.
|
||||||
|
|
||||||
|
#### Environments
|
||||||
|
|
||||||
|
Logrus has no notion of environment.
|
||||||
|
|
||||||
|
If you wish for hooks and formatters to only be used in specific environments,
|
||||||
|
you should handle that yourself. For example, if your application has a global
|
||||||
|
variable `Environment`, which is a string representation of the environment you
|
||||||
|
could do:
|
||||||
|
|
||||||
|
```go
|
||||||
|
import (
|
||||||
|
log "github.com/Sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
init() {
|
||||||
|
// do something here to set environment depending on an environment variable
|
||||||
|
// or command-line flag
|
||||||
|
if Environment == "production" {
|
||||||
|
log.SetFormatter(&log.JSONFormatter{})
|
||||||
|
} else {
|
||||||
|
// The TextFormatter is default, you don't actually have to do this.
|
||||||
|
log.SetFormatter(&log.TextFormatter{})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
This configuration is how `logrus` was intended to be used, but JSON in
|
||||||
|
production is mostly only useful if you do log aggregation with tools like
|
||||||
|
Splunk or Logstash.
|
||||||
|
|
||||||
|
#### Formatters
|
||||||
|
|
||||||
|
The built-in logging formatters are:
|
||||||
|
|
||||||
|
* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
|
||||||
|
without colors.
|
||||||
|
* *Note:* to force colored output when there is no TTY, set the `ForceColors`
|
||||||
|
field to `true`. To force no colored output even if there is a TTY set the
|
||||||
|
`DisableColors` field to `true`
|
||||||
|
* `logrus.JSONFormatter`. Logs fields as JSON.
|
||||||
|
* `logrus/formatters/logstash.LogstashFormatter`. Logs fields as [Logstash](http://logstash.net) Events.
|
||||||
|
|
||||||
|
```go
|
||||||
|
logrus.SetFormatter(&logstash.LogstashFormatter{Type: "application_name"})
|
||||||
|
```
|
||||||
|
|
||||||
|
Third party logging formatters:
|
||||||
|
|
||||||
|
* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
|
||||||
|
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
|
||||||
|
|
||||||
|
You can define your formatter by implementing the `Formatter` interface,
|
||||||
|
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
|
||||||
|
`Fields` type (`map[string]interface{}`) with all your fields as well as the
|
||||||
|
default ones (see Entries section above):
|
||||||
|
|
||||||
|
```go
|
||||||
|
type MyJSONFormatter struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
log.SetFormatter(new(MyJSONFormatter))
|
||||||
|
|
||||||
|
func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
|
||||||
|
// Note this doesn't include Time, Level and Message which are available on
|
||||||
|
// the Entry. Consult `godoc` on information about those fields or read the
|
||||||
|
// source of the official loggers.
|
||||||
|
serialized, err := json.Marshal(entry.Data)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
|
||||||
|
}
|
||||||
|
return append(serialized, '\n'), nil
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Logger as an `io.Writer`
|
||||||
|
|
||||||
|
Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
|
||||||
|
|
||||||
|
```go
|
||||||
|
w := logger.Writer()
|
||||||
|
defer w.Close()
|
||||||
|
|
||||||
|
srv := http.Server{
|
||||||
|
// create a stdlib log.Logger that writes to
|
||||||
|
// logrus.Logger.
|
||||||
|
ErrorLog: log.New(w, "", 0),
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Each line written to that writer will be printed the usual way, using formatters
|
||||||
|
and hooks. The level for those entries is `info`.
|
||||||
|
|
||||||
|
#### Rotation
|
||||||
|
|
||||||
|
Log rotation is not provided with Logrus. Log rotation should be done by an
|
||||||
|
external program (like `logrotate(8)`) that can compress and delete old log
|
||||||
|
entries. It should not be a feature of the application-level logger.
|
||||||
|
|
||||||
|
#### Tools
|
||||||
|
|
||||||
|
| Tool | Description |
|
||||||
|
| ---- | ----------- |
|
||||||
|
|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.|
|
||||||
|
|
||||||
|
[godoc]: https://godoc.org/github.com/Sirupsen/logrus
|
||||||
26
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/doc.go
generated
vendored
Normal file
26
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/doc.go
generated
vendored
Normal file
@ -0,0 +1,26 @@
|
|||||||
|
/*
|
||||||
|
Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
|
||||||
|
|
||||||
|
|
||||||
|
The simplest way to use Logrus is simply the package-level exported logger:
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
log "github.com/Sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
log.WithFields(log.Fields{
|
||||||
|
"animal": "walrus",
|
||||||
|
"number": 1,
|
||||||
|
"size": 10,
|
||||||
|
}).Info("A walrus appears")
|
||||||
|
}
|
||||||
|
|
||||||
|
Output:
|
||||||
|
time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
|
||||||
|
|
||||||
|
For a full guide visit https://github.com/Sirupsen/logrus
|
||||||
|
*/
|
||||||
|
package logrus
|
||||||
264
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go
generated
vendored
Normal file
264
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go
generated
vendored
Normal file
@ -0,0 +1,264 @@
|
|||||||
|
package logrus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ErrorKey is the field name under which WithError stores the error value.
var ErrorKey = "error"
|
||||||
|
|
||||||
|
// An Entry is the final or intermediate Logrus logging entry. It contains all
// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
// passed around as much as you wish to avoid field duplication.
type Entry struct {
	// Logger that will ultimately format and write this entry.
	Logger *Logger

	// Data contains all the fields set by the user.
	Data Fields

	// Time at which the log entry was created.
	Time time.Time

	// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic.
	Level Level

	// Message passed to Debug, Info, Warn, Error, Fatal or Panic.
	Message string
}
|
||||||
|
|
||||||
|
func NewEntry(logger *Logger) *Entry {
|
||||||
|
return &Entry{
|
||||||
|
Logger: logger,
|
||||||
|
// Default is three fields, give a little extra room
|
||||||
|
Data: make(Fields, 5),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reader returns a buffered reader for the entry, which is a proxy to the
// logger's formatter: the entry is formatted once and the serialized bytes
// are wrapped in a *bytes.Buffer.
func (entry *Entry) Reader() (*bytes.Buffer, error) {
	serialized, err := entry.Logger.Formatter.Format(entry)
	return bytes.NewBuffer(serialized), err
}
|
||||||
|
|
||||||
|
// Returns the string representation from the reader and ultimately the
|
||||||
|
// formatter.
|
||||||
|
func (entry *Entry) String() (string, error) {
|
||||||
|
reader, err := entry.Reader()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return reader.String(), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithError adds the error as a single field (using the key defined in
// ErrorKey) to the Entry and returns the derived Entry.
func (entry *Entry) WithError(err error) *Entry {
	return entry.WithField(ErrorKey, err)
}
|
||||||
|
|
||||||
|
// WithField adds a single key/value field to the Entry. The receiver is not
// mutated; a new Entry carrying the merged fields is returned.
func (entry *Entry) WithField(key string, value interface{}) *Entry {
	return entry.WithFields(Fields{key: value})
}
|
||||||
|
|
||||||
|
// Add a map of fields to the Entry.
|
||||||
|
func (entry *Entry) WithFields(fields Fields) *Entry {
|
||||||
|
data := Fields{}
|
||||||
|
for k, v := range entry.Data {
|
||||||
|
data[k] = v
|
||||||
|
}
|
||||||
|
for k, v := range fields {
|
||||||
|
data[k] = v
|
||||||
|
}
|
||||||
|
return &Entry{Logger: entry.Logger, Data: data}
|
||||||
|
}
|
||||||
|
|
||||||
|
// This function is not declared with a pointer value because otherwise
// race conditions will occur when using multiple goroutines: each call
// stamps Time/Level/Message on its own copy of the Entry.
func (entry Entry) log(level Level, msg string) {
	entry.Time = time.Now()
	entry.Level = level
	entry.Message = msg

	// Hook failures are reported to stderr but never abort the log call.
	if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
		entry.Logger.mu.Lock()
		fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
		entry.Logger.mu.Unlock()
	}

	// Format the entry; on failure we still fall through and attempt the
	// copy below with whatever buffer Reader returned.
	reader, err := entry.Reader()
	if err != nil {
		entry.Logger.mu.Lock()
		fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
		entry.Logger.mu.Unlock()
	}

	// Serialize writes to the logger's output under the logger's mutex.
	entry.Logger.mu.Lock()
	defer entry.Logger.mu.Unlock()

	_, err = io.Copy(entry.Logger.Out, reader)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
	}

	// To avoid Entry#log() returning a value that only would make sense for
	// panic() to use in Entry#Panic(), we avoid the allocation by checking
	// directly here.
	if level <= PanicLevel {
		panic(&entry)
	}
}
|
||||||
|
|
||||||
|
// Debug logs at DebugLevel when the logger's level permits it.
func (entry *Entry) Debug(args ...interface{}) {
	if entry.Logger.Level >= DebugLevel {
		entry.log(DebugLevel, fmt.Sprint(args...))
	}
}

// Print is an alias for Info, mirroring the stdlib log API.
func (entry *Entry) Print(args ...interface{}) {
	entry.Info(args...)
}

// Info logs at InfoLevel when the logger's level permits it.
func (entry *Entry) Info(args ...interface{}) {
	if entry.Logger.Level >= InfoLevel {
		entry.log(InfoLevel, fmt.Sprint(args...))
	}
}

// Warn logs at WarnLevel when the logger's level permits it.
func (entry *Entry) Warn(args ...interface{}) {
	if entry.Logger.Level >= WarnLevel {
		entry.log(WarnLevel, fmt.Sprint(args...))
	}
}

// Warning is an alias for Warn.
func (entry *Entry) Warning(args ...interface{}) {
	entry.Warn(args...)
}

// Error logs at ErrorLevel when the logger's level permits it.
func (entry *Entry) Error(args ...interface{}) {
	if entry.Logger.Level >= ErrorLevel {
		entry.log(ErrorLevel, fmt.Sprint(args...))
	}
}

// Fatal logs at FatalLevel and then always calls os.Exit(1), even when the
// message itself was suppressed by the level check.
func (entry *Entry) Fatal(args ...interface{}) {
	if entry.Logger.Level >= FatalLevel {
		entry.log(FatalLevel, fmt.Sprint(args...))
	}
	os.Exit(1)
}

// Panic logs at PanicLevel (log itself panics with the entry at that level)
// and then panics with the formatted message as a fallback when suppressed.
func (entry *Entry) Panic(args ...interface{}) {
	if entry.Logger.Level >= PanicLevel {
		entry.log(PanicLevel, fmt.Sprint(args...))
	}
	panic(fmt.Sprint(args...))
}
|
||||||
|
|
||||||
|
// Entry Printf family functions. Each checks the logger level first so the
// fmt.Sprintf work is skipped entirely when the message would be dropped.

// Debugf formats and logs at DebugLevel.
func (entry *Entry) Debugf(format string, args ...interface{}) {
	if entry.Logger.Level >= DebugLevel {
		entry.Debug(fmt.Sprintf(format, args...))
	}
}

// Infof formats and logs at InfoLevel.
func (entry *Entry) Infof(format string, args ...interface{}) {
	if entry.Logger.Level >= InfoLevel {
		entry.Info(fmt.Sprintf(format, args...))
	}
}

// Printf is an alias for Infof, mirroring the stdlib log API.
func (entry *Entry) Printf(format string, args ...interface{}) {
	entry.Infof(format, args...)
}

// Warnf formats and logs at WarnLevel.
func (entry *Entry) Warnf(format string, args ...interface{}) {
	if entry.Logger.Level >= WarnLevel {
		entry.Warn(fmt.Sprintf(format, args...))
	}
}

// Warningf is an alias for Warnf.
func (entry *Entry) Warningf(format string, args ...interface{}) {
	entry.Warnf(format, args...)
}

// Errorf formats and logs at ErrorLevel.
func (entry *Entry) Errorf(format string, args ...interface{}) {
	if entry.Logger.Level >= ErrorLevel {
		entry.Error(fmt.Sprintf(format, args...))
	}
}

// Fatalf formats and logs at FatalLevel, then always calls os.Exit(1).
func (entry *Entry) Fatalf(format string, args ...interface{}) {
	if entry.Logger.Level >= FatalLevel {
		entry.Fatal(fmt.Sprintf(format, args...))
	}
	os.Exit(1)
}

// Panicf formats and logs at PanicLevel; Panic then panics.
func (entry *Entry) Panicf(format string, args ...interface{}) {
	if entry.Logger.Level >= PanicLevel {
		entry.Panic(fmt.Sprintf(format, args...))
	}
}
|
||||||
|
|
||||||
|
// Entry Println family functions. Like the Printf family, each gates on the
// logger level before paying for the sprintlnn formatting.

// Debugln logs at DebugLevel with Sprintln-style spacing (no trailing newline).
func (entry *Entry) Debugln(args ...interface{}) {
	if entry.Logger.Level >= DebugLevel {
		entry.Debug(entry.sprintlnn(args...))
	}
}

// Infoln logs at InfoLevel with Sprintln-style spacing.
func (entry *Entry) Infoln(args ...interface{}) {
	if entry.Logger.Level >= InfoLevel {
		entry.Info(entry.sprintlnn(args...))
	}
}

// Println is an alias for Infoln, mirroring the stdlib log API.
func (entry *Entry) Println(args ...interface{}) {
	entry.Infoln(args...)
}

// Warnln logs at WarnLevel with Sprintln-style spacing.
func (entry *Entry) Warnln(args ...interface{}) {
	if entry.Logger.Level >= WarnLevel {
		entry.Warn(entry.sprintlnn(args...))
	}
}

// Warningln is an alias for Warnln.
func (entry *Entry) Warningln(args ...interface{}) {
	entry.Warnln(args...)
}

// Errorln logs at ErrorLevel with Sprintln-style spacing.
func (entry *Entry) Errorln(args ...interface{}) {
	if entry.Logger.Level >= ErrorLevel {
		entry.Error(entry.sprintlnn(args...))
	}
}

// Fatalln logs at FatalLevel, then always calls os.Exit(1).
func (entry *Entry) Fatalln(args ...interface{}) {
	if entry.Logger.Level >= FatalLevel {
		entry.Fatal(entry.sprintlnn(args...))
	}
	os.Exit(1)
}

// Panicln logs at PanicLevel; Panic then panics.
func (entry *Entry) Panicln(args ...interface{}) {
	if entry.Logger.Level >= PanicLevel {
		entry.Panic(entry.sprintlnn(args...))
	}
}
|
||||||
|
|
||||||
|
// sprintlnn => Sprintln without the newline. This is to get the behavior of
// fmt.Sprintln where spaces are always added between operands, regardless of
// their type. Instead of vendoring the Sprintln implementation to spare a
// string allocation, we do the simplest thing: format and strip the final
// newline byte.
func (entry *Entry) sprintlnn(args ...interface{}) string {
	msg := fmt.Sprintln(args...)
	return msg[:len(msg)-1]
}
|
||||||
193
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.go
generated
vendored
Normal file
193
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.go
generated
vendored
Normal file
@ -0,0 +1,193 @@
|
|||||||
|
package logrus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// std is the name of the standard logger in stdlib `log`; all the
	// package-level helpers below delegate to it.
	std = New()
)

// StandardLogger returns the package-level default logger.
func StandardLogger() *Logger {
	return std
}
|
||||||
|
|
||||||
|
// SetOutput sets the standard logger output. Guarded by the logger's mutex
// so it is safe to call concurrently with logging.
func SetOutput(out io.Writer) {
	std.mu.Lock()
	defer std.mu.Unlock()
	std.Out = out
}

// SetFormatter sets the standard logger formatter.
func SetFormatter(formatter Formatter) {
	std.mu.Lock()
	defer std.mu.Unlock()
	std.Formatter = formatter
}

// SetLevel sets the standard logger level.
func SetLevel(level Level) {
	std.mu.Lock()
	defer std.mu.Unlock()
	std.Level = level
}

// GetLevel returns the standard logger level.
func GetLevel() Level {
	std.mu.Lock()
	defer std.mu.Unlock()
	return std.Level
}

// AddHook adds a hook to the standard logger hooks.
func AddHook(hook Hook) {
	std.mu.Lock()
	defer std.mu.Unlock()
	std.Hooks.Add(hook)
}
|
||||||
|
|
||||||
|
// WithError creates an entry from the standard logger and adds an error to
// it, using the value defined in ErrorKey as key.
func WithError(err error) *Entry {
	return std.WithField(ErrorKey, err)
}

// WithField creates an entry from the standard logger and adds a field to
// it. If you want multiple fields, use `WithFields`.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithField(key string, value interface{}) *Entry {
	return std.WithField(key, value)
}

// WithFields creates an entry from the standard logger and adds multiple
// fields to it. This is simply a helper for `WithField`, invoking it
// once for each field.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithFields(fields Fields) *Entry {
	return std.WithFields(fields)
}
|
||||||
|
|
||||||
|
// Debug logs a message at level Debug on the standard logger.
func Debug(args ...interface{}) {
	std.Debug(args...)
}

// Print logs a message at level Info on the standard logger.
func Print(args ...interface{}) {
	std.Print(args...)
}

// Info logs a message at level Info on the standard logger.
func Info(args ...interface{}) {
	std.Info(args...)
}

// Warn logs a message at level Warn on the standard logger.
func Warn(args ...interface{}) {
	std.Warn(args...)
}

// Warning logs a message at level Warn on the standard logger.
func Warning(args ...interface{}) {
	std.Warning(args...)
}

// Error logs a message at level Error on the standard logger.
func Error(args ...interface{}) {
	std.Error(args...)
}

// Panic logs a message at level Panic on the standard logger, then panics.
func Panic(args ...interface{}) {
	std.Panic(args...)
}

// Fatal logs a message at level Fatal on the standard logger, then exits
// the process with status 1.
func Fatal(args ...interface{}) {
	std.Fatal(args...)
}
|
||||||
|
|
||||||
|
// Debugf logs a formatted message at level Debug on the standard logger.
func Debugf(format string, args ...interface{}) {
	std.Debugf(format, args...)
}

// Printf logs a formatted message at level Info on the standard logger.
func Printf(format string, args ...interface{}) {
	std.Printf(format, args...)
}

// Infof logs a formatted message at level Info on the standard logger.
func Infof(format string, args ...interface{}) {
	std.Infof(format, args...)
}

// Warnf logs a formatted message at level Warn on the standard logger.
func Warnf(format string, args ...interface{}) {
	std.Warnf(format, args...)
}

// Warningf logs a formatted message at level Warn on the standard logger.
func Warningf(format string, args ...interface{}) {
	std.Warningf(format, args...)
}

// Errorf logs a formatted message at level Error on the standard logger.
func Errorf(format string, args ...interface{}) {
	std.Errorf(format, args...)
}

// Panicf logs a formatted message at level Panic on the standard logger,
// then panics.
func Panicf(format string, args ...interface{}) {
	std.Panicf(format, args...)
}

// Fatalf logs a formatted message at level Fatal on the standard logger,
// then exits the process with status 1.
func Fatalf(format string, args ...interface{}) {
	std.Fatalf(format, args...)
}
|
||||||
|
|
||||||
|
// Debugln logs a message at level Debug on the standard logger.
func Debugln(args ...interface{}) {
	std.Debugln(args...)
}

// Println logs a message at level Info on the standard logger.
func Println(args ...interface{}) {
	std.Println(args...)
}

// Infoln logs a message at level Info on the standard logger.
func Infoln(args ...interface{}) {
	std.Infoln(args...)
}

// Warnln logs a message at level Warn on the standard logger.
func Warnln(args ...interface{}) {
	std.Warnln(args...)
}

// Warningln logs a message at level Warn on the standard logger.
func Warningln(args ...interface{}) {
	std.Warningln(args...)
}

// Errorln logs a message at level Error on the standard logger.
func Errorln(args ...interface{}) {
	std.Errorln(args...)
}

// Panicln logs a message at level Panic on the standard logger, then panics.
func Panicln(args ...interface{}) {
	std.Panicln(args...)
}

// Fatalln logs a message at level Fatal on the standard logger, then exits
// the process with status 1.
func Fatalln(args ...interface{}) {
	std.Fatalln(args...)
}
|
||||||
48
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter.go
generated
vendored
Normal file
48
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter.go
generated
vendored
Normal file
@ -0,0 +1,48 @@
|
|||||||
|
package logrus
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
// DefaultTimestampFormat is the layout used for the `time` field when a
// formatter does not specify its own.
const DefaultTimestampFormat = time.RFC3339

// The Formatter interface is used to implement a custom Formatter. It takes an
// `Entry`. It exposes all the fields, including the default ones:
//
// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
// * `entry.Data["time"]`. The timestamp.
// * `entry.Data["level"]`. The level the entry was logged at.
//
// Any additional fields added with `WithField` or `WithFields` are also in
// `entry.Data`. Format is expected to return an array of bytes which are then
// logged to `logger.Out`.
type Formatter interface {
	Format(*Entry) ([]byte, error)
}
|
||||||
|
|
||||||
|
// This is to not silently overwrite `time`, `msg` and `level` fields when
|
||||||
|
// dumping it. If this code wasn't there doing:
|
||||||
|
//
|
||||||
|
// logrus.WithField("level", 1).Info("hello")
|
||||||
|
//
|
||||||
|
// Would just silently drop the user provided level. Instead with this code
|
||||||
|
// it'll logged as:
|
||||||
|
//
|
||||||
|
// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
|
||||||
|
//
|
||||||
|
// It's not exported because it's still using Data in an opinionated way. It's to
|
||||||
|
// avoid code duplication between the two default formatters.
|
||||||
|
func prefixFieldClashes(data Fields) {
|
||||||
|
_, ok := data["time"]
|
||||||
|
if ok {
|
||||||
|
data["fields.time"] = data["time"]
|
||||||
|
}
|
||||||
|
|
||||||
|
_, ok = data["msg"]
|
||||||
|
if ok {
|
||||||
|
data["fields.msg"] = data["msg"]
|
||||||
|
}
|
||||||
|
|
||||||
|
_, ok = data["level"]
|
||||||
|
if ok {
|
||||||
|
data["fields.level"] = data["level"]
|
||||||
|
}
|
||||||
|
}
|
||||||
34
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hooks.go
generated
vendored
Normal file
34
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hooks.go
generated
vendored
Normal file
@ -0,0 +1,34 @@
|
|||||||
|
package logrus
|
||||||
|
|
||||||
|
// A hook to be fired when logging on the logging levels returned from
|
||||||
|
// `Levels()` on your implementation of the interface. Note that this is not
|
||||||
|
// fired in a goroutine or a channel with workers, you should handle such
|
||||||
|
// functionality yourself if your call is non-blocking and you don't wish for
|
||||||
|
// the logging calls for levels returned from `Levels()` to block.
|
||||||
|
type Hook interface {
|
||||||
|
Levels() []Level
|
||||||
|
Fire(*Entry) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// Internal type for storing the hooks on a logger instance.
|
||||||
|
type LevelHooks map[Level][]Hook
|
||||||
|
|
||||||
|
// Add a hook to an instance of logger. This is called with
|
||||||
|
// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
|
||||||
|
func (hooks LevelHooks) Add(hook Hook) {
|
||||||
|
for _, level := range hook.Levels() {
|
||||||
|
hooks[level] = append(hooks[level], hook)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fire all the hooks for the passed level. Used by `entry.log` to fire
|
||||||
|
// appropriate hooks for a log entry.
|
||||||
|
func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
|
||||||
|
for _, hook := range hooks[level] {
|
||||||
|
if err := hook.Fire(entry); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
41
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter.go
generated
vendored
Normal file
41
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter.go
generated
vendored
Normal file
@ -0,0 +1,41 @@
|
|||||||
|
package logrus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
type JSONFormatter struct {
|
||||||
|
// TimestampFormat sets the format used for marshaling timestamps.
|
||||||
|
TimestampFormat string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
|
||||||
|
data := make(Fields, len(entry.Data)+3)
|
||||||
|
for k, v := range entry.Data {
|
||||||
|
switch v := v.(type) {
|
||||||
|
case error:
|
||||||
|
// Otherwise errors are ignored by `encoding/json`
|
||||||
|
// https://github.com/Sirupsen/logrus/issues/137
|
||||||
|
data[k] = v.Error()
|
||||||
|
default:
|
||||||
|
data[k] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
prefixFieldClashes(data)
|
||||||
|
|
||||||
|
timestampFormat := f.TimestampFormat
|
||||||
|
if timestampFormat == "" {
|
||||||
|
timestampFormat = DefaultTimestampFormat
|
||||||
|
}
|
||||||
|
|
||||||
|
data["time"] = entry.Time.Format(timestampFormat)
|
||||||
|
data["msg"] = entry.Message
|
||||||
|
data["level"] = entry.Level.String()
|
||||||
|
|
||||||
|
serialized, err := json.Marshal(data)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
|
||||||
|
}
|
||||||
|
return append(serialized, '\n'), nil
|
||||||
|
}
|
||||||
212
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.go
generated
vendored
Normal file
212
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.go
generated
vendored
Normal file
@ -0,0 +1,212 @@
|
|||||||
|
package logrus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Logger struct {
|
||||||
|
// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
|
||||||
|
// file, or leave it default which is `os.Stderr`. You can also set this to
|
||||||
|
// something more adventorous, such as logging to Kafka.
|
||||||
|
Out io.Writer
|
||||||
|
// Hooks for the logger instance. These allow firing events based on logging
|
||||||
|
// levels and log entries. For example, to send errors to an error tracking
|
||||||
|
// service, log to StatsD or dump the core on fatal errors.
|
||||||
|
Hooks LevelHooks
|
||||||
|
// All log entries pass through the formatter before logged to Out. The
|
||||||
|
// included formatters are `TextFormatter` and `JSONFormatter` for which
|
||||||
|
// TextFormatter is the default. In development (when a TTY is attached) it
|
||||||
|
// logs with colors, but to a file it wouldn't. You can easily implement your
|
||||||
|
// own that implements the `Formatter` interface, see the `README` or included
|
||||||
|
// formatters for examples.
|
||||||
|
Formatter Formatter
|
||||||
|
// The logging level the logger should log at. This is typically (and defaults
|
||||||
|
// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
|
||||||
|
// logged. `logrus.Debug` is useful in
|
||||||
|
Level Level
|
||||||
|
// Used to sync writing to the log.
|
||||||
|
mu sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// Creates a new logger. Configuration should be set by changing `Formatter`,
|
||||||
|
// `Out` and `Hooks` directly on the default logger instance. You can also just
|
||||||
|
// instantiate your own:
|
||||||
|
//
|
||||||
|
// var log = &Logger{
|
||||||
|
// Out: os.Stderr,
|
||||||
|
// Formatter: new(JSONFormatter),
|
||||||
|
// Hooks: make(LevelHooks),
|
||||||
|
// Level: logrus.DebugLevel,
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// It's recommended to make this a global instance called `log`.
|
||||||
|
func New() *Logger {
|
||||||
|
return &Logger{
|
||||||
|
Out: os.Stderr,
|
||||||
|
Formatter: new(TextFormatter),
|
||||||
|
Hooks: make(LevelHooks),
|
||||||
|
Level: InfoLevel,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Adds a field to the log entry, note that you it doesn't log until you call
|
||||||
|
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
|
||||||
|
// If you want multiple fields, use `WithFields`.
|
||||||
|
func (logger *Logger) WithField(key string, value interface{}) *Entry {
|
||||||
|
return NewEntry(logger).WithField(key, value)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Adds a struct of fields to the log entry. All it does is call `WithField` for
|
||||||
|
// each `Field`.
|
||||||
|
func (logger *Logger) WithFields(fields Fields) *Entry {
|
||||||
|
return NewEntry(logger).WithFields(fields)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add an error as single field to the log entry. All it does is call
|
||||||
|
// `WithError` for the given `error`.
|
||||||
|
func (logger *Logger) WithError(err error) *Entry {
|
||||||
|
return NewEntry(logger).WithError(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Debugf(format string, args ...interface{}) {
|
||||||
|
if logger.Level >= DebugLevel {
|
||||||
|
NewEntry(logger).Debugf(format, args...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Infof(format string, args ...interface{}) {
|
||||||
|
if logger.Level >= InfoLevel {
|
||||||
|
NewEntry(logger).Infof(format, args...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Printf(format string, args ...interface{}) {
|
||||||
|
NewEntry(logger).Printf(format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Warnf(format string, args ...interface{}) {
|
||||||
|
if logger.Level >= WarnLevel {
|
||||||
|
NewEntry(logger).Warnf(format, args...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Warningf(format string, args ...interface{}) {
|
||||||
|
if logger.Level >= WarnLevel {
|
||||||
|
NewEntry(logger).Warnf(format, args...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Errorf(format string, args ...interface{}) {
|
||||||
|
if logger.Level >= ErrorLevel {
|
||||||
|
NewEntry(logger).Errorf(format, args...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Fatalf(format string, args ...interface{}) {
|
||||||
|
if logger.Level >= FatalLevel {
|
||||||
|
NewEntry(logger).Fatalf(format, args...)
|
||||||
|
}
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Panicf(format string, args ...interface{}) {
|
||||||
|
if logger.Level >= PanicLevel {
|
||||||
|
NewEntry(logger).Panicf(format, args...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Debug(args ...interface{}) {
|
||||||
|
if logger.Level >= DebugLevel {
|
||||||
|
NewEntry(logger).Debug(args...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Info(args ...interface{}) {
|
||||||
|
if logger.Level >= InfoLevel {
|
||||||
|
NewEntry(logger).Info(args...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Print(args ...interface{}) {
|
||||||
|
NewEntry(logger).Info(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Warn(args ...interface{}) {
|
||||||
|
if logger.Level >= WarnLevel {
|
||||||
|
NewEntry(logger).Warn(args...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Warning(args ...interface{}) {
|
||||||
|
if logger.Level >= WarnLevel {
|
||||||
|
NewEntry(logger).Warn(args...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Error(args ...interface{}) {
|
||||||
|
if logger.Level >= ErrorLevel {
|
||||||
|
NewEntry(logger).Error(args...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Fatal(args ...interface{}) {
|
||||||
|
if logger.Level >= FatalLevel {
|
||||||
|
NewEntry(logger).Fatal(args...)
|
||||||
|
}
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Panic(args ...interface{}) {
|
||||||
|
if logger.Level >= PanicLevel {
|
||||||
|
NewEntry(logger).Panic(args...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Debugln(args ...interface{}) {
|
||||||
|
if logger.Level >= DebugLevel {
|
||||||
|
NewEntry(logger).Debugln(args...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Infoln(args ...interface{}) {
|
||||||
|
if logger.Level >= InfoLevel {
|
||||||
|
NewEntry(logger).Infoln(args...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Println(args ...interface{}) {
|
||||||
|
NewEntry(logger).Println(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Warnln(args ...interface{}) {
|
||||||
|
if logger.Level >= WarnLevel {
|
||||||
|
NewEntry(logger).Warnln(args...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Warningln(args ...interface{}) {
|
||||||
|
if logger.Level >= WarnLevel {
|
||||||
|
NewEntry(logger).Warnln(args...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Errorln(args ...interface{}) {
|
||||||
|
if logger.Level >= ErrorLevel {
|
||||||
|
NewEntry(logger).Errorln(args...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Fatalln(args ...interface{}) {
|
||||||
|
if logger.Level >= FatalLevel {
|
||||||
|
NewEntry(logger).Fatalln(args...)
|
||||||
|
}
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) Panicln(args ...interface{}) {
|
||||||
|
if logger.Level >= PanicLevel {
|
||||||
|
NewEntry(logger).Panicln(args...)
|
||||||
|
}
|
||||||
|
}
|
||||||
98
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.go
generated
vendored
Normal file
98
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.go
generated
vendored
Normal file
@ -0,0 +1,98 @@
|
|||||||
|
package logrus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Fields type, used to pass to `WithFields`.
|
||||||
|
type Fields map[string]interface{}
|
||||||
|
|
||||||
|
// Level type
|
||||||
|
type Level uint8
|
||||||
|
|
||||||
|
// Convert the Level to a string. E.g. PanicLevel becomes "panic".
|
||||||
|
func (level Level) String() string {
|
||||||
|
switch level {
|
||||||
|
case DebugLevel:
|
||||||
|
return "debug"
|
||||||
|
case InfoLevel:
|
||||||
|
return "info"
|
||||||
|
case WarnLevel:
|
||||||
|
return "warning"
|
||||||
|
case ErrorLevel:
|
||||||
|
return "error"
|
||||||
|
case FatalLevel:
|
||||||
|
return "fatal"
|
||||||
|
case PanicLevel:
|
||||||
|
return "panic"
|
||||||
|
}
|
||||||
|
|
||||||
|
return "unknown"
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseLevel takes a string level and returns the Logrus log level constant.
|
||||||
|
func ParseLevel(lvl string) (Level, error) {
|
||||||
|
switch lvl {
|
||||||
|
case "panic":
|
||||||
|
return PanicLevel, nil
|
||||||
|
case "fatal":
|
||||||
|
return FatalLevel, nil
|
||||||
|
case "error":
|
||||||
|
return ErrorLevel, nil
|
||||||
|
case "warn", "warning":
|
||||||
|
return WarnLevel, nil
|
||||||
|
case "info":
|
||||||
|
return InfoLevel, nil
|
||||||
|
case "debug":
|
||||||
|
return DebugLevel, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var l Level
|
||||||
|
return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
|
||||||
|
}
|
||||||
|
|
||||||
|
// These are the different logging levels. You can set the logging level to log
|
||||||
|
// on your instance of logger, obtained with `logrus.New()`.
|
||||||
|
const (
|
||||||
|
// PanicLevel level, highest level of severity. Logs and then calls panic with the
|
||||||
|
// message passed to Debug, Info, ...
|
||||||
|
PanicLevel Level = iota
|
||||||
|
// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
|
||||||
|
// logging level is set to Panic.
|
||||||
|
FatalLevel
|
||||||
|
// ErrorLevel level. Logs. Used for errors that should definitely be noted.
|
||||||
|
// Commonly used for hooks to send errors to an error tracking service.
|
||||||
|
ErrorLevel
|
||||||
|
// WarnLevel level. Non-critical entries that deserve eyes.
|
||||||
|
WarnLevel
|
||||||
|
// InfoLevel level. General operational entries about what's going on inside the
|
||||||
|
// application.
|
||||||
|
InfoLevel
|
||||||
|
// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
|
||||||
|
DebugLevel
|
||||||
|
)
|
||||||
|
|
||||||
|
// Won't compile if StdLogger can't be realized by a log.Logger
|
||||||
|
var (
|
||||||
|
_ StdLogger = &log.Logger{}
|
||||||
|
_ StdLogger = &Entry{}
|
||||||
|
_ StdLogger = &Logger{}
|
||||||
|
)
|
||||||
|
|
||||||
|
// StdLogger is what your logrus-enabled library should take, that way
|
||||||
|
// it'll accept a stdlib logger and a logrus logger. There's no standard
|
||||||
|
// interface, this is the closest we get, unfortunately.
|
||||||
|
type StdLogger interface {
|
||||||
|
Print(...interface{})
|
||||||
|
Printf(string, ...interface{})
|
||||||
|
Println(...interface{})
|
||||||
|
|
||||||
|
Fatal(...interface{})
|
||||||
|
Fatalf(string, ...interface{})
|
||||||
|
Fatalln(...interface{})
|
||||||
|
|
||||||
|
Panic(...interface{})
|
||||||
|
Panicf(string, ...interface{})
|
||||||
|
Panicln(...interface{})
|
||||||
|
}
|
||||||
9
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_bsd.go
generated
vendored
Normal file
9
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_bsd.go
generated
vendored
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
// +build darwin freebsd openbsd netbsd dragonfly
|
||||||
|
|
||||||
|
package logrus
|
||||||
|
|
||||||
|
import "syscall"
|
||||||
|
|
||||||
|
const ioctlReadTermios = syscall.TIOCGETA
|
||||||
|
|
||||||
|
type Termios syscall.Termios
|
||||||
12
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_linux.go
generated
vendored
Normal file
12
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_linux.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
// Based on ssh/terminal:
|
||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package logrus
|
||||||
|
|
||||||
|
import "syscall"
|
||||||
|
|
||||||
|
const ioctlReadTermios = syscall.TCGETS
|
||||||
|
|
||||||
|
type Termios syscall.Termios
|
||||||
21
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_notwindows.go
generated
vendored
Normal file
21
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_notwindows.go
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
// Based on ssh/terminal:
|
||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build linux darwin freebsd openbsd netbsd dragonfly
|
||||||
|
|
||||||
|
package logrus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IsTerminal returns true if stderr's file descriptor is a terminal.
|
||||||
|
func IsTerminal() bool {
|
||||||
|
fd := syscall.Stderr
|
||||||
|
var termios Termios
|
||||||
|
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
|
||||||
|
return err == 0
|
||||||
|
}
|
||||||
15
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_solaris.go
generated
vendored
Normal file
15
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_solaris.go
generated
vendored
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
// +build solaris
|
||||||
|
|
||||||
|
package logrus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IsTerminal returns true if the given file descriptor is a terminal.
|
||||||
|
func IsTerminal() bool {
|
||||||
|
_, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
|
||||||
|
return err == nil
|
||||||
|
}
|
||||||
27
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_windows.go
generated
vendored
Normal file
27
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_windows.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
// Based on ssh/terminal:
|
||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build windows
|
||||||
|
|
||||||
|
package logrus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
var kernel32 = syscall.NewLazyDLL("kernel32.dll")
|
||||||
|
|
||||||
|
var (
|
||||||
|
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
|
||||||
|
)
|
||||||
|
|
||||||
|
// IsTerminal returns true if stderr's file descriptor is a terminal.
|
||||||
|
func IsTerminal() bool {
|
||||||
|
fd := syscall.Stderr
|
||||||
|
var st uint32
|
||||||
|
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
|
||||||
|
return r != 0 && e == 0
|
||||||
|
}
|
||||||
161
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter.go
generated
vendored
Normal file
161
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter.go
generated
vendored
Normal file
@ -0,0 +1,161 @@
|
|||||||
|
package logrus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"runtime"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
nocolor = 0
|
||||||
|
red = 31
|
||||||
|
green = 32
|
||||||
|
yellow = 33
|
||||||
|
blue = 34
|
||||||
|
gray = 37
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
baseTimestamp time.Time
|
||||||
|
isTerminal bool
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
baseTimestamp = time.Now()
|
||||||
|
isTerminal = IsTerminal()
|
||||||
|
}
|
||||||
|
|
||||||
|
func miniTS() int {
|
||||||
|
return int(time.Since(baseTimestamp) / time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
type TextFormatter struct {
|
||||||
|
// Set to true to bypass checking for a TTY before outputting colors.
|
||||||
|
ForceColors bool
|
||||||
|
|
||||||
|
// Force disabling colors.
|
||||||
|
DisableColors bool
|
||||||
|
|
||||||
|
// Disable timestamp logging. useful when output is redirected to logging
|
||||||
|
// system that already adds timestamps.
|
||||||
|
DisableTimestamp bool
|
||||||
|
|
||||||
|
// Enable logging the full timestamp when a TTY is attached instead of just
|
||||||
|
// the time passed since beginning of execution.
|
||||||
|
FullTimestamp bool
|
||||||
|
|
||||||
|
// TimestampFormat to use for display when a full timestamp is printed
|
||||||
|
TimestampFormat string
|
||||||
|
|
||||||
|
// The fields are sorted by default for a consistent output. For applications
|
||||||
|
// that log extremely frequently and don't use the JSON formatter this may not
|
||||||
|
// be desired.
|
||||||
|
DisableSorting bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
|
||||||
|
var keys []string = make([]string, 0, len(entry.Data))
|
||||||
|
for k := range entry.Data {
|
||||||
|
keys = append(keys, k)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !f.DisableSorting {
|
||||||
|
sort.Strings(keys)
|
||||||
|
}
|
||||||
|
|
||||||
|
b := &bytes.Buffer{}
|
||||||
|
|
||||||
|
prefixFieldClashes(entry.Data)
|
||||||
|
|
||||||
|
isColorTerminal := isTerminal && (runtime.GOOS != "windows")
|
||||||
|
isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
|
||||||
|
|
||||||
|
timestampFormat := f.TimestampFormat
|
||||||
|
if timestampFormat == "" {
|
||||||
|
timestampFormat = DefaultTimestampFormat
|
||||||
|
}
|
||||||
|
if isColored {
|
||||||
|
f.printColored(b, entry, keys, timestampFormat)
|
||||||
|
} else {
|
||||||
|
if !f.DisableTimestamp {
|
||||||
|
f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
|
||||||
|
}
|
||||||
|
f.appendKeyValue(b, "level", entry.Level.String())
|
||||||
|
if entry.Message != "" {
|
||||||
|
f.appendKeyValue(b, "msg", entry.Message)
|
||||||
|
}
|
||||||
|
for _, key := range keys {
|
||||||
|
f.appendKeyValue(b, key, entry.Data[key])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
b.WriteByte('\n')
|
||||||
|
return b.Bytes(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
|
||||||
|
var levelColor int
|
||||||
|
switch entry.Level {
|
||||||
|
case DebugLevel:
|
||||||
|
levelColor = gray
|
||||||
|
case WarnLevel:
|
||||||
|
levelColor = yellow
|
||||||
|
case ErrorLevel, FatalLevel, PanicLevel:
|
||||||
|
levelColor = red
|
||||||
|
default:
|
||||||
|
levelColor = blue
|
||||||
|
}
|
||||||
|
|
||||||
|
levelText := strings.ToUpper(entry.Level.String())[0:4]
|
||||||
|
|
||||||
|
if !f.FullTimestamp {
|
||||||
|
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
|
||||||
|
} else {
|
||||||
|
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
|
||||||
|
}
|
||||||
|
for _, k := range keys {
|
||||||
|
v := entry.Data[k]
|
||||||
|
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func needsQuoting(text string) bool {
|
||||||
|
for _, ch := range text {
|
||||||
|
if !((ch >= 'a' && ch <= 'z') ||
|
||||||
|
(ch >= 'A' && ch <= 'Z') ||
|
||||||
|
(ch >= '0' && ch <= '9') ||
|
||||||
|
ch == '-' || ch == '.') {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
|
||||||
|
|
||||||
|
b.WriteString(key)
|
||||||
|
b.WriteByte('=')
|
||||||
|
|
||||||
|
switch value := value.(type) {
|
||||||
|
case string:
|
||||||
|
if needsQuoting(value) {
|
||||||
|
b.WriteString(value)
|
||||||
|
} else {
|
||||||
|
fmt.Fprintf(b, "%q", value)
|
||||||
|
}
|
||||||
|
case error:
|
||||||
|
errmsg := value.Error()
|
||||||
|
if needsQuoting(errmsg) {
|
||||||
|
b.WriteString(errmsg)
|
||||||
|
} else {
|
||||||
|
fmt.Fprintf(b, "%q", value)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
fmt.Fprint(b, value)
|
||||||
|
}
|
||||||
|
|
||||||
|
b.WriteByte(' ')
|
||||||
|
}
|
||||||
31
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/writer.go
generated
vendored
Normal file
31
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/writer.go
generated
vendored
Normal file
@ -0,0 +1,31 @@
|
|||||||
|
package logrus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"io"
|
||||||
|
"runtime"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (logger *Logger) Writer() *io.PipeWriter {
|
||||||
|
reader, writer := io.Pipe()
|
||||||
|
|
||||||
|
go logger.writerScanner(reader)
|
||||||
|
runtime.SetFinalizer(writer, writerFinalizer)
|
||||||
|
|
||||||
|
return writer
|
||||||
|
}
|
||||||
|
|
||||||
|
func (logger *Logger) writerScanner(reader *io.PipeReader) {
|
||||||
|
scanner := bufio.NewScanner(reader)
|
||||||
|
for scanner.Scan() {
|
||||||
|
logger.Print(scanner.Text())
|
||||||
|
}
|
||||||
|
if err := scanner.Err(); err != nil {
|
||||||
|
logger.Errorf("Error while reading from Writer: %s", err)
|
||||||
|
}
|
||||||
|
reader.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func writerFinalizer(writer *io.PipeWriter) {
|
||||||
|
writer.Close()
|
||||||
|
}
|
||||||
67
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.go
generated
vendored
Normal file
67
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.go
generated
vendored
Normal file
@ -0,0 +1,67 @@
|
|||||||
|
package opts
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ParseEnvFile reads a file with environment variables enumerated by lines
|
||||||
|
//
|
||||||
|
// ``Environment variable names used by the utilities in the Shell and
|
||||||
|
// Utilities volume of IEEE Std 1003.1-2001 consist solely of uppercase
|
||||||
|
// letters, digits, and the '_' (underscore) from the characters defined in
|
||||||
|
// Portable Character Set and do not begin with a digit. *But*, other
|
||||||
|
// characters may be permitted by an implementation; applications shall
|
||||||
|
// tolerate the presence of such names.''
|
||||||
|
// -- http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html
|
||||||
|
//
|
||||||
|
// As of #16585, it's up to application inside docker to validate or not
|
||||||
|
// environment variables, that's why we just strip leading whitespace and
|
||||||
|
// nothing more.
|
||||||
|
func ParseEnvFile(filename string) ([]string, error) {
|
||||||
|
fh, err := os.Open(filename)
|
||||||
|
if err != nil {
|
||||||
|
return []string{}, err
|
||||||
|
}
|
||||||
|
defer fh.Close()
|
||||||
|
|
||||||
|
lines := []string{}
|
||||||
|
scanner := bufio.NewScanner(fh)
|
||||||
|
for scanner.Scan() {
|
||||||
|
// trim the line from all leading whitespace first
|
||||||
|
line := strings.TrimLeft(scanner.Text(), whiteSpaces)
|
||||||
|
// line is not empty, and not starting with '#'
|
||||||
|
if len(line) > 0 && !strings.HasPrefix(line, "#") {
|
||||||
|
data := strings.SplitN(line, "=", 2)
|
||||||
|
|
||||||
|
// trim the front of a variable, but nothing else
|
||||||
|
variable := strings.TrimLeft(data[0], whiteSpaces)
|
||||||
|
if strings.ContainsAny(variable, whiteSpaces) {
|
||||||
|
return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' has white spaces", variable)}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(data) > 1 {
|
||||||
|
|
||||||
|
// pass the value through, no trimming
|
||||||
|
lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1]))
|
||||||
|
} else {
|
||||||
|
// if only a pass-through variable is given, clean it up.
|
||||||
|
lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), os.Getenv(line)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return lines, scanner.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
var whiteSpaces = " \t"
|
||||||
|
|
||||||
|
// ErrBadEnvVariable typed error for bad environment variable
|
||||||
|
type ErrBadEnvVariable struct {
|
||||||
|
msg string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e ErrBadEnvVariable) Error() string {
|
||||||
|
return fmt.Sprintf("poorly formatted environment: %s", e.msg)
|
||||||
|
}
|
||||||
146
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts.go
generated
vendored
Normal file
146
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts.go
generated
vendored
Normal file
@ -0,0 +1,146 @@
|
|||||||
|
package opts
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"net/url"
|
||||||
|
"runtime"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp://
|
||||||
|
// TODO Windows. DefaultHTTPPort is only used on Windows if a -H parameter
|
||||||
|
// is not supplied. A better longer term solution would be to use a named
|
||||||
|
// pipe as the default on the Windows daemon.
|
||||||
|
// These are the IANA registered port numbers for use with Docker
|
||||||
|
// see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker
|
||||||
|
DefaultHTTPPort = 2375 // Default HTTP Port
|
||||||
|
// DefaultTLSHTTPPort Default HTTP Port used when TLS enabled
|
||||||
|
DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port
|
||||||
|
// DefaultUnixSocket Path for the unix socket.
|
||||||
|
// Docker daemon by default always listens on the default unix socket
|
||||||
|
DefaultUnixSocket = "/var/run/docker.sock"
|
||||||
|
// DefaultTCPHost constant defines the default host string used by docker on Windows
|
||||||
|
DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort)
|
||||||
|
// DefaultTLSHost constant defines the default host string used by docker for TLS sockets
|
||||||
|
DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort)
|
||||||
|
)
|
||||||
|
|
||||||
|
// ValidateHost validates that the specified string is a valid host and returns it.
|
||||||
|
func ValidateHost(val string) (string, error) {
|
||||||
|
_, err := parseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, "", val)
|
||||||
|
if err != nil {
|
||||||
|
return val, err
|
||||||
|
}
|
||||||
|
// Note: unlike most flag validators, we don't return the mutated value here
|
||||||
|
// we need to know what the user entered later (using ParseHost) to adjust for tls
|
||||||
|
return val, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseHost and set defaults for a Daemon host string
|
||||||
|
func ParseHost(defaultHost, val string) (string, error) {
|
||||||
|
host, err := parseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, defaultHost, val)
|
||||||
|
if err != nil {
|
||||||
|
return val, err
|
||||||
|
}
|
||||||
|
return host, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host.
|
||||||
|
// Depending of the address specified, will use the defaultTCPAddr or defaultUnixAddr
|
||||||
|
// defaultUnixAddr must be a absolute file path (no `unix://` prefix)
|
||||||
|
// defaultTCPAddr must be the full `tcp://host:port` form
|
||||||
|
func parseDockerDaemonHost(defaultTCPAddr, defaultTLSHost, defaultUnixAddr, defaultAddr, addr string) (string, error) {
|
||||||
|
addr = strings.TrimSpace(addr)
|
||||||
|
if addr == "" {
|
||||||
|
if defaultAddr == defaultTLSHost {
|
||||||
|
return defaultTLSHost, nil
|
||||||
|
}
|
||||||
|
if runtime.GOOS != "windows" {
|
||||||
|
return fmt.Sprintf("unix://%s", defaultUnixAddr), nil
|
||||||
|
}
|
||||||
|
return defaultTCPAddr, nil
|
||||||
|
}
|
||||||
|
addrParts := strings.Split(addr, "://")
|
||||||
|
if len(addrParts) == 1 {
|
||||||
|
addrParts = []string{"tcp", addrParts[0]}
|
||||||
|
}
|
||||||
|
|
||||||
|
switch addrParts[0] {
|
||||||
|
case "tcp":
|
||||||
|
return parseTCPAddr(addrParts[1], defaultTCPAddr)
|
||||||
|
case "unix":
|
||||||
|
return parseUnixAddr(addrParts[1], defaultUnixAddr)
|
||||||
|
case "fd":
|
||||||
|
return addr, nil
|
||||||
|
default:
|
||||||
|
return "", fmt.Errorf("Invalid bind address format: %s", addr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseUnixAddr parses and validates that the specified address is a valid UNIX
// socket address. It returns a formatted UNIX socket address, either using the
// address parsed from addr, or the contents of defaultAddr if addr is a blank
// string.
func parseUnixAddr(addr string, defaultAddr string) (string, error) {
	trimmed := strings.TrimPrefix(addr, "unix://")
	if strings.Contains(trimmed, "://") {
		// A scheme survived the trim, so the proto was not unix.
		return "", fmt.Errorf("Invalid proto, expected unix: %s", trimmed)
	}
	if trimmed == "" {
		trimmed = defaultAddr
	}
	return fmt.Sprintf("unix://%s", trimmed), nil
}
|
||||||
|
|
||||||
|
// parseTCPAddr parses and validates that the specified address is a valid TCP
// address. It returns a formatted TCP address, either using the address parsed
// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string.
// tryAddr is expected to have already been Trim()'d
// defaultAddr must be in the full `tcp://host:port` form
func parseTCPAddr(tryAddr string, defaultAddr string) (string, error) {
	if tryAddr == "" || tryAddr == "tcp://" {
		return defaultAddr, nil
	}
	addr := strings.TrimPrefix(tryAddr, "tcp://")
	if strings.Contains(addr, "://") || addr == "" {
		return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr)
	}

	defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://")
	defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr)
	if err != nil {
		return "", err
	}
	// url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but
	// not 1.4. See https://github.com/golang/go/issues/12200 and
	// https://github.com/golang/go/issues/6530.
	if strings.HasSuffix(addr, "]:") {
		addr += defaultPort
	}

	u, err := url.Parse("tcp://" + addr)
	if err != nil {
		return "", err
	}

	host, port, err := net.SplitHostPort(u.Host)
	if err != nil {
		return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
	}

	// Missing pieces fall back to the default host/port.
	if host == "" {
		host = defaultHost
	}
	if port == "" {
		port = defaultPort
	}
	// Reject non-numeric ports. strconv.Atoi always yields 0 alongside any
	// error, so the previous "err != nil && p == 0" guard reduces to a plain
	// error check; the unused parsed value is dropped.
	if _, err := strconv.Atoi(port); err != nil {
		return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
	}

	return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil
}
|
||||||
8
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_unix.go
generated
vendored
Normal file
8
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_unix.go
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
// +build !windows
|
||||||
|
|
||||||
|
package opts
|
||||||
|
|
||||||
|
import "fmt"
|
||||||
|
|
||||||
|
// DefaultHost constant defines the default host string used by docker on other hosts than Windows
|
||||||
|
var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket)
|
||||||
6
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_windows.go
generated
vendored
Normal file
6
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_windows.go
generated
vendored
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
// +build windows
|
||||||
|
|
||||||
|
package opts
|
||||||
|
|
||||||
|
// DefaultHost constant defines the default host string used by docker on Windows
|
||||||
|
var DefaultHost = DefaultTCPHost
|
||||||
42
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go
generated
vendored
Normal file
42
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go
generated
vendored
Normal file
@ -0,0 +1,42 @@
|
|||||||
|
package opts
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IPOpt holds an IP. It is used to store values from CLI flags.
|
||||||
|
type IPOpt struct {
|
||||||
|
*net.IP
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewIPOpt creates a new IPOpt from a reference net.IP and a
|
||||||
|
// string representation of an IP. If the string is not a valid
|
||||||
|
// IP it will fallback to the specified reference.
|
||||||
|
func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt {
|
||||||
|
o := &IPOpt{
|
||||||
|
IP: ref,
|
||||||
|
}
|
||||||
|
o.Set(defaultVal)
|
||||||
|
return o
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set sets an IPv4 or IPv6 address from a given string. If the given
|
||||||
|
// string is not parseable as an IP address it returns an error.
|
||||||
|
func (o *IPOpt) Set(val string) error {
|
||||||
|
ip := net.ParseIP(val)
|
||||||
|
if ip == nil {
|
||||||
|
return fmt.Errorf("%s is not an ip address", val)
|
||||||
|
}
|
||||||
|
*o.IP = ip
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns the IP address stored in the IPOpt. If stored IP is a
|
||||||
|
// nil pointer, it returns an empty string.
|
||||||
|
func (o *IPOpt) String() string {
|
||||||
|
if *o.IP == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return o.IP.String()
|
||||||
|
}
|
||||||
252
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go
generated
vendored
Normal file
252
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go
generated
vendored
Normal file
@ -0,0 +1,252 @@
|
|||||||
|
package opts
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// alphaRegexp matches any single ASCII letter; used to reject
	// all-numeric "domains".
	alphaRegexp = regexp.MustCompile(`[a-zA-Z]`)
	// domainRegexp matches a dotted domain name (optional trailing dot and
	// trailing whitespace), capturing the name itself as group 1.
	// NOTE(review): the `(:?` sequences look like typos for the non-capturing
	// `(?:` — as written each group also matches an optional literal colon.
	// Left unchanged here since "fixing" it would alter matching and group
	// numbering; confirm against upstream before touching.
	domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
)
|
||||||
|
|
||||||
|
// ListOpts holds a list of values and a validation function.
|
||||||
|
type ListOpts struct {
|
||||||
|
values *[]string
|
||||||
|
validator ValidatorFctType
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewListOpts creates a new ListOpts with the specified validator.
|
||||||
|
func NewListOpts(validator ValidatorFctType) ListOpts {
|
||||||
|
var values []string
|
||||||
|
return *NewListOptsRef(&values, validator)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewListOptsRef creates a new ListOpts with the specified values and validator.
|
||||||
|
func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {
|
||||||
|
return &ListOpts{
|
||||||
|
values: values,
|
||||||
|
validator: validator,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (opts *ListOpts) String() string {
|
||||||
|
return fmt.Sprintf("%v", []string((*opts.values)))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set validates if needed the input value and add it to the
|
||||||
|
// internal slice.
|
||||||
|
func (opts *ListOpts) Set(value string) error {
|
||||||
|
if opts.validator != nil {
|
||||||
|
v, err := opts.validator(value)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
value = v
|
||||||
|
}
|
||||||
|
(*opts.values) = append((*opts.values), value)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete removes the specified element from the slice.
|
||||||
|
func (opts *ListOpts) Delete(key string) {
|
||||||
|
for i, k := range *opts.values {
|
||||||
|
if k == key {
|
||||||
|
(*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetMap returns the content of values in a map in order to avoid
|
||||||
|
// duplicates.
|
||||||
|
func (opts *ListOpts) GetMap() map[string]struct{} {
|
||||||
|
ret := make(map[string]struct{})
|
||||||
|
for _, k := range *opts.values {
|
||||||
|
ret[k] = struct{}{}
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAll returns the values of slice.
|
||||||
|
func (opts *ListOpts) GetAll() []string {
|
||||||
|
return (*opts.values)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAllOrEmpty returns the values of the slice
|
||||||
|
// or an empty slice when there are no values.
|
||||||
|
func (opts *ListOpts) GetAllOrEmpty() []string {
|
||||||
|
v := *opts.values
|
||||||
|
if v == nil {
|
||||||
|
return make([]string, 0)
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get checks the existence of the specified key.
|
||||||
|
func (opts *ListOpts) Get(key string) bool {
|
||||||
|
for _, k := range *opts.values {
|
||||||
|
if k == key {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Len returns the amount of element in the slice.
|
||||||
|
func (opts *ListOpts) Len() int {
|
||||||
|
return len((*opts.values))
|
||||||
|
}
|
||||||
|
|
||||||
|
//MapOpts holds a map of values and a validation function.
|
||||||
|
type MapOpts struct {
|
||||||
|
values map[string]string
|
||||||
|
validator ValidatorFctType
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set validates if needed the input value and add it to the
|
||||||
|
// internal map, by splitting on '='.
|
||||||
|
func (opts *MapOpts) Set(value string) error {
|
||||||
|
if opts.validator != nil {
|
||||||
|
v, err := opts.validator(value)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
value = v
|
||||||
|
}
|
||||||
|
vals := strings.SplitN(value, "=", 2)
|
||||||
|
if len(vals) == 1 {
|
||||||
|
(opts.values)[vals[0]] = ""
|
||||||
|
} else {
|
||||||
|
(opts.values)[vals[0]] = vals[1]
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAll returns the values of MapOpts as a map.
|
||||||
|
func (opts *MapOpts) GetAll() map[string]string {
|
||||||
|
return opts.values
|
||||||
|
}
|
||||||
|
|
||||||
|
func (opts *MapOpts) String() string {
|
||||||
|
return fmt.Sprintf("%v", map[string]string((opts.values)))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMapOpts creates a new MapOpts with the specified map of values and a validator.
|
||||||
|
func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
|
||||||
|
if values == nil {
|
||||||
|
values = make(map[string]string)
|
||||||
|
}
|
||||||
|
return &MapOpts{
|
||||||
|
values: values,
|
||||||
|
validator: validator,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidatorFctType defines a validator function that returns a validated string and/or an error.
type ValidatorFctType func(val string) (string, error)

// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error
type ValidatorFctListType func(val string) ([]string, error)

// ValidateAttach validates that the specified string is a valid attach option.
func ValidateAttach(val string) (string, error) {
	switch s := strings.ToLower(val); s {
	case "stdin", "stdout", "stderr":
		return s, nil
	}
	return val, fmt.Errorf("valid streams are STDIN, STDOUT and STDERR")
}
|
||||||
|
|
||||||
|
// ValidateEnv validates an environment variable and returns it.
|
||||||
|
// If no value is specified, it returns the current value using os.Getenv.
|
||||||
|
//
|
||||||
|
// As on ParseEnvFile and related to #16585, environment variable names
|
||||||
|
// are not validate what so ever, it's up to application inside docker
|
||||||
|
// to validate them or not.
|
||||||
|
func ValidateEnv(val string) (string, error) {
|
||||||
|
arr := strings.Split(val, "=")
|
||||||
|
if len(arr) > 1 {
|
||||||
|
return val, nil
|
||||||
|
}
|
||||||
|
if !doesEnvExist(val) {
|
||||||
|
return val, nil
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidateIPAddress validates an Ip address.
func ValidateIPAddress(val string) (string, error) {
	if ip := net.ParseIP(strings.TrimSpace(val)); ip != nil {
		// Return the canonical textual form, not the raw input.
		return ip.String(), nil
	}
	return "", fmt.Errorf("%s is not an ip address", val)
}
|
||||||
|
|
||||||
|
// ValidateMACAddress validates a MAC address.
func ValidateMACAddress(val string) (string, error) {
	// Unlike ValidateIPAddress, the original (untrimmed) input is returned.
	if _, err := net.ParseMAC(strings.TrimSpace(val)); err != nil {
		return "", err
	}
	return val, nil
}
|
||||||
|
|
||||||
|
// ValidateDNSSearch validates domain for resolvconf search configuration.
|
||||||
|
// A zero length domain is represented by a dot (.).
|
||||||
|
func ValidateDNSSearch(val string) (string, error) {
|
||||||
|
if val = strings.Trim(val, " "); val == "." {
|
||||||
|
return val, nil
|
||||||
|
}
|
||||||
|
return validateDomain(val)
|
||||||
|
}
|
||||||
|
|
||||||
|
func validateDomain(val string) (string, error) {
|
||||||
|
if alphaRegexp.FindString(val) == "" {
|
||||||
|
return "", fmt.Errorf("%s is not a valid domain", val)
|
||||||
|
}
|
||||||
|
ns := domainRegexp.FindSubmatch([]byte(val))
|
||||||
|
if len(ns) > 0 && len(ns[1]) < 255 {
|
||||||
|
return string(ns[1]), nil
|
||||||
|
}
|
||||||
|
return "", fmt.Errorf("%s is not a valid domain", val)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidateExtraHost validates that the specified string is a valid extrahost and returns it.
|
||||||
|
// ExtraHost are in the form of name:ip where the ip has to be a valid ip (ipv4 or ipv6).
|
||||||
|
func ValidateExtraHost(val string) (string, error) {
|
||||||
|
// allow for IPv6 addresses in extra hosts by only splitting on first ":"
|
||||||
|
arr := strings.SplitN(val, ":", 2)
|
||||||
|
if len(arr) != 2 || len(arr[0]) == 0 {
|
||||||
|
return "", fmt.Errorf("bad format for add-host: %q", val)
|
||||||
|
}
|
||||||
|
if _, err := ValidateIPAddress(arr[1]); err != nil {
|
||||||
|
return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1])
|
||||||
|
}
|
||||||
|
return val, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidateLabel validates that the specified string is a valid label, and returns it.
// Labels are in the form on key=value.
func ValidateLabel(val string) (string, error) {
	if !strings.Contains(val, "=") {
		return "", fmt.Errorf("bad attribute format: %s", val)
	}
	return val, nil
}
|
||||||
|
|
||||||
|
func doesEnvExist(name string) bool {
|
||||||
|
for _, entry := range os.Environ() {
|
||||||
|
parts := strings.SplitN(entry, "=", 2)
|
||||||
|
if parts[0] == name {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
6
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_unix.go
generated
vendored
Normal file
6
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_unix.go
generated
vendored
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
// +build !windows
|
||||||
|
|
||||||
|
package opts
|
||||||
|
|
||||||
|
// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag
// e.g. docker daemon -H tcp://:8080 (non-Windows platforms).
const DefaultHTTPHost = "localhost"
|
||||||
56
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_windows.go
generated
vendored
Normal file
56
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_windows.go
generated
vendored
Normal file
@ -0,0 +1,56 @@
|
|||||||
|
package opts
|
||||||
|
|
||||||
|
// TODO Windows. Identify bug in GOLang 1.5.1 and/or Windows Server 2016 TP4.
// @jhowardmsft, @swernli.
//
// On Windows TP4, a docker client talking to the local daemon with the
// default host "localhost" pauses for exactly one second per connection once
// the DNS cache has been flushed (ipconfig /flushdns) — even though client
// and daemon are on the same machine and not on a network. For example, with
// `docker run windowsservercore cmd` (a create followed by an attach), daemon
// debug logs show a one-second gap between the end of the create call (the
// final "After WriteJSON" entry) and the POST /v1.22/containers/.../attach
// arriving.
//
// The suspicion is either a bug introduced in GOLang 1.5.1, or a change in
// 1.5.1 (from 1.4.3) that exposes a bug in Windows TP4: the Windows
// networking stack is supposed to resolve "localhost" internally, without
// hitting DNS or even reading the hosts file (which is why localhost is
// commented out in the hosts file on Windows). The delay does not occur with
// a client built with Go 1.4.3 on the same TP4 build (regardless of which Go
// built the daemon), does not occur on Linux, and does reproduce with a
// cross-compiled Windows binary. Using the literal IPv4 loopback address
// avoids the delay entirely.
//
// Final note: This is a mitigation, not a 'real' fix. It is still
// susceptible to the delay in TP4 if a user were to do
// 'docker run -H=tcp://localhost:2375...' explicitly.

// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080
const DefaultHTTPHost = "127.0.0.1"
|
||||||
1
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/README.md
generated
vendored
Normal file
1
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/README.md
generated
vendored
Normal file
@ -0,0 +1 @@
|
|||||||
|
This code provides helper functions for dealing with archive files.
|
||||||
1049
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go
generated
vendored
Normal file
1049
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
112
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go
generated
vendored
Normal file
112
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go
generated
vendored
Normal file
@ -0,0 +1,112 @@
|
|||||||
|
// +build !windows
|
||||||
|
|
||||||
|
package archive
|
||||||
|
|
||||||
|
import (
|
||||||
|
"archive/tar"
|
||||||
|
"errors"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
||||||
|
)
|
||||||
|
|
||||||
|
// fixVolumePathPrefix does platform specific processing to ensure that if
|
||||||
|
// the path being passed in is not in a volume path format, convert it to one.
|
||||||
|
func fixVolumePathPrefix(srcPath string) string {
|
||||||
|
return srcPath
|
||||||
|
}
|
||||||
|
|
||||||
|
// getWalkRoot calculates the root path when performing a TarWithOptions.
|
||||||
|
// We use a separate function as this is platform specific. On Linux, we
|
||||||
|
// can't use filepath.Join(srcPath,include) because this will clean away
|
||||||
|
// a trailing "." or "/" which may be important.
|
||||||
|
func getWalkRoot(srcPath string, include string) string {
|
||||||
|
return srcPath + string(filepath.Separator) + include
|
||||||
|
}
|
||||||
|
|
||||||
|
// CanonicalTarNameForPath returns platform-specific filepath
|
||||||
|
// to canonical posix-style path for tar archival. p is relative
|
||||||
|
// path.
|
||||||
|
func CanonicalTarNameForPath(p string) (string, error) {
|
||||||
|
return p, nil // already unix-style
|
||||||
|
}
|
||||||
|
|
||||||
|
// chmodTarEntry is used to adjust the file permissions used in tar header based
|
||||||
|
// on the platform the archival is done.
|
||||||
|
|
||||||
|
func chmodTarEntry(perm os.FileMode) os.FileMode {
|
||||||
|
return perm // noop for unix as golang APIs provide perm bits correctly
|
||||||
|
}
|
||||||
|
|
||||||
|
func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
|
||||||
|
s, ok := stat.(*syscall.Stat_t)
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
err = errors.New("cannot convert stat value to syscall.Stat_t")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
inode = uint64(s.Ino)
|
||||||
|
|
||||||
|
// Currently go does not fill in the major/minors
|
||||||
|
if s.Mode&syscall.S_IFBLK != 0 ||
|
||||||
|
s.Mode&syscall.S_IFCHR != 0 {
|
||||||
|
hdr.Devmajor = int64(major(uint64(s.Rdev)))
|
||||||
|
hdr.Devminor = int64(minor(uint64(s.Rdev)))
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func getFileUIDGID(stat interface{}) (int, int, error) {
|
||||||
|
s, ok := stat.(*syscall.Stat_t)
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t")
|
||||||
|
}
|
||||||
|
return int(s.Uid), int(s.Gid), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// major extracts the major device number from a linux dev_t value.
func major(device uint64) uint64 {
	return (device >> 8) & 0xfff
}

// minor extracts the minor device number from a linux dev_t value.
func minor(device uint64) uint64 {
	return (device & 0xff) | ((device >> 12) & 0xfff00)
}
|
||||||
|
|
||||||
|
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
|
||||||
|
// createTarFile to handle the following types of header: Block; Char; Fifo
|
||||||
|
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
|
||||||
|
mode := uint32(hdr.Mode & 07777)
|
||||||
|
switch hdr.Typeflag {
|
||||||
|
case tar.TypeBlock:
|
||||||
|
mode |= syscall.S_IFBLK
|
||||||
|
case tar.TypeChar:
|
||||||
|
mode |= syscall.S_IFCHR
|
||||||
|
case tar.TypeFifo:
|
||||||
|
mode |= syscall.S_IFIFO
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
|
||||||
|
if hdr.Typeflag == tar.TypeLink {
|
||||||
|
if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
|
||||||
|
if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else if hdr.Typeflag != tar.TypeSymlink {
|
||||||
|
if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
70
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go
generated
vendored
Normal file
70
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go
generated
vendored
Normal file
@ -0,0 +1,70 @@
|
|||||||
|
// +build windows
|
||||||
|
|
||||||
|
package archive
|
||||||
|
|
||||||
|
import (
|
||||||
|
"archive/tar"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath"
|
||||||
|
)
|
||||||
|
|
||||||
|
// fixVolumePathPrefix does platform specific processing to ensure that if
|
||||||
|
// the path being passed in is not in a volume path format, convert it to one.
|
||||||
|
func fixVolumePathPrefix(srcPath string) string {
|
||||||
|
return longpath.AddPrefix(srcPath)
|
||||||
|
}
|
||||||
|
|
||||||
|
// getWalkRoot calculates the root path when performing a TarWithOptions.
|
||||||
|
// We use a separate function as this is platform specific.
|
||||||
|
func getWalkRoot(srcPath string, include string) string {
|
||||||
|
return filepath.Join(srcPath, include)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CanonicalTarNameForPath returns platform-specific filepath
|
||||||
|
// to canonical posix-style path for tar archival. p is relative
|
||||||
|
// path.
|
||||||
|
func CanonicalTarNameForPath(p string) (string, error) {
|
||||||
|
// windows: convert windows style relative path with backslashes
|
||||||
|
// into forward slashes. Since windows does not allow '/' or '\'
|
||||||
|
// in file names, it is mostly safe to replace however we must
|
||||||
|
// check just in case
|
||||||
|
if strings.Contains(p, "/") {
|
||||||
|
return "", fmt.Errorf("Windows path contains forward slash: %s", p)
|
||||||
|
}
|
||||||
|
return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// chmodTarEntry is used to adjust the file permissions used in tar header based
|
||||||
|
// on the platform the archival is done.
|
||||||
|
func chmodTarEntry(perm os.FileMode) os.FileMode {
|
||||||
|
perm &= 0755
|
||||||
|
// Add the x bit: make everything +x from windows
|
||||||
|
perm |= 0111
|
||||||
|
|
||||||
|
return perm
|
||||||
|
}
|
||||||
|
|
||||||
|
func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
|
||||||
|
// do nothing. no notion of Rdev, Inode, Nlink in stat on Windows
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
|
||||||
|
// createTarFile to handle the following types of header: Block; Char; Fifo
|
||||||
|
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getFileUIDGID(stat interface{}) (int, int, error) {
|
||||||
|
// no notion of file ownership mapping yet on Windows
|
||||||
|
return 0, 0, nil
|
||||||
|
}
|
||||||
416
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go
generated
vendored
Normal file
416
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go
generated
vendored
Normal file
@ -0,0 +1,416 @@
|
|||||||
|
package archive
|
||||||
|
|
||||||
|
import (
|
||||||
|
"archive/tar"
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
|
||||||
|
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools"
|
||||||
|
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools"
|
||||||
|
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ChangeType represents the change type.
type ChangeType int

const (
	// ChangeModify represents the modify operation.
	ChangeModify = iota
	// ChangeAdd represents the add operation.
	ChangeAdd
	// ChangeDelete represents the delete operation.
	ChangeDelete
)

// String renders the change kind as the single letter used in diff output:
// C (modify), A (add) or D (delete); any other value yields "".
func (c ChangeType) String() string {
	switch c {
	case ChangeAdd:
		return "A"
	case ChangeDelete:
		return "D"
	case ChangeModify:
		return "C"
	}
	return ""
}
|
||||||
|
|
||||||
|
// Change represents a change, it wraps the change type and path.
|
||||||
|
// It describes changes of the files in the path respect to the
|
||||||
|
// parent layers. The change could be modify, add, delete.
|
||||||
|
// This is used for layer diff.
|
||||||
|
type Change struct {
|
||||||
|
Path string
|
||||||
|
Kind ChangeType
|
||||||
|
}
|
||||||
|
|
||||||
|
func (change *Change) String() string {
|
||||||
|
return fmt.Sprintf("%s %s", change.Kind, change.Path)
|
||||||
|
}
|
||||||
|
|
||||||
|
// for sort.Sort
|
||||||
|
type changesByPath []Change
|
||||||
|
|
||||||
|
func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
|
||||||
|
func (c changesByPath) Len() int { return len(c) }
|
||||||
|
func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
|
||||||
|
|
||||||
|
// Gnu tar and the go tar writer don't have sub-second mtime
|
||||||
|
// precision, which is problematic when we apply changes via tar
|
||||||
|
// files, we handle this by comparing for exact times, *or* same
|
||||||
|
// second count and either a or b having exactly 0 nanoseconds
|
||||||
|
func sameFsTime(a, b time.Time) bool {
|
||||||
|
return a == b ||
|
||||||
|
(a.Unix() == b.Unix() &&
|
||||||
|
(a.Nanosecond() == 0 || b.Nanosecond() == 0))
|
||||||
|
}
|
||||||
|
|
||||||
|
func sameFsTimeSpec(a, b syscall.Timespec) bool {
|
||||||
|
return a.Sec == b.Sec &&
|
||||||
|
(a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Changes walks the path rw and determines changes for the files in the path,
|
||||||
|
// with respect to the parent layers
|
||||||
|
func Changes(layers []string, rw string) ([]Change, error) {
|
||||||
|
var (
|
||||||
|
changes []Change
|
||||||
|
changedDirs = make(map[string]struct{})
|
||||||
|
)
|
||||||
|
|
||||||
|
err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rebase path
|
||||||
|
path, err = filepath.Rel(rw, path)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// As this runs on the daemon side, file paths are OS specific.
|
||||||
|
path = filepath.Join(string(os.PathSeparator), path)
|
||||||
|
|
||||||
|
// Skip root
|
||||||
|
if path == string(os.PathSeparator) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Skip AUFS metadata
|
||||||
|
if matched, err := filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path); err != nil || matched {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
change := Change{
|
||||||
|
Path: path,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find out what kind of modification happened
|
||||||
|
file := filepath.Base(path)
|
||||||
|
// If there is a whiteout, then the file was removed
|
||||||
|
if strings.HasPrefix(file, WhiteoutPrefix) {
|
||||||
|
originalFile := file[len(WhiteoutPrefix):]
|
||||||
|
change.Path = filepath.Join(filepath.Dir(path), originalFile)
|
||||||
|
change.Kind = ChangeDelete
|
||||||
|
} else {
|
||||||
|
// Otherwise, the file was added
|
||||||
|
change.Kind = ChangeAdd
|
||||||
|
|
||||||
|
// ...Unless it already existed in a top layer, in which case, it's a modification
|
||||||
|
for _, layer := range layers {
|
||||||
|
stat, err := os.Stat(filepath.Join(layer, path))
|
||||||
|
if err != nil && !os.IsNotExist(err) {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
// The file existed in the top layer, so that's a modification
|
||||||
|
|
||||||
|
// However, if it's a directory, maybe it wasn't actually modified.
|
||||||
|
// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
|
||||||
|
if stat.IsDir() && f.IsDir() {
|
||||||
|
if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
|
||||||
|
// Both directories are the same, don't record the change
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
change.Kind = ChangeModify
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
|
||||||
|
// This block is here to ensure the change is recorded even if the
|
||||||
|
// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
|
||||||
|
// Check https://github.com/docker/docker/pull/13590 for details.
|
||||||
|
if f.IsDir() {
|
||||||
|
changedDirs[path] = struct{}{}
|
||||||
|
}
|
||||||
|
if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
|
||||||
|
parent := filepath.Dir(path)
|
||||||
|
if _, ok := changedDirs[parent]; !ok && parent != "/" {
|
||||||
|
changes = append(changes, Change{Path: parent, Kind: ChangeModify})
|
||||||
|
changedDirs[parent] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Record change
|
||||||
|
changes = append(changes, change)
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil && !os.IsNotExist(err) {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return changes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileInfo describes the information of a file.
|
||||||
|
type FileInfo struct {
|
||||||
|
parent *FileInfo
|
||||||
|
name string
|
||||||
|
stat *system.StatT
|
||||||
|
children map[string]*FileInfo
|
||||||
|
capability []byte
|
||||||
|
added bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// LookUp looks up the file information of a file.
|
||||||
|
func (info *FileInfo) LookUp(path string) *FileInfo {
|
||||||
|
// As this runs on the daemon side, file paths are OS specific.
|
||||||
|
parent := info
|
||||||
|
if path == string(os.PathSeparator) {
|
||||||
|
return info
|
||||||
|
}
|
||||||
|
|
||||||
|
pathElements := strings.Split(path, string(os.PathSeparator))
|
||||||
|
for _, elem := range pathElements {
|
||||||
|
if elem != "" {
|
||||||
|
child := parent.children[elem]
|
||||||
|
if child == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
parent = child
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return parent
|
||||||
|
}
|
||||||
|
|
||||||
|
func (info *FileInfo) path() string {
|
||||||
|
if info.parent == nil {
|
||||||
|
// As this runs on the daemon side, file paths are OS specific.
|
||||||
|
return string(os.PathSeparator)
|
||||||
|
}
|
||||||
|
return filepath.Join(info.parent.path(), info.name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
|
||||||
|
|
||||||
|
sizeAtEntry := len(*changes)
|
||||||
|
|
||||||
|
if oldInfo == nil {
|
||||||
|
// add
|
||||||
|
change := Change{
|
||||||
|
Path: info.path(),
|
||||||
|
Kind: ChangeAdd,
|
||||||
|
}
|
||||||
|
*changes = append(*changes, change)
|
||||||
|
info.added = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// We make a copy so we can modify it to detect additions
|
||||||
|
// also, we only recurse on the old dir if the new info is a directory
|
||||||
|
// otherwise any previous delete/change is considered recursive
|
||||||
|
oldChildren := make(map[string]*FileInfo)
|
||||||
|
if oldInfo != nil && info.isDir() {
|
||||||
|
for k, v := range oldInfo.children {
|
||||||
|
oldChildren[k] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for name, newChild := range info.children {
|
||||||
|
oldChild, _ := oldChildren[name]
|
||||||
|
if oldChild != nil {
|
||||||
|
// change?
|
||||||
|
oldStat := oldChild.stat
|
||||||
|
newStat := newChild.stat
|
||||||
|
// Note: We can't compare inode or ctime or blocksize here, because these change
|
||||||
|
// when copying a file into a container. However, that is not generally a problem
|
||||||
|
// because any content change will change mtime, and any status change should
|
||||||
|
// be visible when actually comparing the stat fields. The only time this
|
||||||
|
// breaks down is if some code intentionally hides a change by setting
|
||||||
|
// back mtime
|
||||||
|
if statDifferent(oldStat, newStat) ||
|
||||||
|
bytes.Compare(oldChild.capability, newChild.capability) != 0 {
|
||||||
|
change := Change{
|
||||||
|
Path: newChild.path(),
|
||||||
|
Kind: ChangeModify,
|
||||||
|
}
|
||||||
|
*changes = append(*changes, change)
|
||||||
|
newChild.added = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove from copy so we can detect deletions
|
||||||
|
delete(oldChildren, name)
|
||||||
|
}
|
||||||
|
|
||||||
|
newChild.addChanges(oldChild, changes)
|
||||||
|
}
|
||||||
|
for _, oldChild := range oldChildren {
|
||||||
|
// delete
|
||||||
|
change := Change{
|
||||||
|
Path: oldChild.path(),
|
||||||
|
Kind: ChangeDelete,
|
||||||
|
}
|
||||||
|
*changes = append(*changes, change)
|
||||||
|
}
|
||||||
|
|
||||||
|
// If there were changes inside this directory, we need to add it, even if the directory
|
||||||
|
// itself wasn't changed. This is needed to properly save and restore filesystem permissions.
|
||||||
|
// As this runs on the daemon side, file paths are OS specific.
|
||||||
|
if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
|
||||||
|
change := Change{
|
||||||
|
Path: info.path(),
|
||||||
|
Kind: ChangeModify,
|
||||||
|
}
|
||||||
|
// Let's insert the directory entry before the recently added entries located inside this dir
|
||||||
|
*changes = append(*changes, change) // just to resize the slice, will be overwritten
|
||||||
|
copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
|
||||||
|
(*changes)[sizeAtEntry] = change
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// Changes add changes to file information.
|
||||||
|
func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
|
||||||
|
var changes []Change
|
||||||
|
|
||||||
|
info.addChanges(oldInfo, &changes)
|
||||||
|
|
||||||
|
return changes
|
||||||
|
}
|
||||||
|
|
||||||
|
func newRootFileInfo() *FileInfo {
|
||||||
|
// As this runs on the daemon side, file paths are OS specific.
|
||||||
|
root := &FileInfo{
|
||||||
|
name: string(os.PathSeparator),
|
||||||
|
children: make(map[string]*FileInfo),
|
||||||
|
}
|
||||||
|
return root
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
|
||||||
|
// If oldDir is "", then all files in newDir will be Add-Changes.
|
||||||
|
func ChangesDirs(newDir, oldDir string) ([]Change, error) {
|
||||||
|
var (
|
||||||
|
oldRoot, newRoot *FileInfo
|
||||||
|
)
|
||||||
|
if oldDir == "" {
|
||||||
|
emptyDir, err := ioutil.TempDir("", "empty")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer os.Remove(emptyDir)
|
||||||
|
oldDir = emptyDir
|
||||||
|
}
|
||||||
|
oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return newRoot.Changes(oldRoot), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
|
||||||
|
func ChangesSize(newDir string, changes []Change) int64 {
|
||||||
|
var (
|
||||||
|
size int64
|
||||||
|
sf = make(map[uint64]struct{})
|
||||||
|
)
|
||||||
|
for _, change := range changes {
|
||||||
|
if change.Kind == ChangeModify || change.Kind == ChangeAdd {
|
||||||
|
file := filepath.Join(newDir, change.Path)
|
||||||
|
fileInfo, err := os.Lstat(file)
|
||||||
|
if err != nil {
|
||||||
|
logrus.Errorf("Can not stat %q: %s", file, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if fileInfo != nil && !fileInfo.IsDir() {
|
||||||
|
if hasHardlinks(fileInfo) {
|
||||||
|
inode := getIno(fileInfo)
|
||||||
|
if _, ok := sf[inode]; !ok {
|
||||||
|
size += fileInfo.Size()
|
||||||
|
sf[inode] = struct{}{}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
size += fileInfo.Size()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return size
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExportChanges produces an Archive from the provided changes, relative to dir.
|
||||||
|
func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (Archive, error) {
|
||||||
|
reader, writer := io.Pipe()
|
||||||
|
go func() {
|
||||||
|
ta := &tarAppender{
|
||||||
|
TarWriter: tar.NewWriter(writer),
|
||||||
|
Buffer: pools.BufioWriter32KPool.Get(nil),
|
||||||
|
SeenFiles: make(map[uint64]string),
|
||||||
|
UIDMaps: uidMaps,
|
||||||
|
GIDMaps: gidMaps,
|
||||||
|
}
|
||||||
|
// this buffer is needed for the duration of this piped stream
|
||||||
|
defer pools.BufioWriter32KPool.Put(ta.Buffer)
|
||||||
|
|
||||||
|
sort.Sort(changesByPath(changes))
|
||||||
|
|
||||||
|
// In general we log errors here but ignore them because
|
||||||
|
// during e.g. a diff operation the container can continue
|
||||||
|
// mutating the filesystem and we can see transient errors
|
||||||
|
// from this
|
||||||
|
for _, change := range changes {
|
||||||
|
if change.Kind == ChangeDelete {
|
||||||
|
whiteOutDir := filepath.Dir(change.Path)
|
||||||
|
whiteOutBase := filepath.Base(change.Path)
|
||||||
|
whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
|
||||||
|
timestamp := time.Now()
|
||||||
|
hdr := &tar.Header{
|
||||||
|
Name: whiteOut[1:],
|
||||||
|
Size: 0,
|
||||||
|
ModTime: timestamp,
|
||||||
|
AccessTime: timestamp,
|
||||||
|
ChangeTime: timestamp,
|
||||||
|
}
|
||||||
|
if err := ta.TarWriter.WriteHeader(hdr); err != nil {
|
||||||
|
logrus.Debugf("Can't write whiteout header: %s", err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
path := filepath.Join(dir, change.Path)
|
||||||
|
if err := ta.addTarFile(path, change.Path[1:]); err != nil {
|
||||||
|
logrus.Debugf("Can't add file %s to tar: %s", path, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make sure to check the error on Close.
|
||||||
|
if err := ta.TarWriter.Close(); err != nil {
|
||||||
|
logrus.Debugf("Can't close layer: %s", err)
|
||||||
|
}
|
||||||
|
if err := writer.Close(); err != nil {
|
||||||
|
logrus.Debugf("failed close Changes writer: %s", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
return reader, nil
|
||||||
|
}
|
||||||
285
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_linux.go
generated
vendored
Normal file
285
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_linux.go
generated
vendored
Normal file
@ -0,0 +1,285 @@
|
|||||||
|
package archive
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"sort"
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
||||||
|
)
|
||||||
|
|
||||||
|
// walker is used to implement collectFileInfoForChanges on linux. Where this
|
||||||
|
// method in general returns the entire contents of two directory trees, we
|
||||||
|
// optimize some FS calls out on linux. In particular, we take advantage of the
|
||||||
|
// fact that getdents(2) returns the inode of each file in the directory being
|
||||||
|
// walked, which, when walking two trees in parallel to generate a list of
|
||||||
|
// changes, can be used to prune subtrees without ever having to lstat(2) them
|
||||||
|
// directly. Eliminating stat calls in this way can save up to seconds on large
|
||||||
|
// images.
|
||||||
|
type walker struct {
|
||||||
|
dir1 string
|
||||||
|
dir2 string
|
||||||
|
root1 *FileInfo
|
||||||
|
root2 *FileInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
// collectFileInfoForChanges returns a complete representation of the trees
|
||||||
|
// rooted at dir1 and dir2, with one important exception: any subtree or
|
||||||
|
// leaf where the inode and device numbers are an exact match between dir1
|
||||||
|
// and dir2 will be pruned from the results. This method is *only* to be used
|
||||||
|
// to generating a list of changes between the two directories, as it does not
|
||||||
|
// reflect the full contents.
|
||||||
|
func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
|
||||||
|
w := &walker{
|
||||||
|
dir1: dir1,
|
||||||
|
dir2: dir2,
|
||||||
|
root1: newRootFileInfo(),
|
||||||
|
root2: newRootFileInfo(),
|
||||||
|
}
|
||||||
|
|
||||||
|
i1, err := os.Lstat(w.dir1)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
i2, err := os.Lstat(w.dir2)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := w.walk("/", i1, i2); err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return w.root1, w.root2, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Given a FileInfo, its path info, and a reference to the root of the tree
|
||||||
|
// being constructed, register this file with the tree.
|
||||||
|
func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
|
||||||
|
if fi == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
parent := root.LookUp(filepath.Dir(path))
|
||||||
|
if parent == nil {
|
||||||
|
return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path)
|
||||||
|
}
|
||||||
|
info := &FileInfo{
|
||||||
|
name: filepath.Base(path),
|
||||||
|
children: make(map[string]*FileInfo),
|
||||||
|
parent: parent,
|
||||||
|
}
|
||||||
|
cpath := filepath.Join(dir, path)
|
||||||
|
stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
info.stat = stat
|
||||||
|
info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
|
||||||
|
parent.children[info.name] = info
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Walk a subtree rooted at the same path in both trees being iterated. For
|
||||||
|
// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
|
||||||
|
func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
|
||||||
|
// Register these nodes with the return trees, unless we're still at the
|
||||||
|
// (already-created) roots:
|
||||||
|
if path != "/" {
|
||||||
|
if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
is1Dir := i1 != nil && i1.IsDir()
|
||||||
|
is2Dir := i2 != nil && i2.IsDir()
|
||||||
|
|
||||||
|
sameDevice := false
|
||||||
|
if i1 != nil && i2 != nil {
|
||||||
|
si1 := i1.Sys().(*syscall.Stat_t)
|
||||||
|
si2 := i2.Sys().(*syscall.Stat_t)
|
||||||
|
if si1.Dev == si2.Dev {
|
||||||
|
sameDevice = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If these files are both non-existent, or leaves (non-dirs), we are done.
|
||||||
|
if !is1Dir && !is2Dir {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetch the names of all the files contained in both directories being walked:
|
||||||
|
var names1, names2 []nameIno
|
||||||
|
if is1Dir {
|
||||||
|
names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if is2Dir {
|
||||||
|
names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// We have lists of the files contained in both parallel directories, sorted
|
||||||
|
// in the same order. Walk them in parallel, generating a unique merged list
|
||||||
|
// of all items present in either or both directories.
|
||||||
|
var names []string
|
||||||
|
ix1 := 0
|
||||||
|
ix2 := 0
|
||||||
|
|
||||||
|
for {
|
||||||
|
if ix1 >= len(names1) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if ix2 >= len(names2) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
ni1 := names1[ix1]
|
||||||
|
ni2 := names2[ix2]
|
||||||
|
|
||||||
|
switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) {
|
||||||
|
case -1: // ni1 < ni2 -- advance ni1
|
||||||
|
// we will not encounter ni1 in names2
|
||||||
|
names = append(names, ni1.name)
|
||||||
|
ix1++
|
||||||
|
case 0: // ni1 == ni2
|
||||||
|
if ni1.ino != ni2.ino || !sameDevice {
|
||||||
|
names = append(names, ni1.name)
|
||||||
|
}
|
||||||
|
ix1++
|
||||||
|
ix2++
|
||||||
|
case 1: // ni1 > ni2 -- advance ni2
|
||||||
|
// we will not encounter ni2 in names1
|
||||||
|
names = append(names, ni2.name)
|
||||||
|
ix2++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for ix1 < len(names1) {
|
||||||
|
names = append(names, names1[ix1].name)
|
||||||
|
ix1++
|
||||||
|
}
|
||||||
|
for ix2 < len(names2) {
|
||||||
|
names = append(names, names2[ix2].name)
|
||||||
|
ix2++
|
||||||
|
}
|
||||||
|
|
||||||
|
// For each of the names present in either or both of the directories being
|
||||||
|
// iterated, stat the name under each root, and recurse the pair of them:
|
||||||
|
for _, name := range names {
|
||||||
|
fname := filepath.Join(path, name)
|
||||||
|
var cInfo1, cInfo2 os.FileInfo
|
||||||
|
if is1Dir {
|
||||||
|
cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
|
||||||
|
if err != nil && !os.IsNotExist(err) {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if is2Dir {
|
||||||
|
cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
|
||||||
|
if err != nil && !os.IsNotExist(err) {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err = w.walk(fname, cInfo1, cInfo2); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// {name,inode} pairs used to support the early-pruning logic of the walker type
|
||||||
|
type nameIno struct {
|
||||||
|
name string
|
||||||
|
ino uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
type nameInoSlice []nameIno
|
||||||
|
|
||||||
|
func (s nameInoSlice) Len() int { return len(s) }
|
||||||
|
func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||||
|
func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }
|
||||||
|
|
||||||
|
// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
|
||||||
|
// numbers further up the stack when reading directory contents. Unlike
|
||||||
|
// os.Readdirnames, which returns a list of filenames, this function returns a
|
||||||
|
// list of {filename,inode} pairs.
|
||||||
|
func readdirnames(dirname string) (names []nameIno, err error) {
|
||||||
|
var (
|
||||||
|
size = 100
|
||||||
|
buf = make([]byte, 4096)
|
||||||
|
nbuf int
|
||||||
|
bufp int
|
||||||
|
nb int
|
||||||
|
)
|
||||||
|
|
||||||
|
f, err := os.Open(dirname)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
names = make([]nameIno, 0, size) // Empty with room to grow.
|
||||||
|
for {
|
||||||
|
// Refill the buffer if necessary
|
||||||
|
if bufp >= nbuf {
|
||||||
|
bufp = 0
|
||||||
|
nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux
|
||||||
|
if nbuf < 0 {
|
||||||
|
nbuf = 0
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, os.NewSyscallError("readdirent", err)
|
||||||
|
}
|
||||||
|
if nbuf <= 0 {
|
||||||
|
break // EOF
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Drain the buffer
|
||||||
|
nb, names = parseDirent(buf[bufp:nbuf], names)
|
||||||
|
bufp += nb
|
||||||
|
}
|
||||||
|
|
||||||
|
sl := nameInoSlice(names)
|
||||||
|
sort.Sort(sl)
|
||||||
|
return sl, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseDirent is a minor modification of syscall.ParseDirent (linux version)
|
||||||
|
// which returns {name,inode} pairs instead of just names.
|
||||||
|
func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
|
||||||
|
origlen := len(buf)
|
||||||
|
for len(buf) > 0 {
|
||||||
|
dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0]))
|
||||||
|
buf = buf[dirent.Reclen:]
|
||||||
|
if dirent.Ino == 0 { // File absent in directory.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
|
||||||
|
var name = string(bytes[0:clen(bytes[:])])
|
||||||
|
if name == "." || name == ".." { // Useless names
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
names = append(names, nameIno{name, dirent.Ino})
|
||||||
|
}
|
||||||
|
return origlen - len(buf), names
|
||||||
|
}
|
||||||
|
|
||||||
|
func clen(n []byte) int {
|
||||||
|
for i := 0; i < len(n); i++ {
|
||||||
|
if n[i] == 0 {
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return len(n)
|
||||||
|
}
|
||||||
97
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_other.go
generated
vendored
Normal file
97
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_other.go
generated
vendored
Normal file
@ -0,0 +1,97 @@
|
|||||||
|
// +build !linux
|
||||||
|
|
||||||
|
package archive
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
||||||
|
)
|
||||||
|
|
||||||
|
func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
|
||||||
|
var (
|
||||||
|
oldRoot, newRoot *FileInfo
|
||||||
|
err1, err2 error
|
||||||
|
errs = make(chan error, 2)
|
||||||
|
)
|
||||||
|
go func() {
|
||||||
|
oldRoot, err1 = collectFileInfo(oldDir)
|
||||||
|
errs <- err1
|
||||||
|
}()
|
||||||
|
go func() {
|
||||||
|
newRoot, err2 = collectFileInfo(newDir)
|
||||||
|
errs <- err2
|
||||||
|
}()
|
||||||
|
|
||||||
|
// block until both routines have returned
|
||||||
|
for i := 0; i < 2; i++ {
|
||||||
|
if err := <-errs; err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return oldRoot, newRoot, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func collectFileInfo(sourceDir string) (*FileInfo, error) {
|
||||||
|
root := newRootFileInfo()
|
||||||
|
|
||||||
|
err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rebase path
|
||||||
|
relPath, err := filepath.Rel(sourceDir, path)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// As this runs on the daemon side, file paths are OS specific.
|
||||||
|
relPath = filepath.Join(string(os.PathSeparator), relPath)
|
||||||
|
|
||||||
|
// See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
|
||||||
|
// Temporary workaround. If the returned path starts with two backslashes,
|
||||||
|
// trim it down to a single backslash. Only relevant on Windows.
|
||||||
|
if runtime.GOOS == "windows" {
|
||||||
|
if strings.HasPrefix(relPath, `\\`) {
|
||||||
|
relPath = relPath[1:]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if relPath == string(os.PathSeparator) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
parent := root.LookUp(filepath.Dir(relPath))
|
||||||
|
if parent == nil {
|
||||||
|
return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
|
||||||
|
}
|
||||||
|
|
||||||
|
info := &FileInfo{
|
||||||
|
name: filepath.Base(relPath),
|
||||||
|
children: make(map[string]*FileInfo),
|
||||||
|
parent: parent,
|
||||||
|
}
|
||||||
|
|
||||||
|
s, err := system.Lstat(path)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
info.stat = s
|
||||||
|
|
||||||
|
info.capability, _ = system.Lgetxattr(path, "security.capability")
|
||||||
|
|
||||||
|
parent.children[info.name] = info
|
||||||
|
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return root, nil
|
||||||
|
}
|
||||||
36
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_unix.go
generated
vendored
Normal file
36
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_unix.go
generated
vendored
Normal file
@ -0,0 +1,36 @@
|
|||||||
|
// +build !windows
|
||||||
|
|
||||||
|
package archive
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
||||||
|
)
|
||||||
|
|
||||||
|
func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
|
||||||
|
// Don't look at size for dirs, its not a good measure of change
|
||||||
|
if oldStat.Mode() != newStat.Mode() ||
|
||||||
|
oldStat.UID() != newStat.UID() ||
|
||||||
|
oldStat.GID() != newStat.GID() ||
|
||||||
|
oldStat.Rdev() != newStat.Rdev() ||
|
||||||
|
// Don't look at size for dirs, its not a good measure of change
|
||||||
|
(oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR &&
|
||||||
|
(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (info *FileInfo) isDir() bool {
|
||||||
|
return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func getIno(fi os.FileInfo) uint64 {
|
||||||
|
return uint64(fi.Sys().(*syscall.Stat_t).Ino)
|
||||||
|
}
|
||||||
|
|
||||||
|
func hasHardlinks(fi os.FileInfo) bool {
|
||||||
|
return fi.Sys().(*syscall.Stat_t).Nlink > 1
|
||||||
|
}
|
||||||
30
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_windows.go
generated
vendored
Normal file
30
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_windows.go
generated
vendored
Normal file
@ -0,0 +1,30 @@
|
|||||||
|
package archive
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
||||||
|
)
|
||||||
|
|
||||||
|
func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
|
||||||
|
|
||||||
|
// Don't look at size for dirs, its not a good measure of change
|
||||||
|
if oldStat.ModTime() != newStat.ModTime() ||
|
||||||
|
oldStat.Mode() != newStat.Mode() ||
|
||||||
|
oldStat.Size() != newStat.Size() && !oldStat.IsDir() {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (info *FileInfo) isDir() bool {
|
||||||
|
return info.parent == nil || info.stat.IsDir()
|
||||||
|
}
|
||||||
|
|
||||||
|
func getIno(fi os.FileInfo) (inode uint64) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func hasHardlinks(fi os.FileInfo) bool {
|
||||||
|
return false
|
||||||
|
}
|
||||||
458
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy.go
generated
vendored
Normal file
458
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy.go
generated
vendored
Normal file
@ -0,0 +1,458 @@
|
|||||||
|
package archive
|
||||||
|
|
||||||
|
import (
|
||||||
|
"archive/tar"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
|
||||||
|
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Errors used or returned by this file.
|
||||||
|
var (
|
||||||
|
ErrNotDirectory = errors.New("not a directory")
|
||||||
|
ErrDirNotExists = errors.New("no such directory")
|
||||||
|
ErrCannotCopyDir = errors.New("cannot copy directory")
|
||||||
|
ErrInvalidCopySource = errors.New("invalid copy source content")
|
||||||
|
)
|
||||||
|
|
||||||
|
// PreserveTrailingDotOrSeparator returns the given cleaned path (after
|
||||||
|
// processing using any utility functions from the path or filepath stdlib
|
||||||
|
// packages) and appends a trailing `/.` or `/` if its corresponding original
|
||||||
|
// path (from before being processed by utility functions from the path or
|
||||||
|
// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
|
||||||
|
// path already ends in a `.` path segment, then another is not added. If the
|
||||||
|
// clean path already ends in a path separator, then another is not added.
|
||||||
|
func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string {
|
||||||
|
// Ensure paths are in platform semantics
|
||||||
|
cleanedPath = normalizePath(cleanedPath)
|
||||||
|
originalPath = normalizePath(originalPath)
|
||||||
|
|
||||||
|
if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
|
||||||
|
if !hasTrailingPathSeparator(cleanedPath) {
|
||||||
|
// Add a separator if it doesn't already end with one (a cleaned
|
||||||
|
// path would only end in a separator if it is the root).
|
||||||
|
cleanedPath += string(filepath.Separator)
|
||||||
|
}
|
||||||
|
cleanedPath += "."
|
||||||
|
}
|
||||||
|
|
||||||
|
if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) {
|
||||||
|
cleanedPath += string(filepath.Separator)
|
||||||
|
}
|
||||||
|
|
||||||
|
return cleanedPath
|
||||||
|
}
|
||||||
|
|
||||||
|
// assertsDirectory returns whether the given path is
|
||||||
|
// asserted to be a directory, i.e., the path ends with
|
||||||
|
// a trailing '/' or `/.`, assuming a path separator of `/`.
|
||||||
|
func assertsDirectory(path string) bool {
|
||||||
|
return hasTrailingPathSeparator(path) || specifiesCurrentDir(path)
|
||||||
|
}
|
||||||
|
|
||||||
|
// hasTrailingPathSeparator reports whether the given path ends with the
// system's path separator character.
func hasTrailingPathSeparator(path string) bool {
	if path == "" {
		return false
	}
	last := path[len(path)-1]
	return os.IsPathSeparator(last)
}
|
||||||
|
|
||||||
|
// specifiesCurrentDir reports whether the given path specifies a "current
// directory", i.e., its last path segment is `.`.
func specifiesCurrentDir(path string) bool {
	base := filepath.Base(path)
	return base == "."
}
|
||||||
|
|
||||||
|
// SplitPathDirEntry splits the given path between its directory name and its
|
||||||
|
// basename by first cleaning the path but preserves a trailing "." if the
|
||||||
|
// original path specified the current directory.
|
||||||
|
func SplitPathDirEntry(path string) (dir, base string) {
|
||||||
|
cleanedPath := filepath.Clean(normalizePath(path))
|
||||||
|
|
||||||
|
if specifiesCurrentDir(path) {
|
||||||
|
cleanedPath += string(filepath.Separator) + "."
|
||||||
|
}
|
||||||
|
|
||||||
|
return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TarResource archives the resource described by the given CopyInfo to a Tar
|
||||||
|
// archive. A non-nil error is returned if sourcePath does not exist or is
|
||||||
|
// asserted to be a directory but exists as another type of file.
|
||||||
|
//
|
||||||
|
// This function acts as a convenient wrapper around TarWithOptions, which
|
||||||
|
// requires a directory as the source path. TarResource accepts either a
|
||||||
|
// directory or a file path and correctly sets the Tar options.
|
||||||
|
func TarResource(sourceInfo CopyInfo) (content Archive, err error) {
|
||||||
|
return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TarResourceRebase is like TarResource but renames the first path element of
|
||||||
|
// items in the resulting tar archive to match the given rebaseName if not "".
|
||||||
|
func TarResourceRebase(sourcePath, rebaseName string) (content Archive, err error) {
|
||||||
|
sourcePath = normalizePath(sourcePath)
|
||||||
|
if _, err = os.Lstat(sourcePath); err != nil {
|
||||||
|
// Catches the case where the source does not exist or is not a
|
||||||
|
// directory if asserted to be a directory, as this also causes an
|
||||||
|
// error.
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Separate the source path between it's directory and
|
||||||
|
// the entry in that directory which we are archiving.
|
||||||
|
sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
|
||||||
|
|
||||||
|
filter := []string{sourceBase}
|
||||||
|
|
||||||
|
logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
|
||||||
|
|
||||||
|
return TarWithOptions(sourceDir, &TarOptions{
|
||||||
|
Compression: Uncompressed,
|
||||||
|
IncludeFiles: filter,
|
||||||
|
IncludeSourceDir: true,
|
||||||
|
RebaseNames: map[string]string{
|
||||||
|
sourceBase: rebaseName,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// CopyInfo holds basic info about the source
// or destination path of a copy operation.
type CopyInfo struct {
	// Path is the local path of the resource.
	Path string
	// Exists indicates whether a resource is present at Path.
	Exists bool
	// IsDir indicates whether the resource at Path is a directory.
	IsDir bool
	// RebaseName, when non-empty, is the name archive entries for this
	// resource should be renamed to (set when symlink resolution changed
	// the final path component).
	RebaseName string
}
|
||||||
|
|
||||||
|
// CopyInfoSourcePath stats the given path to create a CopyInfo
|
||||||
|
// struct representing that resource for the source of an archive copy
|
||||||
|
// operation. The given path should be an absolute local path. A source path
|
||||||
|
// has all symlinks evaluated that appear before the last path separator ("/"
|
||||||
|
// on Unix). As it is to be a copy source, the path must exist.
|
||||||
|
func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
|
||||||
|
// normalize the file path and then evaluate the symbol link
|
||||||
|
// we will use the target file instead of the symbol link if
|
||||||
|
// followLink is set
|
||||||
|
path = normalizePath(path)
|
||||||
|
|
||||||
|
resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
|
||||||
|
if err != nil {
|
||||||
|
return CopyInfo{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
stat, err := os.Lstat(resolvedPath)
|
||||||
|
if err != nil {
|
||||||
|
return CopyInfo{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return CopyInfo{
|
||||||
|
Path: resolvedPath,
|
||||||
|
Exists: true,
|
||||||
|
IsDir: stat.IsDir(),
|
||||||
|
RebaseName: rebaseName,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CopyInfoDestinationPath stats the given path to create a CopyInfo
// struct representing that resource for the destination of an archive copy
// operation. The given path should be an absolute local path.
//
// Unlike CopyInfoSourcePath, the destination path is fully resolved: a
// symlink in the final path component is followed (up to a bounded number
// of hops) so that the copy writes to the link target, not the link.
func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
	maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
	path = normalizePath(path)
	originalPath := path

	stat, err := os.Lstat(path)

	if err == nil && stat.Mode()&os.ModeSymlink == 0 {
		// The path exists and is not a symlink.
		return CopyInfo{
			Path:   path,
			Exists: true,
			IsDir:  stat.IsDir(),
		}, nil
	}

	// While the path is a symlink.
	for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
		if n > maxSymlinkIter {
			// Don't follow symlinks more than this arbitrary number of times.
			return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
		}

		// The path is a symbolic link. We need to evaluate it so that the
		// destination of the copy operation is the link target and not the
		// link itself. This is notably different than CopyInfoSourcePath which
		// only evaluates symlinks before the last appearing path separator.
		// Also note that it is okay if the last path element is a broken
		// symlink as the copy operation should create the target.
		var linkTarget string

		linkTarget, err = os.Readlink(path)
		if err != nil {
			return CopyInfo{}, err
		}

		if !system.IsAbs(linkTarget) {
			// Join with the parent directory.
			dstParent, _ := SplitPathDirEntry(path)
			linkTarget = filepath.Join(dstParent, linkTarget)
		}

		// Follow the link and re-stat; the loop exits once the target is not
		// itself a symlink or no longer exists (err != nil).
		path = linkTarget
		stat, err = os.Lstat(path)
	}

	if err != nil {
		// It's okay if the destination path doesn't exist. We can still
		// continue the copy operation if the parent directory exists.
		if !os.IsNotExist(err) {
			return CopyInfo{}, err
		}

		// Ensure destination parent dir exists.
		dstParent, _ := SplitPathDirEntry(path)

		parentDirStat, err := os.Lstat(dstParent)
		if err != nil {
			return CopyInfo{}, err
		}
		if !parentDirStat.IsDir() {
			return CopyInfo{}, ErrNotDirectory
		}

		// The destination does not exist but its parent does: report a
		// non-existent destination at the fully-resolved path.
		return CopyInfo{Path: path}, nil
	}

	// The path exists after resolving symlinks.
	return CopyInfo{
		Path:   path,
		Exists: true,
		IsDir:  stat.IsDir(),
	}, nil
}
|
||||||
|
|
||||||
|
// PrepareArchiveCopy prepares the given srcContent archive, which should
// contain the archived resource described by srcInfo, to the destination
// described by dstInfo. Returns the possibly modified content archive along
// with the path to the destination directory which it should be extracted to.
//
// The cases are evaluated in order; the three rebasing cases share the same
// rename logic (srcBase -> dstBase, honoring any srcInfo.RebaseName).
func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) {
	// Ensure in platform semantics
	srcInfo.Path = normalizePath(srcInfo.Path)
	dstInfo.Path = normalizePath(dstInfo.Path)

	// Separate the destination path between its directory and base
	// components in case the source archive contents need to be rebased.
	dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
	_, srcBase := SplitPathDirEntry(srcInfo.Path)

	switch {
	case dstInfo.Exists && dstInfo.IsDir:
		// The destination exists as a directory. No alteration
		// to srcContent is needed as its contents can be
		// simply extracted to the destination directory.
		return dstInfo.Path, ioutil.NopCloser(srcContent), nil
	case dstInfo.Exists && srcInfo.IsDir:
		// The destination exists as some type of file and the source
		// content is a directory. This is an error condition since
		// you cannot copy a directory to an existing file location.
		return "", nil, ErrCannotCopyDir
	case dstInfo.Exists:
		// The destination exists as some type of file and the source content
		// is also a file. The source content entry will have to be renamed to
		// have a basename which matches the destination path's basename.
		if len(srcInfo.RebaseName) != 0 {
			srcBase = srcInfo.RebaseName
		}
		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
	case srcInfo.IsDir:
		// The destination does not exist and the source content is an archive
		// of a directory. The archive should be extracted to the parent of
		// the destination path instead, and when it is, the directory that is
		// created as a result should take the name of the destination path.
		// The source content entries will have to be renamed to have a
		// basename which matches the destination path's basename.
		if len(srcInfo.RebaseName) != 0 {
			srcBase = srcInfo.RebaseName
		}
		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
	case assertsDirectory(dstInfo.Path):
		// The destination does not exist and is asserted to be created as a
		// directory, but the source content is not a directory. This is an
		// error condition since you cannot create a directory from a file
		// source.
		return "", nil, ErrDirNotExists
	default:
		// The last remaining case is when the destination does not exist, is
		// not asserted to be a directory, and the source content is not an
		// archive of a directory. It this case, the destination file will need
		// to be created when the archive is extracted and the source content
		// entry will have to be renamed to have a basename which matches the
		// destination path's basename.
		if len(srcInfo.RebaseName) != 0 {
			srcBase = srcInfo.RebaseName
		}
		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
	}

}
|
||||||
|
|
||||||
|
// RebaseArchiveEntries rewrites the given srcContent archive replacing
// an occurrence of oldBase with newBase at the beginning of entry names.
//
// The returned archive is fed by a background goroutine; any error reading
// srcContent or writing rebased entries is delivered to the reader of the
// returned archive through the pipe via CloseWithError.
func RebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive {
	if oldBase == string(os.PathSeparator) {
		// If oldBase specifies the root directory, use an empty string as
		// oldBase instead so that newBase doesn't replace the path separator
		// that all paths will start with.
		oldBase = ""
	}

	rebased, w := io.Pipe()

	go func() {
		srcTar := tar.NewReader(srcContent)
		rebasedTar := tar.NewWriter(w)

		for {
			hdr, err := srcTar.Next()
			if err == io.EOF {
				// Signals end of archive.
				rebasedTar.Close()
				w.Close()
				return
			}
			if err != nil {
				w.CloseWithError(err)
				return
			}

			// Rename only the first occurrence of oldBase in the entry name.
			hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)

			if err = rebasedTar.WriteHeader(hdr); err != nil {
				w.CloseWithError(err)
				return
			}

			if _, err = io.Copy(rebasedTar, srcTar); err != nil {
				w.CloseWithError(err)
				return
			}
		}
	}()

	return rebased
}
|
||||||
|
|
||||||
|
// CopyResource performs an archive copy from the given source path to the
|
||||||
|
// given destination path. The source path MUST exist and the destination
|
||||||
|
// path's parent directory must exist.
|
||||||
|
func CopyResource(srcPath, dstPath string, followLink bool) error {
|
||||||
|
var (
|
||||||
|
srcInfo CopyInfo
|
||||||
|
err error
|
||||||
|
)
|
||||||
|
|
||||||
|
// Ensure in platform semantics
|
||||||
|
srcPath = normalizePath(srcPath)
|
||||||
|
dstPath = normalizePath(dstPath)
|
||||||
|
|
||||||
|
// Clean the source and destination paths.
|
||||||
|
srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
|
||||||
|
dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
|
||||||
|
|
||||||
|
if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
content, err := TarResource(srcInfo)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer content.Close()
|
||||||
|
|
||||||
|
return CopyTo(content, srcInfo, dstPath)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CopyTo handles extracting the given content whose
|
||||||
|
// entries should be sourced from srcInfo to dstPath.
|
||||||
|
func CopyTo(content Reader, srcInfo CopyInfo, dstPath string) error {
|
||||||
|
// The destination path need not exist, but CopyInfoDestinationPath will
|
||||||
|
// ensure that at least the parent directory exists.
|
||||||
|
dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer copyArchive.Close()
|
||||||
|
|
||||||
|
options := &TarOptions{
|
||||||
|
NoLchown: true,
|
||||||
|
NoOverwriteDirNonDir: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
return Untar(copyArchive, dstDir, options)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResolveHostSourcePath decides real path need to be copied with parameters such as
// whether to follow symbol link or not, if followLink is true, resolvedPath will return
// link target of any symbol link file, else it will only resolve symlink of directory
// but return symbol link file itself without resolving.
func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
	if followLink {
		// Resolve symlinks along the entire path, including the final
		// component, then compute any rebase name the resolution caused.
		resolvedPath, err = filepath.EvalSymlinks(path)
		if err != nil {
			return
		}

		resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
	} else {
		dirPath, basePath := filepath.Split(path)

		// if not follow symbol link, then resolve symbol link of parent dir
		var resolvedDirPath string
		resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
		if err != nil {
			return
		}
		// resolvedDirPath will have been cleaned (no trailing path separators) so
		// we can manually join it with the base path element.
		resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
		// NOTE(review): a trailing separator with a changed basename appears
		// to trigger renaming of archived entries back to the requested name
		// — confirm against GetRebaseName's equivalent logic.
		if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
			rebaseName = filepath.Base(path)
		}
	}
	return resolvedPath, rebaseName, nil
}
|
||||||
|
|
||||||
|
// GetRebaseName normalizes and compares path and resolvedPath,
|
||||||
|
// return completed resolved path and rebased file name
|
||||||
|
func GetRebaseName(path, resolvedPath string) (string, string) {
|
||||||
|
// linkTarget will have been cleaned (no trailing path separators and dot) so
|
||||||
|
// we can manually join it with them
|
||||||
|
var rebaseName string
|
||||||
|
if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) {
|
||||||
|
resolvedPath += string(filepath.Separator) + "."
|
||||||
|
}
|
||||||
|
|
||||||
|
if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) {
|
||||||
|
resolvedPath += string(filepath.Separator)
|
||||||
|
}
|
||||||
|
|
||||||
|
if filepath.Base(path) != filepath.Base(resolvedPath) {
|
||||||
|
// In the case where the path had a trailing separator and a symlink
|
||||||
|
// evaluation has changed the last path component, we will need to
|
||||||
|
// rebase the name in the archive that is being copied to match the
|
||||||
|
// originally requested name.
|
||||||
|
rebaseName = filepath.Base(path)
|
||||||
|
}
|
||||||
|
return resolvedPath, rebaseName
|
||||||
|
}
|
||||||
11
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_unix.go
generated
vendored
Normal file
11
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_unix.go
generated
vendored
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
// +build !windows
|
||||||
|
|
||||||
|
package archive
|
||||||
|
|
||||||
|
import (
|
||||||
|
"path/filepath"
|
||||||
|
)
|
||||||
|
|
||||||
|
// normalizePath converts the given path to platform semantics: on Unix,
// slash-separated form.
func normalizePath(path string) string {
	return filepath.ToSlash(path)
}
|
||||||
9
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_windows.go
generated
vendored
Normal file
9
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_windows.go
generated
vendored
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
package archive
|
||||||
|
|
||||||
|
import (
|
||||||
|
"path/filepath"
|
||||||
|
)
|
||||||
|
|
||||||
|
// normalizePath converts the given path to platform semantics: on Windows,
// backslash-separated form.
func normalizePath(path string) string {
	return filepath.FromSlash(path)
}
|
||||||
279
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go
generated
vendored
Normal file
279
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go
generated
vendored
Normal file
@ -0,0 +1,279 @@
|
|||||||
|
package archive
|
||||||
|
|
||||||
|
import (
|
||||||
|
"archive/tar"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
|
||||||
|
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools"
|
||||||
|
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools"
|
||||||
|
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
||||||
|
)
|
||||||
|
|
||||||
|
// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be
|
||||||
|
// compressed or uncompressed.
|
||||||
|
// Returns the size in bytes of the contents of the layer.
|
||||||
|
func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, err error) {
|
||||||
|
tr := tar.NewReader(layer)
|
||||||
|
trBuf := pools.BufioReader32KPool.Get(tr)
|
||||||
|
defer pools.BufioReader32KPool.Put(trBuf)
|
||||||
|
|
||||||
|
var dirs []*tar.Header
|
||||||
|
unpackedPaths := make(map[string]struct{})
|
||||||
|
|
||||||
|
if options == nil {
|
||||||
|
options = &TarOptions{}
|
||||||
|
}
|
||||||
|
if options.ExcludePatterns == nil {
|
||||||
|
options.ExcludePatterns = []string{}
|
||||||
|
}
|
||||||
|
remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
aufsTempdir := ""
|
||||||
|
aufsHardlinks := make(map[string]*tar.Header)
|
||||||
|
|
||||||
|
if options == nil {
|
||||||
|
options = &TarOptions{}
|
||||||
|
}
|
||||||
|
// Iterate through the files in the archive.
|
||||||
|
for {
|
||||||
|
hdr, err := tr.Next()
|
||||||
|
if err == io.EOF {
|
||||||
|
// end of tar archive
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
size += hdr.Size
|
||||||
|
|
||||||
|
// Normalize name, for safety and for a simple is-root check
|
||||||
|
hdr.Name = filepath.Clean(hdr.Name)
|
||||||
|
|
||||||
|
// Windows does not support filenames with colons in them. Ignore
|
||||||
|
// these files. This is not a problem though (although it might
|
||||||
|
// appear that it is). Let's suppose a client is running docker pull.
|
||||||
|
// The daemon it points to is Windows. Would it make sense for the
|
||||||
|
// client to be doing a docker pull Ubuntu for example (which has files
|
||||||
|
// with colons in the name under /usr/share/man/man3)? No, absolutely
|
||||||
|
// not as it would really only make sense that they were pulling a
|
||||||
|
// Windows image. However, for development, it is necessary to be able
|
||||||
|
// to pull Linux images which are in the repository.
|
||||||
|
//
|
||||||
|
// TODO Windows. Once the registry is aware of what images are Windows-
|
||||||
|
// specific or Linux-specific, this warning should be changed to an error
|
||||||
|
// to cater for the situation where someone does manage to upload a Linux
|
||||||
|
// image but have it tagged as Windows inadvertently.
|
||||||
|
if runtime.GOOS == "windows" {
|
||||||
|
if strings.Contains(hdr.Name, ":") {
|
||||||
|
logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Note as these operations are platform specific, so must the slash be.
|
||||||
|
if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
|
||||||
|
// Not the root directory, ensure that the parent directory exists.
|
||||||
|
// This happened in some tests where an image had a tarfile without any
|
||||||
|
// parent directories.
|
||||||
|
parent := filepath.Dir(hdr.Name)
|
||||||
|
parentPath := filepath.Join(dest, parent)
|
||||||
|
|
||||||
|
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
|
||||||
|
err = system.MkdirAll(parentPath, 0600)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Skip AUFS metadata dirs
|
||||||
|
if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
|
||||||
|
// Regular files inside /.wh..wh.plnk can be used as hardlink targets
|
||||||
|
// We don't want this directory, but we need the files in them so that
|
||||||
|
// such hardlinks can be resolved.
|
||||||
|
if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
|
||||||
|
basename := filepath.Base(hdr.Name)
|
||||||
|
aufsHardlinks[basename] = hdr
|
||||||
|
if aufsTempdir == "" {
|
||||||
|
if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
defer os.RemoveAll(aufsTempdir)
|
||||||
|
}
|
||||||
|
if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if hdr.Name != WhiteoutOpaqueDir {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
path := filepath.Join(dest, hdr.Name)
|
||||||
|
rel, err := filepath.Rel(dest, path)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Note as these operations are platform specific, so must the slash be.
|
||||||
|
if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
|
||||||
|
return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
|
||||||
|
}
|
||||||
|
base := filepath.Base(path)
|
||||||
|
|
||||||
|
if strings.HasPrefix(base, WhiteoutPrefix) {
|
||||||
|
dir := filepath.Dir(path)
|
||||||
|
if base == WhiteoutOpaqueDir {
|
||||||
|
_, err := os.Lstat(dir)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
err = nil // parent was deleted
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if path == dir {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if _, exists := unpackedPaths[path]; !exists {
|
||||||
|
err := os.RemoveAll(path)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
originalBase := base[len(WhiteoutPrefix):]
|
||||||
|
originalPath := filepath.Join(dir, originalBase)
|
||||||
|
if err := os.RemoveAll(originalPath); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// If path exits we almost always just want to remove and replace it.
|
||||||
|
// The only exception is when it is a directory *and* the file from
|
||||||
|
// the layer is also a directory. Then we want to merge them (i.e.
|
||||||
|
// just apply the metadata from the layer).
|
||||||
|
if fi, err := os.Lstat(path); err == nil {
|
||||||
|
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
|
||||||
|
if err := os.RemoveAll(path); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
trBuf.Reset(tr)
|
||||||
|
srcData := io.Reader(trBuf)
|
||||||
|
srcHdr := hdr
|
||||||
|
|
||||||
|
// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
|
||||||
|
// we manually retarget these into the temporary files we extracted them into
|
||||||
|
if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
|
||||||
|
linkBasename := filepath.Base(hdr.Linkname)
|
||||||
|
srcHdr = aufsHardlinks[linkBasename]
|
||||||
|
if srcHdr == nil {
|
||||||
|
return 0, fmt.Errorf("Invalid aufs hardlink")
|
||||||
|
}
|
||||||
|
tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
defer tmpFile.Close()
|
||||||
|
srcData = tmpFile
|
||||||
|
}
|
||||||
|
|
||||||
|
// if the options contain a uid & gid maps, convert header uid/gid
|
||||||
|
// entries using the maps such that lchown sets the proper mapped
|
||||||
|
// uid/gid after writing the file. We only perform this mapping if
|
||||||
|
// the file isn't already owned by the remapped root UID or GID, as
|
||||||
|
// that specific uid/gid has no mapping from container -> host, and
|
||||||
|
// those files already have the proper ownership for inside the
|
||||||
|
// container.
|
||||||
|
if srcHdr.Uid != remappedRootUID {
|
||||||
|
xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
srcHdr.Uid = xUID
|
||||||
|
}
|
||||||
|
if srcHdr.Gid != remappedRootGID {
|
||||||
|
xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
srcHdr.Gid = xGID
|
||||||
|
}
|
||||||
|
if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Directory mtimes must be handled at the end to avoid further
|
||||||
|
// file creation in them to modify the directory mtime
|
||||||
|
if hdr.Typeflag == tar.TypeDir {
|
||||||
|
dirs = append(dirs, hdr)
|
||||||
|
}
|
||||||
|
unpackedPaths[path] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, hdr := range dirs {
|
||||||
|
path := filepath.Join(dest, hdr.Name)
|
||||||
|
if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return size, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ApplyLayer parses a diff in the standard layer format from `layer`,
|
||||||
|
// and applies it to the directory `dest`. The stream `layer` can be
|
||||||
|
// compressed or uncompressed.
|
||||||
|
// Returns the size in bytes of the contents of the layer.
|
||||||
|
func ApplyLayer(dest string, layer Reader) (int64, error) {
|
||||||
|
return applyLayerHandler(dest, layer, &TarOptions{}, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ApplyUncompressedLayer parses a diff in the standard layer format from
|
||||||
|
// `layer`, and applies it to the directory `dest`. The stream `layer`
|
||||||
|
// can only be uncompressed.
|
||||||
|
// Returns the size in bytes of the contents of the layer.
|
||||||
|
func ApplyUncompressedLayer(dest string, layer Reader, options *TarOptions) (int64, error) {
|
||||||
|
return applyLayerHandler(dest, layer, options, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
// do the bulk load of ApplyLayer, but allow for not calling DecompressStream
|
||||||
|
func applyLayerHandler(dest string, layer Reader, options *TarOptions, decompress bool) (int64, error) {
|
||||||
|
dest = filepath.Clean(dest)
|
||||||
|
|
||||||
|
// We need to be able to set any perms
|
||||||
|
oldmask, err := system.Umask(0)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
|
||||||
|
|
||||||
|
if decompress {
|
||||||
|
layer, err = DecompressStream(layer)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return UnpackLayer(dest, layer, options)
|
||||||
|
}
|
||||||
97
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/example_changes.go
generated
vendored
Normal file
97
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/example_changes.go
generated
vendored
Normal file
@ -0,0 +1,97 @@
|
|||||||
|
// +build ignore
|
||||||
|
|
||||||
|
// Simple tool to create an archive stream from an old and new directory
|
||||||
|
//
|
||||||
|
// By default it will stream the comparison of two temporary directories with junk files
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
|
||||||
|
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
|
||||||
|
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
flDebug = flag.Bool("D", false, "debugging output")
|
||||||
|
flNewDir = flag.String("newdir", "", "")
|
||||||
|
flOldDir = flag.String("olddir", "", "")
|
||||||
|
log = logrus.New()
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
flag.Usage = func() {
|
||||||
|
fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
|
||||||
|
fmt.Printf("%s [OPTIONS]\n", os.Args[0])
|
||||||
|
flag.PrintDefaults()
|
||||||
|
}
|
||||||
|
flag.Parse()
|
||||||
|
log.Out = os.Stderr
|
||||||
|
if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
|
||||||
|
logrus.SetLevel(logrus.DebugLevel)
|
||||||
|
}
|
||||||
|
var newDir, oldDir string
|
||||||
|
|
||||||
|
if len(*flNewDir) == 0 {
|
||||||
|
var err error
|
||||||
|
newDir, err = ioutil.TempDir("", "docker-test-newDir")
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
defer os.RemoveAll(newDir)
|
||||||
|
if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
newDir = *flNewDir
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(*flOldDir) == 0 {
|
||||||
|
oldDir, err := ioutil.TempDir("", "docker-test-oldDir")
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
defer os.RemoveAll(oldDir)
|
||||||
|
} else {
|
||||||
|
oldDir = *flOldDir
|
||||||
|
}
|
||||||
|
|
||||||
|
changes, err := archive.ChangesDirs(newDir, oldDir)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
a, err := archive.ExportChanges(newDir, changes)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
defer a.Close()
|
||||||
|
|
||||||
|
i, err := io.Copy(os.Stdout, a)
|
||||||
|
if err != nil && err != io.EOF {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
|
||||||
|
}
|
||||||
|
|
||||||
|
func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
|
||||||
|
fileData := []byte("fooo")
|
||||||
|
for n := 0; n < numberOfFiles; n++ {
|
||||||
|
fileName := fmt.Sprintf("file-%d", n)
|
||||||
|
if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
if makeLinks {
|
||||||
|
if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
totalSize := numberOfFiles * len(fileData)
|
||||||
|
return totalSize, nil
|
||||||
|
}
|
||||||
16
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_linux.go
generated
vendored
Normal file
16
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_linux.go
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
package archive
|
||||||
|
|
||||||
|
import (
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func timeToTimespec(time time.Time) (ts syscall.Timespec) {
|
||||||
|
if time.IsZero() {
|
||||||
|
// Return UTIME_OMIT special value
|
||||||
|
ts.Sec = 0
|
||||||
|
ts.Nsec = ((1 << 30) - 2)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return syscall.NsecToTimespec(time.UnixNano())
|
||||||
|
}
|
||||||
16
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_unsupported.go
generated
vendored
Normal file
16
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_unsupported.go
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
// +build !linux
|
||||||
|
|
||||||
|
package archive
|
||||||
|
|
||||||
|
import (
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func timeToTimespec(time time.Time) (ts syscall.Timespec) {
|
||||||
|
nsec := int64(0)
|
||||||
|
if !time.IsZero() {
|
||||||
|
nsec = time.UnixNano()
|
||||||
|
}
|
||||||
|
return syscall.NsecToTimespec(nsec)
|
||||||
|
}
|
||||||
23
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/whiteouts.go
generated
vendored
Normal file
23
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/whiteouts.go
generated
vendored
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
package archive
|
||||||
|
|
||||||
|
// Whiteouts are files with a special meaning for the layered filesystem.
|
||||||
|
// Docker uses AUFS whiteout files inside exported archives. In other
|
||||||
|
// filesystems these files are generated/handled on tar creation/extraction.
|
||||||
|
|
||||||
|
// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a
|
||||||
|
// filename this means that file has been removed from the base layer.
|
||||||
|
const WhiteoutPrefix = ".wh."
|
||||||
|
|
||||||
|
// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
|
||||||
|
// for removing an actual file. Normally these files are excluded from exported
|
||||||
|
// archives.
|
||||||
|
const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
|
||||||
|
|
||||||
|
// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
|
||||||
|
// layers. Normally these should not go into exported archives and all changed
|
||||||
|
// hardlinks should be copied to the top layer.
|
||||||
|
const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"
|
||||||
|
|
||||||
|
// WhiteoutOpaqueDir file means directory has been made opaque - meaning
|
||||||
|
// readdir calls to this directory do not follow to lower layers.
|
||||||
|
const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
|
||||||
59
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap.go
generated
vendored
Normal file
59
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap.go
generated
vendored
Normal file
@ -0,0 +1,59 @@
|
|||||||
|
package archive
|
||||||
|
|
||||||
|
import (
|
||||||
|
"archive/tar"
|
||||||
|
"bytes"
|
||||||
|
"io/ioutil"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Generate generates a new archive from the content provided
|
||||||
|
// as input.
|
||||||
|
//
|
||||||
|
// `files` is a sequence of path/content pairs. A new file is
|
||||||
|
// added to the archive for each pair.
|
||||||
|
// If the last pair is incomplete, the file is created with an
|
||||||
|
// empty content. For example:
|
||||||
|
//
|
||||||
|
// Generate("foo.txt", "hello world", "emptyfile")
|
||||||
|
//
|
||||||
|
// The above call will return an archive with 2 files:
|
||||||
|
// * ./foo.txt with content "hello world"
|
||||||
|
// * ./empty with empty content
|
||||||
|
//
|
||||||
|
// FIXME: stream content instead of buffering
|
||||||
|
// FIXME: specify permissions and other archive metadata
|
||||||
|
func Generate(input ...string) (Archive, error) {
|
||||||
|
files := parseStringPairs(input...)
|
||||||
|
buf := new(bytes.Buffer)
|
||||||
|
tw := tar.NewWriter(buf)
|
||||||
|
for _, file := range files {
|
||||||
|
name, content := file[0], file[1]
|
||||||
|
hdr := &tar.Header{
|
||||||
|
Name: name,
|
||||||
|
Size: int64(len(content)),
|
||||||
|
}
|
||||||
|
if err := tw.WriteHeader(hdr); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if _, err := tw.Write([]byte(content)); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := tw.Close(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ioutil.NopCloser(buf), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseStringPairs(input ...string) (output [][2]string) {
|
||||||
|
output = make([][2]string, 0, len(input)/2+1)
|
||||||
|
for i := 0; i < len(input); i += 2 {
|
||||||
|
var pair [2]string
|
||||||
|
pair[0] = input[i]
|
||||||
|
if i+1 < len(input) {
|
||||||
|
pair[1] = input[i+1]
|
||||||
|
}
|
||||||
|
output = append(output, pair)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
279
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go
generated
vendored
Normal file
279
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go
generated
vendored
Normal file
@ -0,0 +1,279 @@
|
|||||||
|
package fileutils
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
"text/scanner"
|
||||||
|
|
||||||
|
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
// exclusion return true if the specified pattern is an exclusion
|
||||||
|
func exclusion(pattern string) bool {
|
||||||
|
return pattern[0] == '!'
|
||||||
|
}
|
||||||
|
|
||||||
|
// empty return true if the specified pattern is empty
|
||||||
|
func empty(pattern string) bool {
|
||||||
|
return pattern == ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// CleanPatterns takes a slice of patterns returns a new
|
||||||
|
// slice of patterns cleaned with filepath.Clean, stripped
|
||||||
|
// of any empty patterns and lets the caller know whether the
|
||||||
|
// slice contains any exception patterns (prefixed with !).
|
||||||
|
func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) {
|
||||||
|
// Loop over exclusion patterns and:
|
||||||
|
// 1. Clean them up.
|
||||||
|
// 2. Indicate whether we are dealing with any exception rules.
|
||||||
|
// 3. Error if we see a single exclusion marker on it's own (!).
|
||||||
|
cleanedPatterns := []string{}
|
||||||
|
patternDirs := [][]string{}
|
||||||
|
exceptions := false
|
||||||
|
for _, pattern := range patterns {
|
||||||
|
// Eliminate leading and trailing whitespace.
|
||||||
|
pattern = strings.TrimSpace(pattern)
|
||||||
|
if empty(pattern) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if exclusion(pattern) {
|
||||||
|
if len(pattern) == 1 {
|
||||||
|
return nil, nil, false, errors.New("Illegal exclusion pattern: !")
|
||||||
|
}
|
||||||
|
exceptions = true
|
||||||
|
}
|
||||||
|
pattern = filepath.Clean(pattern)
|
||||||
|
cleanedPatterns = append(cleanedPatterns, pattern)
|
||||||
|
if exclusion(pattern) {
|
||||||
|
pattern = pattern[1:]
|
||||||
|
}
|
||||||
|
patternDirs = append(patternDirs, strings.Split(pattern, "/"))
|
||||||
|
}
|
||||||
|
|
||||||
|
return cleanedPatterns, patternDirs, exceptions, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Matches returns true if file matches any of the patterns
|
||||||
|
// and isn't excluded by any of the subsequent patterns.
|
||||||
|
func Matches(file string, patterns []string) (bool, error) {
|
||||||
|
file = filepath.Clean(file)
|
||||||
|
|
||||||
|
if file == "." {
|
||||||
|
// Don't let them exclude everything, kind of silly.
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
patterns, patDirs, _, err := CleanPatterns(patterns)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return OptimizedMatches(file, patterns, patDirs)
|
||||||
|
}
|
||||||
|
|
||||||
|
// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go.
|
||||||
|
// It will assume that the inputs have been preprocessed and therefore the function
|
||||||
|
// doesn't need to do as much error checking and clean-up. This was done to avoid
|
||||||
|
// repeating these steps on each file being checked during the archive process.
|
||||||
|
// The more generic fileutils.Matches() can't make these assumptions.
|
||||||
|
func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) {
|
||||||
|
matched := false
|
||||||
|
parentPath := filepath.Dir(file)
|
||||||
|
parentPathDirs := strings.Split(parentPath, "/")
|
||||||
|
|
||||||
|
for i, pattern := range patterns {
|
||||||
|
negative := false
|
||||||
|
|
||||||
|
if exclusion(pattern) {
|
||||||
|
negative = true
|
||||||
|
pattern = pattern[1:]
|
||||||
|
}
|
||||||
|
|
||||||
|
match, err := regexpMatch(pattern, file)
|
||||||
|
if err != nil {
|
||||||
|
return false, fmt.Errorf("Error in pattern (%s): %s", pattern, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !match && parentPath != "." {
|
||||||
|
// Check to see if the pattern matches one of our parent dirs.
|
||||||
|
if len(patDirs[i]) <= len(parentPathDirs) {
|
||||||
|
match, _ = regexpMatch(strings.Join(patDirs[i], "/"),
|
||||||
|
strings.Join(parentPathDirs[:len(patDirs[i])], "/"))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if match {
|
||||||
|
matched = !negative
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if matched {
|
||||||
|
logrus.Debugf("Skipping excluded path: %s", file)
|
||||||
|
}
|
||||||
|
|
||||||
|
return matched, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// regexpMatch tries to match the logic of filepath.Match but
|
||||||
|
// does so using regexp logic. We do this so that we can expand the
|
||||||
|
// wildcard set to include other things, like "**" to mean any number
|
||||||
|
// of directories. This means that we should be backwards compatible
|
||||||
|
// with filepath.Match(). We'll end up supporting more stuff, due to
|
||||||
|
// the fact that we're using regexp, but that's ok - it does no harm.
|
||||||
|
func regexpMatch(pattern, path string) (bool, error) {
|
||||||
|
regStr := "^"
|
||||||
|
|
||||||
|
// Do some syntax checking on the pattern.
|
||||||
|
// filepath's Match() has some really weird rules that are inconsistent
|
||||||
|
// so instead of trying to dup their logic, just call Match() for its
|
||||||
|
// error state and if there is an error in the pattern return it.
|
||||||
|
// If this becomes an issue we can remove this since its really only
|
||||||
|
// needed in the error (syntax) case - which isn't really critical.
|
||||||
|
if _, err := filepath.Match(pattern, path); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Go through the pattern and convert it to a regexp.
|
||||||
|
// We use a scanner so we can support utf-8 chars.
|
||||||
|
var scan scanner.Scanner
|
||||||
|
scan.Init(strings.NewReader(pattern))
|
||||||
|
|
||||||
|
sl := string(os.PathSeparator)
|
||||||
|
escSL := sl
|
||||||
|
if sl == `\` {
|
||||||
|
escSL += `\`
|
||||||
|
}
|
||||||
|
|
||||||
|
for scan.Peek() != scanner.EOF {
|
||||||
|
ch := scan.Next()
|
||||||
|
|
||||||
|
if ch == '*' {
|
||||||
|
if scan.Peek() == '*' {
|
||||||
|
// is some flavor of "**"
|
||||||
|
scan.Next()
|
||||||
|
|
||||||
|
if scan.Peek() == scanner.EOF {
|
||||||
|
// is "**EOF" - to align with .gitignore just accept all
|
||||||
|
regStr += ".*"
|
||||||
|
} else {
|
||||||
|
// is "**"
|
||||||
|
regStr += "((.*" + escSL + ")|([^" + escSL + "]*))"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Treat **/ as ** so eat the "/"
|
||||||
|
if string(scan.Peek()) == sl {
|
||||||
|
scan.Next()
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// is "*" so map it to anything but "/"
|
||||||
|
regStr += "[^" + escSL + "]*"
|
||||||
|
}
|
||||||
|
} else if ch == '?' {
|
||||||
|
// "?" is any char except "/"
|
||||||
|
regStr += "[^" + escSL + "]"
|
||||||
|
} else if strings.Index(".$", string(ch)) != -1 {
|
||||||
|
// Escape some regexp special chars that have no meaning
|
||||||
|
// in golang's filepath.Match
|
||||||
|
regStr += `\` + string(ch)
|
||||||
|
} else if ch == '\\' {
|
||||||
|
// escape next char. Note that a trailing \ in the pattern
|
||||||
|
// will be left alone (but need to escape it)
|
||||||
|
if sl == `\` {
|
||||||
|
// On windows map "\" to "\\", meaning an escaped backslash,
|
||||||
|
// and then just continue because filepath.Match on
|
||||||
|
// Windows doesn't allow escaping at all
|
||||||
|
regStr += escSL
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if scan.Peek() != scanner.EOF {
|
||||||
|
regStr += `\` + string(scan.Next())
|
||||||
|
} else {
|
||||||
|
regStr += `\`
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
regStr += string(ch)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
regStr += "$"
|
||||||
|
|
||||||
|
res, err := regexp.MatchString(regStr, path)
|
||||||
|
|
||||||
|
// Map regexp's error to filepath's so no one knows we're not using filepath
|
||||||
|
if err != nil {
|
||||||
|
err = filepath.ErrBadPattern
|
||||||
|
}
|
||||||
|
|
||||||
|
return res, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// CopyFile copies from src to dst until either EOF is reached
|
||||||
|
// on src or an error occurs. It verifies src exists and remove
|
||||||
|
// the dst if it exists.
|
||||||
|
func CopyFile(src, dst string) (int64, error) {
|
||||||
|
cleanSrc := filepath.Clean(src)
|
||||||
|
cleanDst := filepath.Clean(dst)
|
||||||
|
if cleanSrc == cleanDst {
|
||||||
|
return 0, nil
|
||||||
|
}
|
||||||
|
sf, err := os.Open(cleanSrc)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
defer sf.Close()
|
||||||
|
if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
df, err := os.Create(cleanDst)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
defer df.Close()
|
||||||
|
return io.Copy(df, sf)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadSymlinkedDirectory returns the target directory of a symlink.
|
||||||
|
// The target of the symbolic link may not be a file.
|
||||||
|
func ReadSymlinkedDirectory(path string) (string, error) {
|
||||||
|
var realPath string
|
||||||
|
var err error
|
||||||
|
if realPath, err = filepath.Abs(path); err != nil {
|
||||||
|
return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
|
||||||
|
}
|
||||||
|
if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
|
||||||
|
return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
|
||||||
|
}
|
||||||
|
realPathInfo, err := os.Stat(realPath)
|
||||||
|
if err != nil {
|
||||||
|
return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
|
||||||
|
}
|
||||||
|
if !realPathInfo.Mode().IsDir() {
|
||||||
|
return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
|
||||||
|
}
|
||||||
|
return realPath, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateIfNotExists creates a file or a directory only if it does not already exist.
|
||||||
|
func CreateIfNotExists(path string, isDir bool) error {
|
||||||
|
if _, err := os.Stat(path); err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
if isDir {
|
||||||
|
return os.MkdirAll(path, 0755)
|
||||||
|
}
|
||||||
|
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
f, err := os.OpenFile(path, os.O_CREATE, 0755)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
f.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
22
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_unix.go
generated
vendored
Normal file
22
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_unix.go
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
// +build linux freebsd
|
||||||
|
|
||||||
|
package fileutils
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetTotalUsedFds Returns the number of used File Descriptors by
|
||||||
|
// reading it via /proc filesystem.
|
||||||
|
func GetTotalUsedFds() int {
|
||||||
|
if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
|
||||||
|
logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
|
||||||
|
} else {
|
||||||
|
return len(fds)
|
||||||
|
}
|
||||||
|
return -1
|
||||||
|
}
|
||||||
7
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_windows.go
generated
vendored
Normal file
7
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_windows.go
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
|||||||
|
package fileutils
|
||||||
|
|
||||||
|
// GetTotalUsedFds Returns the number of used File Descriptors. Not supported
|
||||||
|
// on Windows.
|
||||||
|
func GetTotalUsedFds() int {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
39
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir.go
generated
vendored
Normal file
39
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir.go
generated
vendored
Normal file
@ -0,0 +1,39 @@
|
|||||||
|
package homedir
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"runtime"
|
||||||
|
|
||||||
|
"github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Key returns the env var name for the user's home dir based on
|
||||||
|
// the platform being run on
|
||||||
|
func Key() string {
|
||||||
|
if runtime.GOOS == "windows" {
|
||||||
|
return "USERPROFILE"
|
||||||
|
}
|
||||||
|
return "HOME"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get returns the home directory of the current user with the help of
|
||||||
|
// environment variables depending on the target operating system.
|
||||||
|
// Returned path should be used with "path/filepath" to form new paths.
|
||||||
|
func Get() string {
|
||||||
|
home := os.Getenv(Key())
|
||||||
|
if home == "" && runtime.GOOS != "windows" {
|
||||||
|
if u, err := user.CurrentUser(); err == nil {
|
||||||
|
return u.Home
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return home
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetShortcutString returns the string that is shortcut to user's home directory
|
||||||
|
// in the native shell of the platform running on.
|
||||||
|
func GetShortcutString() string {
|
||||||
|
if runtime.GOOS == "windows" {
|
||||||
|
return "%USERPROFILE%" // be careful while using in format functions
|
||||||
|
}
|
||||||
|
return "~"
|
||||||
|
}
|
||||||
195
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools.go
generated
vendored
Normal file
195
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools.go
generated
vendored
Normal file
@ -0,0 +1,195 @@
|
|||||||
|
package idtools
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IDMap contains a single entry for user namespace range remapping. An array
|
||||||
|
// of IDMap entries represents the structure that will be provided to the Linux
|
||||||
|
// kernel for creating a user namespace.
|
||||||
|
type IDMap struct {
|
||||||
|
ContainerID int `json:"container_id"`
|
||||||
|
HostID int `json:"host_id"`
|
||||||
|
Size int `json:"size"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type subIDRange struct {
|
||||||
|
Start int
|
||||||
|
Length int
|
||||||
|
}
|
||||||
|
|
||||||
|
type ranges []subIDRange
|
||||||
|
|
||||||
|
func (e ranges) Len() int { return len(e) }
|
||||||
|
func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
|
||||||
|
func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start }
|
||||||
|
|
||||||
|
const (
|
||||||
|
subuidFileName string = "/etc/subuid"
|
||||||
|
subgidFileName string = "/etc/subgid"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MkdirAllAs creates a directory (include any along the path) and then modifies
|
||||||
|
// ownership to the requested uid/gid. If the directory already exists, this
|
||||||
|
// function will still change ownership to the requested uid/gid pair.
|
||||||
|
func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
|
||||||
|
return mkdirAs(path, mode, ownerUID, ownerGID, true, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MkdirAllNewAs creates a directory (include any along the path) and then modifies
|
||||||
|
// ownership ONLY of newly created directories to the requested uid/gid. If the
|
||||||
|
// directories along the path exist, no change of ownership will be performed
|
||||||
|
func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
|
||||||
|
return mkdirAs(path, mode, ownerUID, ownerGID, true, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MkdirAs creates a directory and then modifies ownership to the requested uid/gid.
|
||||||
|
// If the directory already exists, this function still changes ownership
|
||||||
|
func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
|
||||||
|
return mkdirAs(path, mode, ownerUID, ownerGID, false, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
|
||||||
|
// If the maps are empty, then the root uid/gid will default to "real" 0/0
|
||||||
|
func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
|
||||||
|
var uid, gid int
|
||||||
|
|
||||||
|
if uidMap != nil {
|
||||||
|
xUID, err := ToHost(0, uidMap)
|
||||||
|
if err != nil {
|
||||||
|
return -1, -1, err
|
||||||
|
}
|
||||||
|
uid = xUID
|
||||||
|
}
|
||||||
|
if gidMap != nil {
|
||||||
|
xGID, err := ToHost(0, gidMap)
|
||||||
|
if err != nil {
|
||||||
|
return -1, -1, err
|
||||||
|
}
|
||||||
|
gid = xGID
|
||||||
|
}
|
||||||
|
return uid, gid, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToContainer takes an id mapping, and uses it to translate a
|
||||||
|
// host ID to the remapped ID. If no map is provided, then the translation
|
||||||
|
// assumes a 1-to-1 mapping and returns the passed in id
|
||||||
|
func ToContainer(hostID int, idMap []IDMap) (int, error) {
|
||||||
|
if idMap == nil {
|
||||||
|
return hostID, nil
|
||||||
|
}
|
||||||
|
for _, m := range idMap {
|
||||||
|
if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) {
|
||||||
|
contID := m.ContainerID + (hostID - m.HostID)
|
||||||
|
return contID, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToHost takes an id mapping and a remapped ID, and translates the
|
||||||
|
// ID to the mapped host ID. If no map is provided, then the translation
|
||||||
|
// assumes a 1-to-1 mapping and returns the passed in id #
|
||||||
|
func ToHost(contID int, idMap []IDMap) (int, error) {
|
||||||
|
if idMap == nil {
|
||||||
|
return contID, nil
|
||||||
|
}
|
||||||
|
for _, m := range idMap {
|
||||||
|
if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) {
|
||||||
|
hostID := m.HostID + (contID - m.ContainerID)
|
||||||
|
return hostID, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateIDMappings takes a requested user and group name and
|
||||||
|
// using the data from /etc/sub{uid,gid} ranges, creates the
|
||||||
|
// proper uid and gid remapping ranges for that user/group pair
|
||||||
|
func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) {
|
||||||
|
subuidRanges, err := parseSubuid(username)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
subgidRanges, err := parseSubgid(groupname)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
if len(subuidRanges) == 0 {
|
||||||
|
return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username)
|
||||||
|
}
|
||||||
|
if len(subgidRanges) == 0 {
|
||||||
|
return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname)
|
||||||
|
}
|
||||||
|
|
||||||
|
return createIDMap(subuidRanges), createIDMap(subgidRanges), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func createIDMap(subidRanges ranges) []IDMap {
|
||||||
|
idMap := []IDMap{}
|
||||||
|
|
||||||
|
// sort the ranges by lowest ID first
|
||||||
|
sort.Sort(subidRanges)
|
||||||
|
containerID := 0
|
||||||
|
for _, idrange := range subidRanges {
|
||||||
|
idMap = append(idMap, IDMap{
|
||||||
|
ContainerID: containerID,
|
||||||
|
HostID: idrange.Start,
|
||||||
|
Size: idrange.Length,
|
||||||
|
})
|
||||||
|
containerID = containerID + idrange.Length
|
||||||
|
}
|
||||||
|
return idMap
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseSubuid(username string) (ranges, error) {
|
||||||
|
return parseSubidFile(subuidFileName, username)
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseSubgid(username string) (ranges, error) {
|
||||||
|
return parseSubidFile(subgidFileName, username)
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseSubidFile(path, username string) (ranges, error) {
|
||||||
|
var rangeList ranges
|
||||||
|
|
||||||
|
subidFile, err := os.Open(path)
|
||||||
|
if err != nil {
|
||||||
|
return rangeList, err
|
||||||
|
}
|
||||||
|
defer subidFile.Close()
|
||||||
|
|
||||||
|
s := bufio.NewScanner(subidFile)
|
||||||
|
for s.Scan() {
|
||||||
|
if err := s.Err(); err != nil {
|
||||||
|
return rangeList, err
|
||||||
|
}
|
||||||
|
|
||||||
|
text := strings.TrimSpace(s.Text())
|
||||||
|
if text == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
parts := strings.Split(text, ":")
|
||||||
|
if len(parts) != 3 {
|
||||||
|
return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path)
|
||||||
|
}
|
||||||
|
if parts[0] == username {
|
||||||
|
// return the first entry for a user; ignores potential for multiple ranges per user
|
||||||
|
startid, err := strconv.Atoi(parts[1])
|
||||||
|
if err != nil {
|
||||||
|
return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
|
||||||
|
}
|
||||||
|
length, err := strconv.Atoi(parts[2])
|
||||||
|
if err != nil {
|
||||||
|
return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
|
||||||
|
}
|
||||||
|
rangeList = append(rangeList, subIDRange{startid, length})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return rangeList, nil
|
||||||
|
}
|
||||||
60
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_unix.go
generated
vendored
Normal file
60
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_unix.go
generated
vendored
Normal file
@ -0,0 +1,60 @@
|
|||||||
|
// +build !windows
|
||||||
|
|
||||||
|
package idtools
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
|
||||||
|
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
||||||
|
)
|
||||||
|
|
||||||
|
func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
|
||||||
|
// make an array containing the original path asked for, plus (for mkAll == true)
|
||||||
|
// all path components leading up to the complete path that don't exist before we MkdirAll
|
||||||
|
// so that we can chown all of them properly at the end. If chownExisting is false, we won't
|
||||||
|
// chown the full directory path if it exists
|
||||||
|
var paths []string
|
||||||
|
if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
|
||||||
|
paths = []string{path}
|
||||||
|
} else if err == nil && chownExisting {
|
||||||
|
if err := os.Chown(path, ownerUID, ownerGID); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// short-circuit--we were called with an existing directory and chown was requested
|
||||||
|
return nil
|
||||||
|
} else if err == nil {
|
||||||
|
// nothing to do; directory path fully exists already and chown was NOT requested
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if mkAll {
|
||||||
|
// walk back to "/" looking for directories which do not exist
|
||||||
|
// and add them to the paths array for chown after creation
|
||||||
|
dirPath := path
|
||||||
|
for {
|
||||||
|
dirPath = filepath.Dir(dirPath)
|
||||||
|
if dirPath == "/" {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) {
|
||||||
|
paths = append(paths, dirPath)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// even if it existed, we will chown the requested path + any subpaths that
|
||||||
|
// didn't exist when we called MkdirAll
|
||||||
|
for _, pathComponent := range paths {
|
||||||
|
if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
18
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_windows.go
generated
vendored
Normal file
18
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_windows.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
|
|||||||
|
// +build windows
|
||||||
|
|
||||||
|
package idtools
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Platforms such as Windows do not support the UID/GID concept. So make this
|
||||||
|
// just a wrapper around system.MkdirAll.
|
||||||
|
func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
|
||||||
|
if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
155
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go
generated
vendored
Normal file
155
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go
generated
vendored
Normal file
@ -0,0 +1,155 @@
|
|||||||
|
package idtools
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
// add a user and/or group to Linux /etc/passwd, /etc/group using standard
|
||||||
|
// Linux distribution commands:
|
||||||
|
// adduser --uid <id> --shell /bin/login --no-create-home --disabled-login --ingroup <groupname> <username>
|
||||||
|
// useradd -M -u <id> -s /bin/nologin -N -g <groupname> <username>
|
||||||
|
// addgroup --gid <id> <groupname>
|
||||||
|
// groupadd -g <id> <groupname>
|
||||||
|
|
||||||
|
// Search window for allocating namespace-range user/group IDs.
const (
	baseUID int = 10000
	baseGID int = 10000
	idMAX   int = 65534
)

var (
	// Command names resolved at init time, dependent on the distro's tooling.
	userCommand  string
	groupCommand string

	// Argument templates for each supported add-user/add-group binary.
	cmdTemplates = map[string]string{
		"adduser":  "--uid %d --shell /bin/false --no-create-home --disabled-login --ingroup %s %s",
		"useradd":  "-M -u %d -s /bin/false -N -g %s %s",
		"addgroup": "--gid %d %s",
		"groupadd": "-g %d %s",
	}
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// set up which commands are used for adding users/groups dependent on distro
|
||||||
|
if _, err := resolveBinary("adduser"); err == nil {
|
||||||
|
userCommand = "adduser"
|
||||||
|
} else if _, err := resolveBinary("useradd"); err == nil {
|
||||||
|
userCommand = "useradd"
|
||||||
|
}
|
||||||
|
if _, err := resolveBinary("addgroup"); err == nil {
|
||||||
|
groupCommand = "addgroup"
|
||||||
|
} else if _, err := resolveBinary("groupadd"); err == nil {
|
||||||
|
groupCommand = "groupadd"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// resolveBinary locates binname on $PATH and follows any symlinks, accepting
// the result only when the final target keeps the requested base name (e.g. a
// busybox symlink named "adduser" must not count as a real adduser).
func resolveBinary(binname string) (string, error) {
	lookedUp, err := exec.LookPath(binname)
	if err != nil {
		return "", err
	}
	resolved, err := filepath.EvalSymlinks(lookedUp)
	if err != nil {
		return "", err
	}
	if filepath.Base(resolved) != binname {
		return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolved)
	}
	return resolved, nil
}
|
||||||
|
|
||||||
|
// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair
|
||||||
|
// and calls the appropriate helper function to add the group and then
|
||||||
|
// the user to the group in /etc/group and /etc/passwd respectively.
|
||||||
|
// This new user's /etc/sub{uid,gid} ranges will be used for user namespace
|
||||||
|
// mapping ranges in containers.
|
||||||
|
func AddNamespaceRangesUser(name string) (int, int, error) {
|
||||||
|
// Find unused uid, gid pair
|
||||||
|
uid, err := findUnusedUID(baseUID)
|
||||||
|
if err != nil {
|
||||||
|
return -1, -1, fmt.Errorf("Unable to find unused UID: %v", err)
|
||||||
|
}
|
||||||
|
gid, err := findUnusedGID(baseGID)
|
||||||
|
if err != nil {
|
||||||
|
return -1, -1, fmt.Errorf("Unable to find unused GID: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// First add the group that we will use
|
||||||
|
if err := addGroup(name, gid); err != nil {
|
||||||
|
return -1, -1, fmt.Errorf("Error adding group %q: %v", name, err)
|
||||||
|
}
|
||||||
|
// Add the user as a member of the group
|
||||||
|
if err := addUser(name, uid, name); err != nil {
|
||||||
|
return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err)
|
||||||
|
}
|
||||||
|
return uid, gid, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func addUser(userName string, uid int, groupName string) error {
|
||||||
|
|
||||||
|
if userCommand == "" {
|
||||||
|
return fmt.Errorf("Cannot add user; no useradd/adduser binary found")
|
||||||
|
}
|
||||||
|
args := fmt.Sprintf(cmdTemplates[userCommand], uid, groupName, userName)
|
||||||
|
return execAddCmd(userCommand, args)
|
||||||
|
}
|
||||||
|
|
||||||
|
func addGroup(groupName string, gid int) error {
|
||||||
|
|
||||||
|
if groupCommand == "" {
|
||||||
|
return fmt.Errorf("Cannot add group; no groupadd/addgroup binary found")
|
||||||
|
}
|
||||||
|
args := fmt.Sprintf(cmdTemplates[groupCommand], gid, groupName)
|
||||||
|
// only error out if the error isn't that the group already exists
|
||||||
|
// if the group exists then our needs are already met
|
||||||
|
if err := execAddCmd(groupCommand, args); err != nil && !strings.Contains(err.Error(), "already exists") {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// execAddCmd runs cmd with its space-separated argument string; the combined
// output is folded into the returned error to aid debugging.
func execAddCmd(cmd, args string) error {
	out, err := exec.Command(cmd, strings.Split(args, " ")...).CombinedOutput()
	if err != nil {
		return fmt.Errorf("Failed to add user/group with error: %v; output: %q", err, string(out))
	}
	return nil
}
|
||||||
|
|
||||||
|
// findUnusedUID returns the first numeric user ID at or above startUID that
// is absent from /etc/passwd.
func findUnusedUID(startUID int) (int, error) {
	return findUnused("passwd", startUID)
}
|
||||||
|
|
||||||
|
// findUnusedGID returns the first numeric group ID at or above startGID that
// is absent from /etc/group.
func findUnusedGID(startGID int) (int, error) {
	return findUnused("group", startGID)
}
|
||||||
|
|
||||||
|
func findUnused(file string, id int) (int, error) {
|
||||||
|
for {
|
||||||
|
cmdStr := fmt.Sprintf("cat /etc/%s | cut -d: -f3 | grep '^%d$'", file, id)
|
||||||
|
cmd := exec.Command("sh", "-c", cmdStr)
|
||||||
|
if err := cmd.Run(); err != nil {
|
||||||
|
// if a non-zero return code occurs, then we know the ID was not found
|
||||||
|
// and is usable
|
||||||
|
if exiterr, ok := err.(*exec.ExitError); ok {
|
||||||
|
// The program has exited with an exit code != 0
|
||||||
|
if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
|
||||||
|
if status.ExitStatus() == 1 {
|
||||||
|
//no match, we can use this ID
|
||||||
|
return id, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return -1, fmt.Errorf("Error looking in /etc/%s for unused ID: %v", file, err)
|
||||||
|
}
|
||||||
|
id++
|
||||||
|
if id > idMAX {
|
||||||
|
return -1, fmt.Errorf("Maximum id in %q reached with finding unused numeric ID", file)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
12
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go
generated
vendored
Normal file
12
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
// +build !linux
|
||||||
|
|
||||||
|
package idtools
|
||||||
|
|
||||||
|
import "fmt"
|
||||||
|
|
||||||
|
// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair
// and calls the appropriate helper function to add the group and then
// the user to the group in /etc/group and /etc/passwd respectively.
// On non-Linux platforms this is always an error.
func AddNamespaceRangesUser(name string) (int, int, error) {
	err := fmt.Errorf("No support for adding users or groups on this OS")
	return -1, -1, err
}
|
||||||
152
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/bytespipe.go
generated
vendored
Normal file
152
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/bytespipe.go
generated
vendored
Normal file
@ -0,0 +1,152 @@
|
|||||||
|
package ioutils
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// maxCap is the highest capacity to use in byte slices that buffer data.
	maxCap = 1e6

	// blockThreshold is the minimum number of bytes in the buffer which will
	// cause a write to BytesPipe to block when allocating a new slice.
	blockThreshold = 1e6
)

// ErrClosed is returned when Write is called on a closed BytesPipe.
var ErrClosed = errors.New("write to closed BytesPipe")
|
||||||
|
|
||||||
|
// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue).
// All written data may be read at most once. Also, BytesPipe allocates
// and releases new byte slices to adjust to current needs, so the buffer
// won't be overgrown after peak loads.
type BytesPipe struct {
	mu       sync.Mutex
	wait     *sync.Cond // built on mu; broadcast by Write, Read, and CloseWithError
	buf      [][]byte   // slice of byte-slices of buffered data
	lastRead int        // index in the first slice to a read point
	bufLen   int        // length of data buffered over the slices
	closeErr error      // error to return from next Read. set to nil if not closed.
}
|
||||||
|
|
||||||
|
// NewBytesPipe creates new BytesPipe, initialized by specified slice.
|
||||||
|
// If buf is nil, then it will be initialized with slice which cap is 64.
|
||||||
|
// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf).
|
||||||
|
func NewBytesPipe(buf []byte) *BytesPipe {
|
||||||
|
if cap(buf) == 0 {
|
||||||
|
buf = make([]byte, 0, 64)
|
||||||
|
}
|
||||||
|
bp := &BytesPipe{
|
||||||
|
buf: [][]byte{buf[:0]},
|
||||||
|
}
|
||||||
|
bp.wait = sync.NewCond(&bp.mu)
|
||||||
|
return bp
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write writes p to BytesPipe.
// It can allocate new []byte slices in a process of writing.
// If the pipe is closed (closeErr set), it returns ErrClosed together with
// the count of bytes written so far; it may block when more than
// blockThreshold bytes are already buffered.
func (bp *BytesPipe) Write(p []byte) (int, error) {
	bp.mu.Lock()
	defer bp.mu.Unlock()
	written := 0
	for {
		if bp.closeErr != nil {
			return written, ErrClosed
		}
		// write data to the last buffer
		b := bp.buf[len(bp.buf)-1]
		// copy data to the current empty allocated area
		n := copy(b[len(b):cap(b)], p)
		// increment buffered data length
		bp.bufLen += n
		// include written data in last buffer
		bp.buf[len(bp.buf)-1] = b[:len(b)+n]

		written += n

		// if there was enough room to write all then break
		if len(p) == n {
			break
		}

		// more data: write to the next slice
		p = p[n:]

		// block if too much data is still in the buffer; Read (and
		// CloseWithError) broadcast on the condition to unblock us
		for bp.bufLen >= blockThreshold {
			bp.wait.Wait()
		}

		// allocate slice that has twice the size of the last unless maximum reached
		nextCap := 2 * cap(bp.buf[len(bp.buf)-1])
		if nextCap > maxCap {
			nextCap = maxCap
		}
		// add new byte slice to the buffers slice and continue writing
		bp.buf = append(bp.buf, make([]byte, 0, nextCap))
	}
	// wake any Read blocked waiting for data
	bp.wait.Broadcast()
	return written, nil
}
|
||||||
|
|
||||||
|
// CloseWithError causes further reads from a BytesPipe to return immediately.
|
||||||
|
func (bp *BytesPipe) CloseWithError(err error) error {
|
||||||
|
bp.mu.Lock()
|
||||||
|
if err != nil {
|
||||||
|
bp.closeErr = err
|
||||||
|
} else {
|
||||||
|
bp.closeErr = io.EOF
|
||||||
|
}
|
||||||
|
bp.wait.Broadcast()
|
||||||
|
bp.mu.Unlock()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close causes further reads from a BytesPipe to return immediately.
// It is equivalent to CloseWithError(nil), which records io.EOF as the
// close reason.
func (bp *BytesPipe) Close() error {
	return bp.CloseWithError(nil)
}
|
||||||
|
|
||||||
|
// len reports the number of buffered bytes not yet consumed: total buffered
// length minus the read offset into the first slice. Called by Read/Write
// with bp.mu already held.
func (bp *BytesPipe) len() int {
	return bp.bufLen - bp.lastRead
}
|
||||||
|
|
||||||
|
// Read reads bytes from BytesPipe.
// Data could be read only once.
// When no data is buffered, Read blocks on the condition variable until a
// Write or CloseWithError broadcasts; after close it returns closeErr.
func (bp *BytesPipe) Read(p []byte) (n int, err error) {
	bp.mu.Lock()
	defer bp.mu.Unlock()
	if bp.len() == 0 {
		if bp.closeErr != nil {
			return 0, bp.closeErr
		}
		// wait for data to arrive (or for the pipe to be closed)
		bp.wait.Wait()
		if bp.len() == 0 && bp.closeErr != nil {
			return 0, bp.closeErr
		}
	}
	for {
		read := copy(p, bp.buf[0][bp.lastRead:])
		n += read
		bp.lastRead += read
		if bp.len() == 0 {
			// we have read everything. reset to the beginning.
			bp.lastRead = 0
			bp.bufLen -= len(bp.buf[0])
			bp.buf[0] = bp.buf[0][:0]
			break
		}
		// break if everything was read
		if len(p) == read {
			break
		}
		// more buffered data and more asked. read from next slice.
		p = p[read:]
		bp.lastRead = 0
		bp.bufLen -= len(bp.buf[0])
		bp.buf[0] = nil     // throw away old slice
		bp.buf = bp.buf[1:] // switch to next
	}
	// wake any Write blocked on the blockThreshold back-pressure loop
	bp.wait.Broadcast()
	return
}
|
||||||
22
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt.go
generated
vendored
Normal file
22
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt.go
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
package ioutils
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// FprintfIfNotEmpty prints the string value if it's not empty
|
||||||
|
func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) {
|
||||||
|
if value != "" {
|
||||||
|
return fmt.Fprintf(w, format, value)
|
||||||
|
}
|
||||||
|
return 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FprintfIfTrue prints the boolean value if it's true
|
||||||
|
func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) {
|
||||||
|
if ok {
|
||||||
|
return fmt.Fprintf(w, format, ok)
|
||||||
|
}
|
||||||
|
return 0, nil
|
||||||
|
}
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user