Merge pull request #46 from kadel/openshift

Initial support for OpenShift.
This commit is contained in:
Tuna 2016-07-22 01:55:25 +07:00 committed by GitHub
commit 12b21db180
597 changed files with 238435 additions and 85652 deletions


@ -12,7 +12,7 @@ branches:
install:
- go get github.com/mitchellh/gox
- go get github.com/tools/godep
- godep restore
- ./script/godep-restore.sh
script:
- make binary

457
Godeps/Godeps.json generated

@ -24,6 +24,11 @@
"Comment": "v0.8.2",
"Rev": "98a1428efc3d732f9e377b50c8e2113e070896cf"
},
{
"ImportPath": "github.com/blang/semver",
"Comment": "v3.0.1",
"Rev": "31b736133b98f26d5e078ec9eb591666edfd091f"
},
{
"ImportPath": "github.com/cloudfoundry-incubator/candiedyaml",
"Rev": "99c3df83b51532e3615f851d8c2dbb638f5313bf"
@ -32,6 +37,11 @@
"ImportPath": "github.com/davecgh/go-spew/spew",
"Rev": "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d"
},
{
"ImportPath": "github.com/dgrijalva/jwt-go",
"Comment": "v2.2.0-23-g5ca8014",
"Rev": "5ca80149b9d3f8b863af0e2bb6742e608603bd99"
},
{
"ImportPath": "github.com/docker/distribution",
"Comment": "v2.5.0-rc.1-22-g4e17ab5",
@ -47,6 +57,21 @@
"Comment": "v2.5.0-rc.1-22-g4e17ab5",
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
},
{
"ImportPath": "github.com/docker/distribution/manifest",
"Comment": "v2.5.0-rc.1-22-g4e17ab5",
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
},
{
"ImportPath": "github.com/docker/distribution/manifest/schema1",
"Comment": "v2.5.0-rc.1-22-g4e17ab5",
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
},
{
"ImportPath": "github.com/docker/distribution/manifest/schema2",
"Comment": "v2.5.0-rc.1-22-g4e17ab5",
"Rev": "4e17ab5d319ac5b70b2769442947567a83386fbc"
},
{
"ImportPath": "github.com/docker/distribution/reference",
"Comment": "v2.5.0-rc.1-22-g4e17ab5",
@ -477,6 +502,25 @@
"Comment": "v0.2.0-186-ga12288b",
"Rev": "a12288bd636066e330b5d39197737cd04882d164"
},
{
"ImportPath": "github.com/docker/libtrust",
"Rev": "9cbd2a1374f46905c68a4eb3694a130610adc62a"
},
{
"ImportPath": "github.com/emicklei/go-restful",
"Comment": "v1.2-54-g7c47e25",
"Rev": "7c47e2558a0bbbaba9ecab06bc6681e73028a28a"
},
{
"ImportPath": "github.com/emicklei/go-restful/log",
"Comment": "v1.2-54-g7c47e25",
"Rev": "7c47e2558a0bbbaba9ecab06bc6681e73028a28a"
},
{
"ImportPath": "github.com/emicklei/go-restful/swagger",
"Comment": "v1.2-54-g7c47e25",
"Rev": "7c47e2558a0bbbaba9ecab06bc6681e73028a28a"
},
{
"ImportPath": "github.com/fatih/structs",
"Rev": "be738c8546f55b34e60125afa50ed73a9a9c460e"
@ -485,6 +529,78 @@
"ImportPath": "github.com/flynn/go-shlex",
"Rev": "3f9db97f856818214da2e1057f8ad84803971cff"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient",
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus",
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts",
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive",
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils",
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir",
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools",
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils",
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath",
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools",
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise",
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy",
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system",
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/go-units",
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp",
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user",
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/golang.org/x/net/context",
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix",
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
},
{
"ImportPath": "github.com/ghodss/yaml",
"Rev": "73d445a93680fa1a78ae23a5839bad48f32ba1ee"
@ -535,6 +651,86 @@
"Comment": "v0.0.7",
"Rev": "7ca2aa4873aea7cb4265b1726acb24b90d8726c6"
},
{
"ImportPath": "github.com/openshift/origin/pkg/api",
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
},
{
"ImportPath": "github.com/openshift/origin/pkg/api/extension",
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
},
{
"ImportPath": "github.com/openshift/origin/pkg/authorization/api",
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
},
{
"ImportPath": "github.com/openshift/origin/pkg/build/api",
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
},
{
"ImportPath": "github.com/openshift/origin/pkg/deploy/api",
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
},
{
"ImportPath": "github.com/openshift/origin/pkg/deploy/api/install",
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
},
{
"ImportPath": "github.com/openshift/origin/pkg/deploy/api/v1",
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
},
{
"ImportPath": "github.com/openshift/origin/pkg/image/api",
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
},
{
"ImportPath": "github.com/openshift/origin/pkg/oauth/api",
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
},
{
"ImportPath": "github.com/openshift/origin/pkg/project/api",
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
},
{
"ImportPath": "github.com/openshift/origin/pkg/route/api",
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
},
{
"ImportPath": "github.com/openshift/origin/pkg/sdn/api",
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
},
{
"ImportPath": "github.com/openshift/origin/pkg/security/api",
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
},
{
"ImportPath": "github.com/openshift/origin/pkg/template/api",
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
},
{
"ImportPath": "github.com/openshift/origin/pkg/user/api",
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
},
{
"ImportPath": "github.com/openshift/origin/pkg/util/namer",
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
},
{
"ImportPath": "github.com/pborman/uuid",
"Rev": "ca53cad383cad2479bbba7f7a1a05797ec1386e4"
@ -587,6 +783,14 @@
"ImportPath": "golang.org/x/net/context",
"Rev": "62685c2d7ca23c807425dca88b11a3e2323dab41"
},
{
"ImportPath": "golang.org/x/net/http2",
"Rev": "62685c2d7ca23c807425dca88b11a3e2323dab41"
},
{
"ImportPath": "golang.org/x/net/http2/hpack",
"Rev": "62685c2d7ca23c807425dca88b11a3e2323dab41"
},
{
"ImportPath": "golang.org/x/net/proxy",
"Rev": "62685c2d7ca23c807425dca88b11a3e2323dab41"
@ -606,163 +810,288 @@
},
{
"ImportPath": "k8s.io/kubernetes/pkg/api",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/api/endpoints",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/api/errors",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/api/install",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/api/meta",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/api/meta/metatypes",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/api/pod",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/api/resource",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/api/service",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/api/unversioned",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/api/unversioned/validation",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/api/util",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/api/v1",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/api/validation",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/apimachinery",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/apimachinery/registered",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/apis/batch",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/apis/extensions",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/install",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/auth/authenticator",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/auth/user",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/capabilities",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/conversion",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/conversion/queryparams",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/fields",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/labels",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/runtime",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/json",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/protobuf",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/recognizer",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/streaming",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/versioning",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/securitycontextconstraints/util",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/serviceaccount",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/types",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/util",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/util/errors",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/util/framer",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/util/hash",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/util/intstr",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/util/json",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/util/net",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/util/net/sets",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/util/parsers",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/util/rand",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/util/runtime",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/util/sets",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/util/validation",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/util/validation/field",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/util/wait",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/util/yaml",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/watch",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/watch/versioned",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/third_party/forked/reflect",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
}
]
}


@ -33,6 +33,10 @@ import (
"encoding/json"
"io/ioutil"
// install kubernetes api
_ "k8s.io/kubernetes/pkg/api/install"
_ "k8s.io/kubernetes/pkg/apis/extensions/install"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions"
@ -41,6 +45,10 @@ import (
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/intstr"
deployapi "github.com/openshift/origin/pkg/deploy/api"
// install OpenShift deploy api
_ "github.com/openshift/origin/pkg/deploy/api/install"
"github.com/fatih/structs"
"github.com/ghodss/yaml"
)
@ -392,6 +400,39 @@ func initRS(name string, service ServiceConfig) *extensions.ReplicaSet {
return rs
}
// initDeploymentConfig initializes OpenShift's DeploymentConfig object
func initDeploymentConfig(name string, service ServiceConfig) *deployapi.DeploymentConfig {
dc := &deployapi.DeploymentConfig{
TypeMeta: unversioned.TypeMeta{
Kind: "DeploymentConfig",
APIVersion: "v1",
},
ObjectMeta: api.ObjectMeta{
Name: name,
Labels: map[string]string{"service": name},
},
Spec: deployapi.DeploymentConfigSpec{
Replicas: 1,
Selector: map[string]string{"service": name},
//UniqueLabelKey: p.Name,
Template: &api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: map[string]string{"service": name},
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: name,
Image: service.Image,
},
},
},
},
},
}
return dc
}
// Configure the environment variables.
func configEnvs(name string, service ServiceConfig) []api.EnvVar {
envs := []api.EnvVar{}
@ -412,8 +453,6 @@ func configVolumes(service ServiceConfig) ([]api.VolumeMount, []api.Volume) {
for _, volume := range service.Volumes {
character := ":"
if strings.Contains(volume, character) {
hostDir := volume[0:strings.Index(volume, character)]
hostDir = strings.TrimSpace(hostDir)
containerDir := volume[strings.Index(volume, character)+1:]
containerDir = strings.TrimSpace(containerDir)
@ -431,11 +470,10 @@ func configVolumes(service ServiceConfig) ([]api.VolumeMount, []api.Volume) {
volumeName := RandStringBytes(20)
volumesMount = append(volumesMount, api.VolumeMount{Name: volumeName, ReadOnly: readonly, MountPath: containerDir})
p := &api.HostPathVolumeSource{
Path: hostDir,
}
//p.Path = hostDir
volumeSource := api.VolumeSource{HostPath: p}
emptyDir := &api.EmptyDirVolumeSource{}
volumeSource := api.VolumeSource{EmptyDir: emptyDir}
volumes = append(volumes, api.Volume{Name: volumeName, VolumeSource: volumeSource})
}
}
@ -456,7 +494,6 @@ func configPorts(name string, service ServiceConfig) []api.ContainerPort {
p = api.ProtocolUDP
}
ports = append(ports, api.ContainerPort{
HostPort: port.HostPort,
ContainerPort: port.ContainerPort,
Protocol: p,
})
@ -492,17 +529,25 @@ func configServicePorts(name string, service ServiceConfig) []api.ServicePort {
}
// Transform data to json/yaml
func transformer(v interface{}, entity string, generateYaml bool) ([]byte, string) {
func transformer(obj runtime.Object, generateYaml bool) ([]byte, error) {
// Convert to versioned object
objectVersion := obj.GetObjectKind().GroupVersionKind()
version := unversioned.GroupVersion{Group: objectVersion.Group, Version: objectVersion.Version}
versionedObj, err := api.Scheme.ConvertToVersion(obj, version)
if err != nil {
return nil, err
}
// convert data to json / yaml
data, err := json.MarshalIndent(v, "", " ")
data, err := json.MarshalIndent(versionedObj, "", " ")
if generateYaml == true {
data, err = yaml.Marshal(v)
data, err = yaml.Marshal(versionedObj)
}
if err != nil {
return nil, "Failed to marshal the " + entity
return nil, err
}
logrus.Debugf("%s\n", data)
return data, ""
return data, nil
}
// load Environment Variable from bundles file
@ -543,7 +588,7 @@ func loadEnvVars(service bundlefile.Service) ([]EnvVar, string) {
}
// load Environment Variable from compose file
func loadEnvVarsFromCompose(e map[string]string) ([]EnvVar) {
func loadEnvVarsFromCompose(e map[string]string) []EnvVar {
envs := []EnvVar{}
for k, v := range e {
envs = append(envs, EnvVar{
@ -739,12 +784,14 @@ func loadComposeFile(file string, c *cli.Context) KomposeObject {
}
// Convert komposeObject to K8S controllers
func komposeConvert(komposeObject KomposeObject, toStdout, createD, createRS, createDS, createChart, generateYaml bool, replicas int, inputFile string, outFile string, f *os.File) {
func komposeConvert(komposeObject KomposeObject, toStdout, createD, createRS, createDS, createChart, createDeploymentConfigs, generateYaml bool, replicas int, inputFile string, outFile string, f *os.File) {
mServices := make(map[string][]byte)
mReplicationControllers := make(map[string][]byte)
mDeployments := make(map[string][]byte)
mDaemonSets := make(map[string][]byte)
mReplicaSets := make(map[string][]byte)
// OpenShift DeploymentConfigs
mDeploymentConfigs := make(map[string][]byte)
var svcnames []string
for name, service := range komposeObject.ServiceConfigs {
@ -757,6 +804,7 @@ func komposeConvert(komposeObject KomposeObject, toStdout, createD, createRS, cr
dc := initDC(name, service)
ds := initDS(name, service)
rs := initRS(name, service)
osDC := initDeploymentConfig(name, service) // OpenShift DeploymentConfigs
// Configure the environment variables.
envs := configEnvs(name, service)
@ -821,35 +869,43 @@ func komposeConvert(komposeObject KomposeObject, toStdout, createD, createRS, cr
updateController(rs, fillTemplate, fillObjectMeta)
updateController(dc, fillTemplate, fillObjectMeta)
updateController(ds, fillTemplate, fillObjectMeta)
// OpenShift DeploymentConfigs
updateController(osDC, fillTemplate, fillObjectMeta)
// convert datarc to json / yaml
datarc, err := transformer(rc, "replication controller", generateYaml)
if err != "" {
logrus.Fatalf(err)
datarc, err := transformer(rc, generateYaml)
if err != nil {
logrus.Fatalf(err.Error())
}
// convert datadc to json / yaml
datadc, err := transformer(dc, "deployment", generateYaml)
if err != "" {
logrus.Fatalf(err)
datadc, err := transformer(dc, generateYaml)
if err != nil {
logrus.Fatalf(err.Error())
}
// convert datads to json / yaml
datads, err := transformer(ds, "daemonSet", generateYaml)
if err != "" {
logrus.Fatalf(err)
datads, err := transformer(ds, generateYaml)
if err != nil {
logrus.Fatalf(err.Error())
}
// convert datars to json / yaml
datars, err := transformer(rs, "replicaSet", generateYaml)
if err != "" {
logrus.Fatalf(err)
datars, err := transformer(rs, generateYaml)
if err != nil {
logrus.Fatalf(err.Error())
}
// convert datasvc to json / yaml
datasvc, err := transformer(sc, "service controller", generateYaml)
if err != "" {
logrus.Fatalf(err)
datasvc, err := transformer(sc, generateYaml)
if err != nil {
logrus.Fatalf(err.Error())
}
// convert OpenShift DeploymentConfig to json / yaml
dataDeploymentConfig, err := transformer(osDC, generateYaml)
if err != nil {
logrus.Fatalf(err.Error())
}
mServices[name] = datasvc
@ -857,6 +913,7 @@ func komposeConvert(komposeObject KomposeObject, toStdout, createD, createRS, cr
mDeployments[name] = datadc
mDaemonSets[name] = datads
mReplicaSets[name] = datars
mDeploymentConfigs[name] = dataDeploymentConfig
}
for k, v := range mServices {
@ -900,6 +957,12 @@ func komposeConvert(komposeObject KomposeObject, toStdout, createD, createRS, cr
logrus.Fatalf("Failed to create Chart data: %s\n", err)
}
}
if createDeploymentConfigs {
for k, v := range mDeploymentConfigs {
print(k, "deploymentconfig", v, toStdout, generateYaml, f)
}
}
}
// Convert transforms a docker compose or dab file to k8s objects
@ -915,9 +978,10 @@ func Convert(c *cli.Context) {
fromBundles := c.BoolT("from-bundles")
replicas := c.Int("replicationcontroller")
singleOutput := len(outFile) != 0 || toStdout
createDeploymentConfig := c.BoolT("deploymentconfig")
// Create a Deployment by default if no controller has been set
if !createD && !createDS && !createRS && replicas == 0 {
if !createD && !createDS && !createRS && replicas == 0 && !createDeploymentConfig {
createD = true
}
@ -942,6 +1006,9 @@ func Convert(c *cli.Context) {
if replicas != 0 {
count++
}
if createDeploymentConfig {
count++
}
if count > 1 {
logrus.Fatalf("Error: only one type of Kubernetes controller can be generated when --out or --stdout is specified")
}
@ -962,7 +1029,7 @@ func Convert(c *cli.Context) {
}
// Convert komposeObject to K8S controllers
komposeConvert(komposeObject, toStdout, createD, createRS, createDS, createChart, generateYaml, replicas, inputFile, outFile, f)
komposeConvert(komposeObject, toStdout, createD, createRS, createDS, createChart, createDeploymentConfig, generateYaml, replicas, inputFile, outFile, f)
}
func checkUnsupportedKey(service ServiceConfig) {
@ -1098,5 +1165,8 @@ func updateController(obj runtime.Object, updateTemplate func(*api.PodTemplateSp
case *extensions.DaemonSet:
updateTemplate(&t.Spec.Template)
updateMeta(&t.ObjectMeta)
case *deployapi.DeploymentConfig:
updateTemplate(t.Spec.Template)
updateMeta(&t.ObjectMeta)
}
}


@ -49,6 +49,10 @@ func ConvertCommand() cli.Command {
Name: "daemonset,ds",
Usage: "Generate a daemonset resource file",
},
cli.BoolFlag{
Name: "deploymentconfig,dc",
Usage: "Generate a DeploymentConfig for OpenShift",
},
cli.IntFlag{
Name: "replicationcontroller,rc",
Value: 0,

29
script/godep-restore.sh Executable file

@ -0,0 +1,29 @@
#!/bin/bash
# inspired by: https://github.com/openshift/origin/blob/master/hack/godep-restore.sh
# Sometimes godep needs 'other' remotes. So add those remotes
preload-remote() {
local orig_org="$1"
local orig_project="$2"
local alt_org="$3"
local alt_project="$4"
# Go get stinks, which is why we have the || true...
go get -d "${orig_org}/${orig_project}" &>/dev/null || true
repo_dir="${GOPATH}/src/${orig_org}/${orig_project}"
pushd "${repo_dir}" > /dev/null
git remote add "${alt_org}-remote" "https://${alt_org}/${alt_project}.git" > /dev/null || true
git remote update > /dev/null
popd > /dev/null
}
echo "Preloading some dependencies"
# OpenShift requires its own Kubernetes fork :-(
preload-remote "k8s.io" "kubernetes" "github.com/openshift" "kubernetes"
echo "Starting to download all godeps. This takes a while"
godep restore
echo "Download finished into ${GOPATH}"

22
vendor/github.com/blang/semver/LICENSE generated vendored Normal file

@ -0,0 +1,22 @@
The MIT License
Copyright (c) 2014 Benedikt Lang <github at benediktlang.de>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

142
vendor/github.com/blang/semver/README.md generated vendored Normal file

@ -0,0 +1,142 @@
semver for golang [![Build Status](https://drone.io/github.com/blang/semver/status.png)](https://drone.io/github.com/blang/semver/latest) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master)
======
semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`.
Usage
-----
```bash
$ go get github.com/blang/semver
```
Note: Always vendor your dependencies or fix on a specific version tag.
```go
import "github.com/blang/semver"
v1, err := semver.Make("1.0.0-beta")
v2, err := semver.Make("2.0.0-beta")
v1.Compare(v2)
```
Also check the [GoDocs](http://godoc.org/github.com/blang/semver).
Why should I use this lib?
-----
- Fully spec compatible
- No reflection
- No regex
- Fully tested (Coverage >99%)
- Readable parsing/validation errors
- Fast (See [Benchmarks](#benchmarks))
- Only Stdlib
- Uses values instead of pointers
- Many features, see below
Features
-----
- Parsing and validation at all levels
- Comparator-like comparisons
- Compare Helper Methods
- InPlace manipulation
- Sortable (implements sort.Interface)
- database/sql compatible (sql.Scanner/Valuer)
- encoding/json compatible (json.Marshaler/Unmarshaler)
Example
-----
Have a look at full examples in [examples/main.go](examples/main.go)
```go
import "github.com/blang/semver"
v, err := semver.Make("0.0.1-alpha.preview+123.github")
fmt.Printf("Major: %d\n", v.Major)
fmt.Printf("Minor: %d\n", v.Minor)
fmt.Printf("Patch: %d\n", v.Patch)
fmt.Printf("Pre: %s\n", v.Pre)
fmt.Printf("Build: %s\n", v.Build)
// Prerelease versions array
if len(v.Pre) > 0 {
fmt.Println("Prerelease versions:")
for i, pre := range v.Pre {
fmt.Printf("%d: %q\n", i, pre)
}
}
// Build meta data array
if len(v.Build) > 0 {
fmt.Println("Build meta data:")
for i, build := range v.Build {
fmt.Printf("%d: %q\n", i, build)
}
}
v001, err := semver.Make("0.0.1")
// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE
v001.GT(v) == true
v.LT(v001) == true
v.GTE(v) == true
v.LTE(v) == true
// Or use v.Compare(v2) for comparisons (-1, 0, 1):
v001.Compare(v) == 1
v.Compare(v001) == -1
v.Compare(v) == 0
// Manipulate Version in place:
v.Pre[0], err = semver.NewPRVersion("beta")
if err != nil {
fmt.Printf("Error parsing pre release version: %q", err)
}
fmt.Println("\nValidate versions:")
v.Build[0] = "?"
err = v.Validate()
if err != nil {
fmt.Printf("Validation failed: %s\n", err)
}
```
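The sortable and encoding/json features listed under Features above can be exercised directly through the `Sort`, `MarshalJSON` and `UnmarshalJSON` helpers from the vendored `sort.go` and `json.go` shown later in this diff; a minimal sketch (the version strings are arbitrary examples):
```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/blang/semver"
)

func main() {
	// Sort uses the sort.Interface implementation on the Versions type.
	versions := []semver.Version{
		semver.MustParse("1.10.0-beta"),
		semver.MustParse("1.0.0"),
		semver.MustParse("1.2.3"),
	}
	semver.Sort(versions)
	fmt.Println(versions) // ascending: [1.0.0 1.2.3 1.10.0-beta]

	// MarshalJSON/UnmarshalJSON round-trip a Version as a JSON string.
	data, err := json.Marshal(versions[0])
	if err != nil {
		panic(err)
	}
	var v semver.Version
	if err := json.Unmarshal(data, &v); err != nil {
		panic(err)
	}
	fmt.Println(string(data), v) // "1.0.0" 1.0.0
}
```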
Benchmarks
-----
BenchmarkParseSimple 5000000 328 ns/op 49 B/op 1 allocs/op
BenchmarkParseComplex 1000000 2105 ns/op 263 B/op 7 allocs/op
BenchmarkParseAverage 1000000 1301 ns/op 168 B/op 4 allocs/op
BenchmarkStringSimple 10000000 130 ns/op 5 B/op 1 allocs/op
BenchmarkStringLarger 5000000 280 ns/op 32 B/op 2 allocs/op
BenchmarkStringComplex 3000000 512 ns/op 80 B/op 3 allocs/op
BenchmarkStringAverage 5000000 387 ns/op 47 B/op 2 allocs/op
BenchmarkValidateSimple 500000000 7.92 ns/op 0 B/op 0 allocs/op
BenchmarkValidateComplex 2000000 923 ns/op 0 B/op 0 allocs/op
BenchmarkValidateAverage 5000000 452 ns/op 0 B/op 0 allocs/op
BenchmarkCompareSimple 100000000 11.2 ns/op 0 B/op 0 allocs/op
BenchmarkCompareComplex 50000000 40.9 ns/op 0 B/op 0 allocs/op
BenchmarkCompareAverage 50000000 43.8 ns/op 0 B/op 0 allocs/op
BenchmarkSort 5000000 436 ns/op 259 B/op 2 allocs/op
See benchmark cases at [semver_test.go](semver_test.go)
Motivation
-----
I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regex, which I don't like.
Contribution
-----
Feel free to make a pull request. For bigger changes, create an issue first to discuss it.
License
-----
See [LICENSE](LICENSE) file.

23
vendor/github.com/blang/semver/json.go generated vendored Normal file

@ -0,0 +1,23 @@
package semver
import (
"encoding/json"
)
// MarshalJSON implements the encoding/json.Marshaler interface.
func (v Version) MarshalJSON() ([]byte, error) {
return json.Marshal(v.String())
}
// UnmarshalJSON implements the encoding/json.Unmarshaler interface.
func (v *Version) UnmarshalJSON(data []byte) (err error) {
var versionString string
if err = json.Unmarshal(data, &versionString); err != nil {
return
}
*v, err = Parse(versionString)
return
}

395
vendor/github.com/blang/semver/semver.go generated vendored Normal file

@ -0,0 +1,395 @@
package semver
import (
"errors"
"fmt"
"strconv"
"strings"
)
const (
numbers string = "0123456789"
alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-"
alphanum = alphas + numbers
)
// SpecVersion is the latest fully supported spec version of semver
var SpecVersion = Version{
Major: 2,
Minor: 0,
Patch: 0,
}
// Version represents a semver compatible version
type Version struct {
Major uint64
Minor uint64
Patch uint64
Pre []PRVersion
Build []string // No precedence
}
// Version to string
func (v Version) String() string {
b := make([]byte, 0, 5)
b = strconv.AppendUint(b, v.Major, 10)
b = append(b, '.')
b = strconv.AppendUint(b, v.Minor, 10)
b = append(b, '.')
b = strconv.AppendUint(b, v.Patch, 10)
if len(v.Pre) > 0 {
b = append(b, '-')
b = append(b, v.Pre[0].String()...)
for _, pre := range v.Pre[1:] {
b = append(b, '.')
b = append(b, pre.String()...)
}
}
if len(v.Build) > 0 {
b = append(b, '+')
b = append(b, v.Build[0]...)
for _, build := range v.Build[1:] {
b = append(b, '.')
b = append(b, build...)
}
}
return string(b)
}
// Equals checks if v is equal to o.
func (v Version) Equals(o Version) bool {
return (v.Compare(o) == 0)
}
// EQ checks if v is equal to o.
func (v Version) EQ(o Version) bool {
return (v.Compare(o) == 0)
}
// NE checks if v is not equal to o.
func (v Version) NE(o Version) bool {
return (v.Compare(o) != 0)
}
// GT checks if v is greater than o.
func (v Version) GT(o Version) bool {
return (v.Compare(o) == 1)
}
// GTE checks if v is greater than or equal to o.
func (v Version) GTE(o Version) bool {
return (v.Compare(o) >= 0)
}
// GE checks if v is greater than or equal to o.
func (v Version) GE(o Version) bool {
return (v.Compare(o) >= 0)
}
// LT checks if v is less than o.
func (v Version) LT(o Version) bool {
return (v.Compare(o) == -1)
}
// LTE checks if v is less than or equal to o.
func (v Version) LTE(o Version) bool {
return (v.Compare(o) <= 0)
}
// LE checks if v is less than or equal to o.
func (v Version) LE(o Version) bool {
return (v.Compare(o) <= 0)
}
// Compare compares Versions v to o:
// -1 == v is less than o
// 0 == v is equal to o
// 1 == v is greater than o
func (v Version) Compare(o Version) int {
if v.Major != o.Major {
if v.Major > o.Major {
return 1
}
return -1
}
if v.Minor != o.Minor {
if v.Minor > o.Minor {
return 1
}
return -1
}
if v.Patch != o.Patch {
if v.Patch > o.Patch {
return 1
}
return -1
}
// Quick comparison if a version has no prerelease versions
if len(v.Pre) == 0 && len(o.Pre) == 0 {
return 0
} else if len(v.Pre) == 0 && len(o.Pre) > 0 {
return 1
} else if len(v.Pre) > 0 && len(o.Pre) == 0 {
return -1
}
i := 0
for ; i < len(v.Pre) && i < len(o.Pre); i++ {
if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 {
continue
} else if comp == 1 {
return 1
} else {
return -1
}
}
// If all compared prerelease identifiers are equal, the version with additional identifiers is greater
if i == len(v.Pre) && i == len(o.Pre) {
return 0
} else if i == len(v.Pre) && i < len(o.Pre) {
return -1
} else {
return 1
}
}
// Validate validates v and returns an error if it is invalid
func (v Version) Validate() error {
// Major, Minor, Patch already validated using uint64
for _, pre := range v.Pre {
if !pre.IsNum { //Numeric prerelease versions already uint64
if len(pre.VersionStr) == 0 {
return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr)
}
if !containsOnly(pre.VersionStr, alphanum) {
return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr)
}
}
}
for _, build := range v.Build {
if len(build) == 0 {
return fmt.Errorf("Build meta data can not be empty %q", build)
}
if !containsOnly(build, alphanum) {
return fmt.Errorf("Invalid character(s) found in build meta data %q", build)
}
}
return nil
}
// New is an alias for Parse and returns a pointer, parses version string and returns a validated Version or error
func New(s string) (vp *Version, err error) {
v, err := Parse(s)
vp = &v
return
}
// Make is an alias for Parse, parses version string and returns a validated Version or error
func Make(s string) (Version, error) {
return Parse(s)
}
// Parse parses version string and returns a validated Version or error
func Parse(s string) (Version, error) {
if len(s) == 0 {
return Version{}, errors.New("Version string empty")
}
// Split into major.minor.(patch+pr+meta)
parts := strings.SplitN(s, ".", 3)
if len(parts) != 3 {
return Version{}, errors.New("No Major.Minor.Patch elements found")
}
// Major
if !containsOnly(parts[0], numbers) {
return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0])
}
if hasLeadingZeroes(parts[0]) {
return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0])
}
major, err := strconv.ParseUint(parts[0], 10, 64)
if err != nil {
return Version{}, err
}
// Minor
if !containsOnly(parts[1], numbers) {
return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1])
}
if hasLeadingZeroes(parts[1]) {
return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1])
}
minor, err := strconv.ParseUint(parts[1], 10, 64)
if err != nil {
return Version{}, err
}
v := Version{}
v.Major = major
v.Minor = minor
var build, prerelease []string
patchStr := parts[2]
if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 {
build = strings.Split(patchStr[buildIndex+1:], ".")
patchStr = patchStr[:buildIndex]
}
if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 {
prerelease = strings.Split(patchStr[preIndex+1:], ".")
patchStr = patchStr[:preIndex]
}
if !containsOnly(patchStr, numbers) {
return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr)
}
if hasLeadingZeroes(patchStr) {
return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr)
}
patch, err := strconv.ParseUint(patchStr, 10, 64)
if err != nil {
return Version{}, err
}
v.Patch = patch
// Prerelease
for _, prstr := range prerelease {
parsedPR, err := NewPRVersion(prstr)
if err != nil {
return Version{}, err
}
v.Pre = append(v.Pre, parsedPR)
}
// Build meta data
for _, str := range build {
if len(str) == 0 {
return Version{}, errors.New("Build meta data is empty")
}
if !containsOnly(str, alphanum) {
return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str)
}
v.Build = append(v.Build, str)
}
return v, nil
}
// MustParse is like Parse but panics if the version cannot be parsed.
func MustParse(s string) Version {
v, err := Parse(s)
if err != nil {
panic(`semver: Parse(` + s + `): ` + err.Error())
}
return v
}
// PRVersion represents a PreRelease Version
type PRVersion struct {
VersionStr string
VersionNum uint64
IsNum bool
}
// NewPRVersion creates a new valid prerelease version
func NewPRVersion(s string) (PRVersion, error) {
if len(s) == 0 {
return PRVersion{}, errors.New("Prerelease is empty")
}
v := PRVersion{}
if containsOnly(s, numbers) {
if hasLeadingZeroes(s) {
return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s)
}
num, err := strconv.ParseUint(s, 10, 64)
// Might never be hit, but just in case
if err != nil {
return PRVersion{}, err
}
v.VersionNum = num
v.IsNum = true
} else if containsOnly(s, alphanum) {
v.VersionStr = s
v.IsNum = false
} else {
return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s)
}
return v, nil
}
// IsNumeric checks if prerelease-version is numeric
func (v PRVersion) IsNumeric() bool {
return v.IsNum
}
// Compare compares two PreRelease Versions v and o:
// -1 == v is less than o
// 0 == v is equal to o
// 1 == v is greater than o
func (v PRVersion) Compare(o PRVersion) int {
if v.IsNum && !o.IsNum {
return -1
} else if !v.IsNum && o.IsNum {
return 1
} else if v.IsNum && o.IsNum {
if v.VersionNum == o.VersionNum {
return 0
} else if v.VersionNum > o.VersionNum {
return 1
} else {
return -1
}
} else { // both are Alphas
if v.VersionStr == o.VersionStr {
return 0
} else if v.VersionStr > o.VersionStr {
return 1
} else {
return -1
}
}
}
// PreRelease version to string
func (v PRVersion) String() string {
if v.IsNum {
return strconv.FormatUint(v.VersionNum, 10)
}
return v.VersionStr
}
func containsOnly(s string, set string) bool {
return strings.IndexFunc(s, func(r rune) bool {
return !strings.ContainsRune(set, r)
}) == -1
}
func hasLeadingZeroes(s string) bool {
return len(s) > 1 && s[0] == '0'
}
// NewBuildVersion creates a new valid build version
func NewBuildVersion(s string) (string, error) {
if len(s) == 0 {
return "", errors.New("Buildversion is empty")
}
if !containsOnly(s, alphanum) {
return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s)
}
return s, nil
}

28
vendor/github.com/blang/semver/sort.go generated vendored Normal file

@ -0,0 +1,28 @@
package semver
import (
"sort"
)
// Versions represents multiple versions.
type Versions []Version
// Len returns length of version collection
func (s Versions) Len() int {
return len(s)
}
// Swap swaps two versions inside the collection by its indices
func (s Versions) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
// Less checks if version at index i is less than version at index j
func (s Versions) Less(i, j int) bool {
return s[i].LT(s[j])
}
// Sort sorts a slice of versions
func Sort(versions []Version) {
sort.Sort(Versions(versions))
}

30
vendor/github.com/blang/semver/sql.go generated vendored Normal file

@ -0,0 +1,30 @@
package semver
import (
"database/sql/driver"
"fmt"
)
// Scan implements the database/sql.Scanner interface.
func (v *Version) Scan(src interface{}) (err error) {
var str string
switch src := src.(type) {
case string:
str = src
case []byte:
str = string(src)
default:
return fmt.Errorf("Version.Scan: cannot convert %T to string.", src)
}
if t, err := Parse(str); err == nil {
*v = t
}
return
}
// Value implements the database/sql/driver.Valuer interface.
func (v Version) Value() (driver.Value, error) {
return v.String(), nil
}
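For reference, a minimal sketch of how the `Scan` and `Value` methods above behave outside of the `database/sql` machinery (the version string is an arbitrary example):
```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	// Scan accepts a string or []byte, mirroring the type switch above.
	var v semver.Version
	if err := v.Scan("1.2.3-rc.1+build.5"); err != nil {
		panic(err)
	}
	fmt.Println(v) // 1.2.3-rc.1+build.5

	// Value hands the string form back to the database driver.
	val, err := v.Value()
	if err != nil {
		panic(err)
	}
	fmt.Println(val) // 1.2.3-rc.1+build.5
}
```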

4
vendor/github.com/dgrijalva/jwt-go/.gitignore generated vendored Normal file

@ -0,0 +1,4 @@
.DS_Store
bin

8
vendor/github.com/dgrijalva/jwt-go/LICENSE generated vendored Normal file

@ -0,0 +1,8 @@
Copyright (c) 2012 Dave Grijalva
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

61
vendor/github.com/dgrijalva/jwt-go/README.md generated vendored Normal file

@ -0,0 +1,61 @@
A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-jones-json-web-token.html)
**NOTICE:** A vulnerability in JWT was [recently published](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). As this library doesn't force users to validate the `alg` is what they expected, it's possible your usage is affected. There will be an update soon to remedy this, and it will likely require backwards-incompatible changes to the API. In the short term, please make sure your implementation verifies the `alg` is what you expect.
## What the heck is a JWT?
In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way.
The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used.
The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own.
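The three segments can be inspected with this package's exported `DecodeSegment` helper (used by `Parse` further down); a minimal sketch, where the token value is a made-up placeholder:
```go
package main

import (
	"fmt"
	"strings"

	"github.com/dgrijalva/jwt-go"
)

func main() {
	// A placeholder token: base64url(header) + "." + base64url(claims) + "." + signature.
	tokenString := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.signature"

	parts := strings.Split(tokenString, ".")
	header, _ := jwt.DecodeSegment(parts[0]) // base64url-decoded JSON header
	claims, _ := jwt.DecodeSegment(parts[1]) // base64url-decoded JSON claims
	fmt.Println(string(header)) // {"alg":"HS256","typ":"JWT"}
	fmt.Println(string(claims)) // {"foo":"bar"}
	// parts[2] is the signature over the first two segments; verify it with
	// jwt.Parse rather than by hand.
}
```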
## What's in the box?
This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are RSA256 and HMAC SHA256, though hooks are present for adding your own.
## Parse and Verify
Parsing and verifying tokens is pretty straight forward. You pass in the token and a function for looking up the key. This is done as a callback since you may need to parse the token to find out what signing method and key was used.
```go
token, err := jwt.Parse(myToken, func(token *jwt.Token) (interface{}, error) {
// Don't forget to validate the alg is what you expect:
if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {
return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
}
return myLookupKey(token.Header["kid"])
})
if err == nil && token.Valid {
deliverGoodness("!")
} else {
deliverUtterRejection(":(")
}
```
## Create a token
```go
// Create the token
token := jwt.New(jwt.SigningMethodHS256)
// Set some claims
token.Claims["foo"] = "bar"
token.Claims["exp"] = time.Now().Add(time.Hour * 72).Unix()
// Sign and get the complete encoded token as a string
tokenString, err := token.SignedString(mySigningKey)
```
## Project Status & Versioning
This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).
This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases).
While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package instead: `gopkg.in/dgrijalva/jwt-go.v2`. It will do the right thing WRT semantic versioning.
## More
Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go).
The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. For a more http centric example, see [this gist](https://gist.github.com/cryptix/45c33ecf0ae54828e63b).

54
vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md generated vendored Normal file

@ -0,0 +1,54 @@
## `jwt-go` Version History
#### 2.2.0
* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic.
#### 2.1.0
Backwards compatible API change that was missed in 2.0.0.
* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte`
#### 2.0.0
There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change.
The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
* **Compatibility Breaking Changes**
* `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct`
* `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct`
* `KeyFunc` now returns `interface{}` instead of `[]byte`
* `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
* `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type.
* Added public package global `SigningMethodHS256`
* Added public package global `SigningMethodHS384`
* Added public package global `SigningMethodHS512`
* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type.
* Added public package global `SigningMethodRS256`
* Added public package global `SigningMethodRS384`
* Added public package global `SigningMethodRS512`
* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged.
* Refactored the RSA implementation to be easier to read
* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM`
#### 1.0.2
* Fixed bug in parsing public keys from certificates
* Added more tests around the parsing of keys for RS256
* Code refactoring in RS256 implementation. No functional changes
#### 1.0.1
* Fixed panic if RS256 signing method was passed an invalid key
#### 1.0.0
* First versioned release
* API stabilized
* Supports creating, signing, parsing, and validating JWT tokens
* Supports RS256 and HS256 signing methods

4
vendor/github.com/dgrijalva/jwt-go/doc.go generated vendored Normal file

@ -0,0 +1,4 @@
// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html
//
// See README.md for more info.
package jwt

43
vendor/github.com/dgrijalva/jwt-go/errors.go generated vendored Normal file

@ -0,0 +1,43 @@
package jwt
import (
"errors"
)
// Error constants
var (
ErrInvalidKey = errors.New("key is invalid or of invalid type")
ErrHashUnavailable = errors.New("the requested hash function is unavailable")
ErrNoTokenInRequest = errors.New("no token present in request")
)
// The errors that might occur when parsing and validating a token
const (
ValidationErrorMalformed uint32 = 1 << iota // Token is malformed
ValidationErrorUnverifiable // Token could not be verified because of signing problems
ValidationErrorSignatureInvalid // Signature validation failed
ValidationErrorExpired // Exp validation failed
ValidationErrorNotValidYet // NBF validation failed
)
// The error from Parse if token is not valid
type ValidationError struct {
err string
Errors uint32 // bitfield. see ValidationError... constants
}
// Validation error is an error type
func (e ValidationError) Error() string {
if e.err == "" {
return "token is invalid"
}
return e.err
}
// No errors
func (e *ValidationError) valid() bool {
if e.Errors > 0 {
return false
}
return true
}
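For reference, a sketch of how a caller of `Parse` can check the `Errors` bitfield defined above (the signing key below is a placeholder):
```go
package main

import (
	"fmt"

	"github.com/dgrijalva/jwt-go"
)

// classify reports which validation bits are set when parsing fails.
func classify(tokenString string) {
	_, err := jwt.Parse(tokenString, func(t *jwt.Token) (interface{}, error) {
		return []byte("placeholder-key"), nil
	})
	if err == nil {
		fmt.Println("token valid")
		return
	}
	if vErr, ok := err.(*jwt.ValidationError); ok {
		switch {
		case vErr.Errors&jwt.ValidationErrorMalformed != 0:
			fmt.Println("malformed token")
		case vErr.Errors&jwt.ValidationErrorSignatureInvalid != 0:
			fmt.Println("bad signature")
		case vErr.Errors&jwt.ValidationErrorExpired != 0:
			fmt.Println("token expired")
		default:
			fmt.Println("other validation error:", err)
		}
	}
}

func main() {
	classify("not.a.token")
}
```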

84
vendor/github.com/dgrijalva/jwt-go/hmac.go generated vendored Normal file

@ -0,0 +1,84 @@
package jwt
import (
"crypto"
"crypto/hmac"
"errors"
)
// Implements the HMAC-SHA family of signing methods
type SigningMethodHMAC struct {
Name string
Hash crypto.Hash
}
// Specific instances for HS256 and company
var (
SigningMethodHS256 *SigningMethodHMAC
SigningMethodHS384 *SigningMethodHMAC
SigningMethodHS512 *SigningMethodHMAC
ErrSignatureInvalid = errors.New("signature is invalid")
)
func init() {
// HS256
SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256}
RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod {
return SigningMethodHS256
})
// HS384
SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384}
RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod {
return SigningMethodHS384
})
// HS512
SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512}
RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod {
return SigningMethodHS512
})
}
func (m *SigningMethodHMAC) Alg() string {
return m.Name
}
func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error {
if keyBytes, ok := key.([]byte); ok {
var sig []byte
var err error
if sig, err = DecodeSegment(signature); err == nil {
if !m.Hash.Available() {
return ErrHashUnavailable
}
hasher := hmac.New(m.Hash.New, keyBytes)
hasher.Write([]byte(signingString))
if !hmac.Equal(sig, hasher.Sum(nil)) {
err = ErrSignatureInvalid
}
}
return err
}
return ErrInvalidKey
}
// Implements the Sign method from SigningMethod for this signing method.
// Key must be []byte
func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) {
if keyBytes, ok := key.([]byte); ok {
if !m.Hash.Available() {
return "", ErrHashUnavailable
}
hasher := hmac.New(m.Hash.New, keyBytes)
hasher.Write([]byte(signingString))
return EncodeSegment(hasher.Sum(nil)), nil
}
return "", ErrInvalidKey
}
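For reference, the `Sign`/`Verify` pair above can be exercised directly on one of the package-level instances; a minimal sketch with a placeholder signing string and key:
```go
package main

import (
	"fmt"

	"github.com/dgrijalva/jwt-go"
)

func main() {
	// In real use the signing string is "<base64url header>.<base64url claims>";
	// here it is just a placeholder, and the key is an arbitrary []byte.
	signingString := "header.claims"
	key := []byte("my-secret")

	sig, err := jwt.SigningMethodHS256.Sign(signingString, key)
	if err != nil {
		panic(err)
	}
	// Verify recomputes the HMAC and compares it to the decoded signature.
	if err := jwt.SigningMethodHS256.Verify(signingString, sig, key); err != nil {
		panic(err)
	}
	fmt.Println("HS256 signature verified:", sig)
}
```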

198
vendor/github.com/dgrijalva/jwt-go/jwt.go generated vendored Normal file

@ -0,0 +1,198 @@
package jwt
import (
"encoding/base64"
"encoding/json"
"net/http"
"strings"
"time"
)
// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time).
// You can override it to use another time value. This is useful for testing or if your
// server uses a different time zone than your tokens.
var TimeFunc = time.Now
// Parse methods use this callback function to supply
// the key for verification. The function receives the parsed,
// but unverified Token. This allows you to use properties in the
// Header of the token (such as `kid`) to identify which key to use.
type Keyfunc func(*Token) (interface{}, error)
// A JWT Token. Different fields will be used depending on whether you're
// creating or parsing/verifying a token.
type Token struct {
Raw string // The raw token. Populated when you Parse a token
Method SigningMethod // The signing method used or to be used
Header map[string]interface{} // The first segment of the token
Claims map[string]interface{} // The second segment of the token
Signature string // The third segment of the token. Populated when you Parse a token
Valid bool // Is the token valid? Populated when you Parse/Verify a token
}
// Create a new Token. Takes a signing method
func New(method SigningMethod) *Token {
return &Token{
Header: map[string]interface{}{
"typ": "JWT",
"alg": method.Alg(),
},
Claims: make(map[string]interface{}),
Method: method,
}
}
// Get the complete, signed token
func (t *Token) SignedString(key interface{}) (string, error) {
var sig, sstr string
var err error
if sstr, err = t.SigningString(); err != nil {
return "", err
}
if sig, err = t.Method.Sign(sstr, key); err != nil {
return "", err
}
return strings.Join([]string{sstr, sig}, "."), nil
}
// Generate the signing string. This is the
// most expensive part of the whole deal. Unless you
// need this for something special, just go straight for
// the SignedString.
func (t *Token) SigningString() (string, error) {
var err error
parts := make([]string, 2)
for i := range parts {
var source map[string]interface{}
if i == 0 {
source = t.Header
} else {
source = t.Claims
}
var jsonValue []byte
if jsonValue, err = json.Marshal(source); err != nil {
return "", err
}
parts[i] = EncodeSegment(jsonValue)
}
return strings.Join(parts, "."), nil
}
// Parse, validate, and return a token.
// keyFunc will receive the parsed token and should return the key for validating.
// If everything is kosher, err will be nil
func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
parts := strings.Split(tokenString, ".")
if len(parts) != 3 {
return nil, &ValidationError{err: "token contains an invalid number of segments", Errors: ValidationErrorMalformed}
}
var err error
token := &Token{Raw: tokenString}
// parse Header
var headerBytes []byte
if headerBytes, err = DecodeSegment(parts[0]); err != nil {
return token, &ValidationError{err: err.Error(), Errors: ValidationErrorMalformed}
}
if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
return token, &ValidationError{err: err.Error(), Errors: ValidationErrorMalformed}
}
// parse Claims
var claimBytes []byte
if claimBytes, err = DecodeSegment(parts[1]); err != nil {
return token, &ValidationError{err: err.Error(), Errors: ValidationErrorMalformed}
}
if err = json.Unmarshal(claimBytes, &token.Claims); err != nil {
return token, &ValidationError{err: err.Error(), Errors: ValidationErrorMalformed}
}
// Lookup signature method
if method, ok := token.Header["alg"].(string); ok {
if token.Method = GetSigningMethod(method); token.Method == nil {
return token, &ValidationError{err: "signing method (alg) is unavailable.", Errors: ValidationErrorUnverifiable}
}
} else {
return token, &ValidationError{err: "signing method (alg) is unspecified.", Errors: ValidationErrorUnverifiable}
}
// Lookup key
var key interface{}
if keyFunc == nil {
// keyFunc was not provided. short circuiting validation
return token, &ValidationError{err: "no Keyfunc was provided.", Errors: ValidationErrorUnverifiable}
}
if key, err = keyFunc(token); err != nil {
// keyFunc returned an error
return token, &ValidationError{err: err.Error(), Errors: ValidationErrorUnverifiable}
}
// Check expiration times
vErr := &ValidationError{}
now := TimeFunc().Unix()
if exp, ok := token.Claims["exp"].(float64); ok {
if now > int64(exp) {
vErr.err = "token is expired"
vErr.Errors |= ValidationErrorExpired
}
}
if nbf, ok := token.Claims["nbf"].(float64); ok {
if now < int64(nbf) {
vErr.err = "token is not valid yet"
vErr.Errors |= ValidationErrorNotValidYet
}
}
// Perform validation
if err = token.Method.Verify(strings.Join(parts[0:2], "."), parts[2], key); err != nil {
vErr.err = err.Error()
vErr.Errors |= ValidationErrorSignatureInvalid
}
if vErr.valid() {
token.Valid = true
return token, nil
}
return token, vErr
}
// Try to find the token in an http.Request.
// This method will call ParseMultipartForm if there's no token in the header.
// Currently, it looks in the Authorization header as well as
// looking for an 'access_token' request parameter in req.Form.
func ParseFromRequest(req *http.Request, keyFunc Keyfunc) (token *Token, err error) {
// Look for an Authorization header
if ah := req.Header.Get("Authorization"); ah != "" {
// Should be a bearer token
if len(ah) > 6 && strings.ToUpper(ah[0:6]) == "BEARER" {
return Parse(ah[7:], keyFunc)
}
}
// Look for "access_token" parameter
req.ParseMultipartForm(10e6)
if tokStr := req.Form.Get("access_token"); tokStr != "" {
return Parse(tokStr, keyFunc)
}
return nil, ErrNoTokenInRequest
}
// EncodeSegment applies the JWT-specific base64url encoding with padding stripped
func EncodeSegment(seg []byte) string {
return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=")
}
// DecodeSegment decodes the JWT-specific base64url encoding with padding stripped
func DecodeSegment(seg string) ([]byte, error) {
if l := len(seg) % 4; l > 0 {
seg += strings.Repeat("=", 4-l)
}
return base64.URLEncoding.DecodeString(seg)
}
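Putting the pieces above together, a hedged end-to-end sketch of creating, signing, and re-parsing a token with this vendored API; the key and claims are made up for illustration:
package main

import (
	"fmt"
	"time"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	key := []byte("demo-secret") // illustrative key

	// Build and sign a token; Claims is a plain map in this version of the API.
	tok := jwt.New(jwt.SigningMethodHS256)
	tok.Claims["sub"] = "example-user"
	tok.Claims["exp"] = time.Now().Add(time.Hour).Unix()
	signed, err := tok.SignedString(key)
	if err != nil {
		panic(err)
	}

	// Parse it back; the Keyfunc returns the verification key.
	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		return key, nil
	})
	fmt.Println(parsed.Valid, err)
}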

114
vendor/github.com/dgrijalva/jwt-go/rsa.go generated vendored Normal file
View File

@ -0,0 +1,114 @@
package jwt
import (
"crypto"
"crypto/rand"
"crypto/rsa"
)
// Implements the RSA family of signing methods
type SigningMethodRSA struct {
Name string
Hash crypto.Hash
}
// Specific instances for RS256 and company
var (
SigningMethodRS256 *SigningMethodRSA
SigningMethodRS384 *SigningMethodRSA
SigningMethodRS512 *SigningMethodRSA
)
func init() {
// RS256
SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod {
return SigningMethodRS256
})
// RS384
SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod {
return SigningMethodRS384
})
// RS512
SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}
RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod {
return SigningMethodRS512
})
}
func (m *SigningMethodRSA) Alg() string {
return m.Name
}
// Implements the Verify method from SigningMethod
// For this signing method, the key must be either a PEM encoded PKCS1 or PKCS8 RSA public key as
// []byte, or an *rsa.PublicKey structure.
func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error {
var err error
// Decode the signature
var sig []byte
if sig, err = DecodeSegment(signature); err != nil {
return err
}
var rsaKey *rsa.PublicKey
switch k := key.(type) {
case []byte:
if rsaKey, err = ParseRSAPublicKeyFromPEM(k); err != nil {
return err
}
case *rsa.PublicKey:
rsaKey = k
default:
return ErrInvalidKey
}
// Create hasher
if !m.Hash.Available() {
return ErrHashUnavailable
}
hasher := m.Hash.New()
hasher.Write([]byte(signingString))
// Verify the signature
return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
}
// Implements the Sign method from SigningMethod
// For this signing method, the key must be either a PEM encoded PKCS1 or PKCS8 RSA private key as
// []byte, or an *rsa.PrivateKey structure.
func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) {
var err error
var rsaKey *rsa.PrivateKey
switch k := key.(type) {
case []byte:
if rsaKey, err = ParseRSAPrivateKeyFromPEM(k); err != nil {
return "", err
}
case *rsa.PrivateKey:
rsaKey = k
default:
return "", ErrInvalidKey
}
// Create the hasher
if !m.Hash.Available() {
return "", ErrHashUnavailable
}
hasher := m.Hash.New()
hasher.Write([]byte(signingString))
// Sign the string and return the encoded bytes
if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil {
return EncodeSegment(sigBytes), nil
} else {
return "", err
}
}

68
vendor/github.com/dgrijalva/jwt-go/rsa_utils.go generated vendored Normal file
View File

@ -0,0 +1,68 @@
package jwt
import (
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"errors"
)
var (
ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key")
ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key")
ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key")
)
// Parse PEM encoded PKCS1 or PKCS8 private key
func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
var err error
// Parse PEM block
var block *pem.Block
if block, _ = pem.Decode(key); block == nil {
return nil, ErrKeyMustBePEMEncoded
}
var parsedKey interface{}
if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
return nil, err
}
}
var pkey *rsa.PrivateKey
var ok bool
if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
return nil, ErrNotRSAPrivateKey
}
return pkey, nil
}
// Parse PEM encoded PKCS1 or PKCS8 public key
func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
var err error
// Parse PEM block
var block *pem.Block
if block, _ = pem.Decode(key); block == nil {
return nil, ErrKeyMustBePEMEncoded
}
// Parse the key
var parsedKey interface{}
if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
parsedKey = cert.PublicKey
} else {
return nil, err
}
}
var pkey *rsa.PublicKey
var ok bool
if pkey, ok = parsedKey.(*rsa.PublicKey); !ok {
return nil, ErrNotRSAPublicKey
}
return pkey, nil
}
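A sketch tying the RSA helpers together: it generates a throwaway key and PEM-encodes it purely to have input for ParseRSAPrivateKeyFromPEM, then signs a token with RS256. A real caller would load key material from disk instead:
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	// Throwaway 2048-bit key, PKCS1-encoded, purely for illustration.
	rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	pemBytes := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(rsaKey),
	})

	priv, err := jwt.ParseRSAPrivateKeyFromPEM(pemBytes)
	if err != nil {
		panic(err)
	}

	tok := jwt.New(jwt.SigningMethodRS256)
	signed, err := tok.SignedString(priv)
	fmt.Println(len(signed) > 0, err)
}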

24
vendor/github.com/dgrijalva/jwt-go/signing_method.go generated vendored Normal file
View File

@ -0,0 +1,24 @@
package jwt
var signingMethods = map[string]func() SigningMethod{}
// SigningMethod is the interface implemented by the signing algorithms in this package
type SigningMethod interface {
Verify(signingString, signature string, key interface{}) error
Sign(signingString string, key interface{}) (string, error)
Alg() string
}
// Register the "alg" name and a factory function for signing method.
// This is typically done during init() in the method's implementation
func RegisterSigningMethod(alg string, f func() SigningMethod) {
signingMethods[alg] = f
}
// Get a signing method from an "alg" string
func GetSigningMethod(alg string) (method SigningMethod) {
if methodF, ok := signingMethods[alg]; ok {
method = methodF()
}
return
}
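The registry above is how additional algorithms plug in. A minimal hedged sketch of registering a do-nothing method; the "demo-none" name and demoMethod type are purely illustrative and not part of the library:
package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

// demoMethod is a hypothetical SigningMethod that performs no real signing;
// it exists only to exercise the registration hooks.
type demoMethod struct{}

func (demoMethod) Alg() string { return "demo-none" }

func (demoMethod) Sign(signingString string, key interface{}) (string, error) {
	return "", nil
}

func (demoMethod) Verify(signingString, signature string, key interface{}) error {
	return nil
}

func main() {
	jwt.RegisterSigningMethod("demo-none", func() jwt.SigningMethod { return demoMethod{} })
	fmt.Println(jwt.GetSigningMethod("demo-none").Alg())
}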

View File

@ -0,0 +1 @@
package manifest

View File

@ -0,0 +1,283 @@
package schema1
import (
"crypto/sha512"
"encoding/json"
"errors"
"fmt"
"time"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/reference"
"github.com/docker/libtrust"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/manifest"
)
type diffID digest.Digest
// gzippedEmptyTar is a gzip-compressed version of an empty tar file
// (1024 NULL bytes)
var gzippedEmptyTar = []byte{
31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
}
// digestSHA256GzippedEmptyTar is the canonical sha256 digest of
// gzippedEmptyTar
const digestSHA256GzippedEmptyTar = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
// configManifestBuilder is a type for constructing manifests from an image
// configuration and generic descriptors.
type configManifestBuilder struct {
// bs is a BlobService used to create empty layer tars in the
// blob store if necessary.
bs distribution.BlobService
// pk is the libtrust private key used to sign the final manifest.
pk libtrust.PrivateKey
// configJSON is configuration supplied when the ManifestBuilder was
// created.
configJSON []byte
// ref contains the name and optional tag provided to NewConfigManifestBuilder.
ref reference.Named
// descriptors is the set of descriptors referencing the layers.
descriptors []distribution.Descriptor
// emptyTarDigest is set to a valid digest if an empty tar has been
// put in the blob store; otherwise it is empty.
emptyTarDigest digest.Digest
}
// NewConfigManifestBuilder is used to build new manifests for the current
// schema version from an image configuration and a set of descriptors.
// It takes a BlobService so that it can add an empty tar to the blob store
// if the resulting manifest needs empty layers.
func NewConfigManifestBuilder(bs distribution.BlobService, pk libtrust.PrivateKey, ref reference.Named, configJSON []byte) distribution.ManifestBuilder {
return &configManifestBuilder{
bs: bs,
pk: pk,
configJSON: configJSON,
ref: ref,
}
}
// Build produces a final manifest from the given references
func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Manifest, err error) {
type imageRootFS struct {
Type string `json:"type"`
DiffIDs []diffID `json:"diff_ids,omitempty"`
BaseLayer string `json:"base_layer,omitempty"`
}
type imageHistory struct {
Created time.Time `json:"created"`
Author string `json:"author,omitempty"`
CreatedBy string `json:"created_by,omitempty"`
Comment string `json:"comment,omitempty"`
EmptyLayer bool `json:"empty_layer,omitempty"`
}
type imageConfig struct {
RootFS *imageRootFS `json:"rootfs,omitempty"`
History []imageHistory `json:"history,omitempty"`
Architecture string `json:"architecture,omitempty"`
}
var img imageConfig
if err := json.Unmarshal(mb.configJSON, &img); err != nil {
return nil, err
}
if len(img.History) == 0 {
return nil, errors.New("empty history when trying to create schema1 manifest")
}
if len(img.RootFS.DiffIDs) != len(mb.descriptors) {
return nil, errors.New("number of descriptors and number of layers in rootfs must match")
}
// Generate IDs for each layer
// For non-top-level layers, create fake V1Compatibility strings that
// fit the format and don't collide with anything else, but don't
// result in runnable images on their own.
type v1Compatibility struct {
ID string `json:"id"`
Parent string `json:"parent,omitempty"`
Comment string `json:"comment,omitempty"`
Created time.Time `json:"created"`
ContainerConfig struct {
Cmd []string
} `json:"container_config,omitempty"`
Author string `json:"author,omitempty"`
ThrowAway bool `json:"throwaway,omitempty"`
}
fsLayerList := make([]FSLayer, len(img.History))
history := make([]History, len(img.History))
parent := ""
layerCounter := 0
for i, h := range img.History[:len(img.History)-1] {
var blobsum digest.Digest
if h.EmptyLayer {
if blobsum, err = mb.emptyTar(ctx); err != nil {
return nil, err
}
} else {
if len(img.RootFS.DiffIDs) <= layerCounter {
return nil, errors.New("too many non-empty layers in History section")
}
blobsum = mb.descriptors[layerCounter].Digest
layerCounter++
}
v1ID := digest.FromBytes([]byte(blobsum.Hex() + " " + parent)).Hex()
if i == 0 && img.RootFS.BaseLayer != "" {
// windows-only baselayer setup
baseID := sha512.Sum384([]byte(img.RootFS.BaseLayer))
parent = fmt.Sprintf("%x", baseID[:32])
}
v1Compatibility := v1Compatibility{
ID: v1ID,
Parent: parent,
Comment: h.Comment,
Created: h.Created,
Author: h.Author,
}
v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy}
if h.EmptyLayer {
v1Compatibility.ThrowAway = true
}
jsonBytes, err := json.Marshal(&v1Compatibility)
if err != nil {
return nil, err
}
reversedIndex := len(img.History) - i - 1
history[reversedIndex].V1Compatibility = string(jsonBytes)
fsLayerList[reversedIndex] = FSLayer{BlobSum: blobsum}
parent = v1ID
}
latestHistory := img.History[len(img.History)-1]
var blobsum digest.Digest
if latestHistory.EmptyLayer {
if blobsum, err = mb.emptyTar(ctx); err != nil {
return nil, err
}
} else {
if len(img.RootFS.DiffIDs) <= layerCounter {
return nil, errors.New("too many non-empty layers in History section")
}
blobsum = mb.descriptors[layerCounter].Digest
}
fsLayerList[0] = FSLayer{BlobSum: blobsum}
dgst := digest.FromBytes([]byte(blobsum.Hex() + " " + parent + " " + string(mb.configJSON)))
// Top-level v1compatibility string should be a modified version of the
// image config.
transformedConfig, err := MakeV1ConfigFromConfig(mb.configJSON, dgst.Hex(), parent, latestHistory.EmptyLayer)
if err != nil {
return nil, err
}
history[0].V1Compatibility = string(transformedConfig)
tag := ""
if tagged, isTagged := mb.ref.(reference.Tagged); isTagged {
tag = tagged.Tag()
}
mfst := Manifest{
Versioned: manifest.Versioned{
SchemaVersion: 1,
},
Name: mb.ref.Name(),
Tag: tag,
Architecture: img.Architecture,
FSLayers: fsLayerList,
History: history,
}
return Sign(&mfst, mb.pk)
}
// emptyTar pushes a compressed empty tar to the blob store if one doesn't
// already exist, and returns its blobsum.
func (mb *configManifestBuilder) emptyTar(ctx context.Context) (digest.Digest, error) {
if mb.emptyTarDigest != "" {
// Already put an empty tar
return mb.emptyTarDigest, nil
}
descriptor, err := mb.bs.Stat(ctx, digestSHA256GzippedEmptyTar)
switch err {
case nil:
mb.emptyTarDigest = descriptor.Digest
return descriptor.Digest, nil
case distribution.ErrBlobUnknown:
// nop
default:
return "", err
}
// Add gzipped empty tar to the blob store
descriptor, err = mb.bs.Put(ctx, "", gzippedEmptyTar)
if err != nil {
return "", err
}
mb.emptyTarDigest = descriptor.Digest
return descriptor.Digest, nil
}
// AppendReference adds a reference to the current ManifestBuilder
func (mb *configManifestBuilder) AppendReference(d distribution.Describable) error {
// todo: verification here?
mb.descriptors = append(mb.descriptors, d.Descriptor())
return nil
}
// References returns the current references added to this builder
func (mb *configManifestBuilder) References() []distribution.Descriptor {
return mb.descriptors
}
// MakeV1ConfigFromConfig creates a legacy V1 image config from image config JSON
func MakeV1ConfigFromConfig(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) {
// Top-level v1compatibility string should be a modified version of the
// image config.
var configAsMap map[string]*json.RawMessage
if err := json.Unmarshal(configJSON, &configAsMap); err != nil {
return nil, err
}
// Delete fields that didn't exist in old manifest
delete(configAsMap, "rootfs")
delete(configAsMap, "history")
configAsMap["id"] = rawJSON(v1ID)
if parentV1ID != "" {
configAsMap["parent"] = rawJSON(parentV1ID)
}
if throwaway {
configAsMap["throwaway"] = rawJSON(true)
}
return json.Marshal(configAsMap)
}
func rawJSON(value interface{}) *json.RawMessage {
jsonval, err := json.Marshal(value)
if err != nil {
return nil
}
return (*json.RawMessage)(&jsonval)
}
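MakeV1ConfigFromConfig is the one piece of this file that is easy to exercise in isolation; a hedged sketch with an illustrative image config:
package main

import (
	"fmt"

	"github.com/docker/distribution/manifest/schema1"
)

func main() {
	// Illustrative image config; only "rootfs" and "history" get dropped.
	config := []byte(`{"architecture":"amd64","rootfs":{"type":"layers"},"history":[]}`)

	v1, err := schema1.MakeV1ConfigFromConfig(config, "deadbeefcafe", "", true)
	if err != nil {
		panic(err)
	}
	// The result gains "id" and "throwaway" and loses "rootfs"/"history".
	fmt.Println(string(v1))
}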

View File

@ -0,0 +1,184 @@
package schema1
import (
"encoding/json"
"fmt"
"github.com/docker/distribution"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/manifest"
"github.com/docker/libtrust"
)
const (
// MediaTypeManifest specifies the mediaType for the current version. Note
// that for schema version 1, the media type is optionally "application/json".
MediaTypeManifest = "application/vnd.docker.distribution.manifest.v1+json"
// MediaTypeSignedManifest specifies the mediatype for current SignedManifest version
MediaTypeSignedManifest = "application/vnd.docker.distribution.manifest.v1+prettyjws"
// MediaTypeManifestLayer specifies the media type for manifest layers
MediaTypeManifestLayer = "application/vnd.docker.container.image.rootfs.diff+x-gtar"
)
var (
// SchemaVersion provides a pre-initialized version structure for this
// package's version of the manifest.
SchemaVersion = manifest.Versioned{
SchemaVersion: 1,
}
)
func init() {
schema1Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
sm := new(SignedManifest)
err := sm.UnmarshalJSON(b)
if err != nil {
return nil, distribution.Descriptor{}, err
}
desc := distribution.Descriptor{
Digest: digest.FromBytes(sm.Canonical),
Size: int64(len(sm.Canonical)),
MediaType: MediaTypeSignedManifest,
}
return sm, desc, err
}
err := distribution.RegisterManifestSchema(MediaTypeSignedManifest, schema1Func)
if err != nil {
panic(fmt.Sprintf("Unable to register manifest: %s", err))
}
err = distribution.RegisterManifestSchema("", schema1Func)
if err != nil {
panic(fmt.Sprintf("Unable to register manifest: %s", err))
}
err = distribution.RegisterManifestSchema("application/json", schema1Func)
if err != nil {
panic(fmt.Sprintf("Unable to register manifest: %s", err))
}
}
// FSLayer is a container struct for BlobSums defined in an image manifest
type FSLayer struct {
// BlobSum is the tarsum of the referenced filesystem image layer
BlobSum digest.Digest `json:"blobSum"`
}
// History stores unstructured v1 compatibility information
type History struct {
// V1Compatibility is the raw v1 compatibility information
V1Compatibility string `json:"v1Compatibility"`
}
// Manifest provides the base accessible fields for working with V2 image
// format in the registry.
type Manifest struct {
manifest.Versioned
// Name is the name of the image's repository
Name string `json:"name"`
// Tag is the tag of the image specified by this manifest
Tag string `json:"tag"`
// Architecture is the host architecture on which this image is intended to
// run
Architecture string `json:"architecture"`
// FSLayers is a list of filesystem layer blobSums contained in this image
FSLayers []FSLayer `json:"fsLayers"`
// History is a list of unstructured historical data for v1 compatibility
History []History `json:"history"`
}
// SignedManifest provides an envelope for a signed image manifest, including
// the format sensitive raw bytes.
type SignedManifest struct {
Manifest
// Canonical is the canonical byte representation of the ImageManifest,
// without any attached signatures. The manifest byte
// representation cannot change or it will have to be re-signed.
Canonical []byte `json:"-"`
// all contains the byte representation of the Manifest including signatures
// and is returned by Payload()
all []byte
}
// UnmarshalJSON populates a new SignedManifest struct from JSON data.
func (sm *SignedManifest) UnmarshalJSON(b []byte) error {
sm.all = make([]byte, len(b), len(b))
// store manifest and signatures in all
copy(sm.all, b)
jsig, err := libtrust.ParsePrettySignature(b, "signatures")
if err != nil {
return err
}
// Resolve the payload in the manifest.
bytes, err := jsig.Payload()
if err != nil {
return err
}
// sm.Canonical stores the canonical manifest JSON
sm.Canonical = make([]byte, len(bytes), len(bytes))
copy(sm.Canonical, bytes)
// Unmarshal canonical JSON into Manifest object
var manifest Manifest
if err := json.Unmarshal(sm.Canonical, &manifest); err != nil {
return err
}
sm.Manifest = manifest
return nil
}
// References returns the descriptors of this manifest's references
func (sm SignedManifest) References() []distribution.Descriptor {
dependencies := make([]distribution.Descriptor, len(sm.FSLayers))
for i, fsLayer := range sm.FSLayers {
dependencies[i] = distribution.Descriptor{
MediaType: "application/vnd.docker.container.image.rootfs.diff+x-gtar",
Digest: fsLayer.BlobSum,
}
}
return dependencies
}
// MarshalJSON returns the contents of all. If all is empty, marshals the inner
// contents. Applications requiring a marshaled signed manifest should use the
// Payload method directly, since the content produced by json.Marshal will be
// compacted and will fail signature checks.
func (sm *SignedManifest) MarshalJSON() ([]byte, error) {
if len(sm.all) > 0 {
return sm.all, nil
}
// If the raw data is not available, just dump the inner content.
return json.Marshal(&sm.Manifest)
}
// Payload returns the signed content of the signed manifest.
func (sm SignedManifest) Payload() (string, []byte, error) {
return MediaTypeSignedManifest, sm.all, nil
}
// Signatures returns the signatures as provided by
// (*libtrust.JSONSignature).Signatures. The byte slices are opaque jws
// signatures.
func (sm *SignedManifest) Signatures() ([][]byte, error) {
jsig, err := libtrust.ParsePrettySignature(sm.all, "signatures")
if err != nil {
return nil, err
}
// Resolve the payload in the manifest.
return jsig.Signatures()
}
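A small sketch of consuming a signed schema1 manifest with the types above; raw stands in for manifest bytes pulled from a registry:
package schema1sketch

import (
	"github.com/docker/distribution/manifest/schema1"
)

// layerDigests decodes a signed schema1 manifest and returns the layer blob
// digests it references, in the order stored in fsLayers.
func layerDigests(raw []byte) ([]string, error) {
	var sm schema1.SignedManifest
	if err := sm.UnmarshalJSON(raw); err != nil {
		return nil, err
	}
	digests := make([]string, 0, len(sm.FSLayers))
	for _, desc := range sm.References() {
		digests = append(digests, desc.Digest.String())
	}
	return digests, nil
}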

View File

@ -0,0 +1,98 @@
package schema1
import (
"fmt"
"errors"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/manifest"
"github.com/docker/distribution/reference"
"github.com/docker/libtrust"
)
// referenceManifestBuilder is a type for constructing manifests from schema1
// dependencies.
type referenceManifestBuilder struct {
Manifest
pk libtrust.PrivateKey
}
// NewReferenceManifestBuilder is used to build new manifests for the current
// schema version using schema1 dependencies.
func NewReferenceManifestBuilder(pk libtrust.PrivateKey, ref reference.Named, architecture string) distribution.ManifestBuilder {
tag := ""
if tagged, isTagged := ref.(reference.Tagged); isTagged {
tag = tagged.Tag()
}
return &referenceManifestBuilder{
Manifest: Manifest{
Versioned: manifest.Versioned{
SchemaVersion: 1,
},
Name: ref.Name(),
Tag: tag,
Architecture: architecture,
},
pk: pk,
}
}
func (mb *referenceManifestBuilder) Build(ctx context.Context) (distribution.Manifest, error) {
m := mb.Manifest
if len(m.FSLayers) == 0 {
return nil, errors.New("cannot build manifest with zero layers or history")
}
m.FSLayers = make([]FSLayer, len(mb.Manifest.FSLayers))
m.History = make([]History, len(mb.Manifest.History))
copy(m.FSLayers, mb.Manifest.FSLayers)
copy(m.History, mb.Manifest.History)
return Sign(&m, mb.pk)
}
// AppendReference adds a reference to the current ManifestBuilder
func (mb *referenceManifestBuilder) AppendReference(d distribution.Describable) error {
r, ok := d.(Reference)
if !ok {
return fmt.Errorf("Unable to add non-reference type to v1 builder")
}
// Entries need to be prepended
mb.Manifest.FSLayers = append([]FSLayer{{BlobSum: r.Digest}}, mb.Manifest.FSLayers...)
mb.Manifest.History = append([]History{r.History}, mb.Manifest.History...)
return nil
}
// References returns the current references added to this builder
func (mb *referenceManifestBuilder) References() []distribution.Descriptor {
refs := make([]distribution.Descriptor, len(mb.Manifest.FSLayers))
for i := range mb.Manifest.FSLayers {
layerDigest := mb.Manifest.FSLayers[i].BlobSum
history := mb.Manifest.History[i]
ref := Reference{layerDigest, 0, history}
refs[i] = ref.Descriptor()
}
return refs
}
// Reference describes a manifest v2, schema version 1 dependency.
// An FSLayer associated with a history entry.
type Reference struct {
Digest digest.Digest
Size int64 // if we know it, set it for the descriptor.
History History
}
// Descriptor describes a reference
func (r Reference) Descriptor() distribution.Descriptor {
return distribution.Descriptor{
MediaType: MediaTypeManifestLayer,
Digest: r.Digest,
Size: r.Size,
}
}

View File

@ -0,0 +1,68 @@
package schema1
import (
"crypto/x509"
"encoding/json"
"github.com/docker/libtrust"
)
// Sign signs the manifest with the provided private key, returning a
// SignedManifest. This typically won't be used within the registry, except
// for testing.
func Sign(m *Manifest, pk libtrust.PrivateKey) (*SignedManifest, error) {
p, err := json.MarshalIndent(m, "", " ")
if err != nil {
return nil, err
}
js, err := libtrust.NewJSONSignature(p)
if err != nil {
return nil, err
}
if err := js.Sign(pk); err != nil {
return nil, err
}
pretty, err := js.PrettySignature("signatures")
if err != nil {
return nil, err
}
return &SignedManifest{
Manifest: *m,
all: pretty,
Canonical: p,
}, nil
}
// SignWithChain signs the manifest with the given private key and x509 chain.
// The public key of the first element in the chain must be the public key
// corresponding with the sign key.
func SignWithChain(m *Manifest, key libtrust.PrivateKey, chain []*x509.Certificate) (*SignedManifest, error) {
p, err := json.MarshalIndent(m, "", " ")
if err != nil {
return nil, err
}
js, err := libtrust.NewJSONSignature(p)
if err != nil {
return nil, err
}
if err := js.SignWithChain(key, chain); err != nil {
return nil, err
}
pretty, err := js.PrettySignature("signatures")
if err != nil {
return nil, err
}
return &SignedManifest{
Manifest: *m,
all: pretty,
Canonical: p,
}, nil
}

View File

@ -0,0 +1,32 @@
package schema1
import (
"crypto/x509"
"github.com/Sirupsen/logrus"
"github.com/docker/libtrust"
)
// Verify verifies the signature of the signed manifest returning the public
// keys used during signing.
func Verify(sm *SignedManifest) ([]libtrust.PublicKey, error) {
js, err := libtrust.ParsePrettySignature(sm.all, "signatures")
if err != nil {
logrus.WithField("err", err).Debugf("(*SignedManifest).Verify")
return nil, err
}
return js.Verify()
}
// VerifyChains verifies the signature of the signed manifest against the
// certificate pool returning the list of verified chains. Signatures without
// an x509 chain are not checked.
func VerifyChains(sm *SignedManifest, ca *x509.CertPool) ([][]*x509.Certificate, error) {
js, err := libtrust.ParsePrettySignature(sm.all, "signatures")
if err != nil {
return nil, err
}
return js.VerifyChains(ca)
}
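Sign and Verify round-trip naturally. A hedged sketch, assuming libtrust's GenerateECP256PrivateKey helper (part of the vendored libtrust package, though not shown in this excerpt) and illustrative name/tag values:
package main

import (
	"fmt"

	"github.com/docker/distribution/manifest"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/libtrust"
)

func main() {
	// Throwaway signing key; a registry would load a persistent one.
	pk, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}

	m := schema1.Manifest{
		Versioned:    manifest.Versioned{SchemaVersion: 1},
		Name:         "example/repo", // illustrative
		Tag:          "latest",
		Architecture: "amd64",
	}

	sm, err := schema1.Sign(&m, pk)
	if err != nil {
		panic(err)
	}

	// Verify returns the public keys that produced valid signatures.
	keys, err := schema1.Verify(sm)
	fmt.Println(len(keys), err)
}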

View File

@ -0,0 +1,80 @@
package schema2
import (
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
)
// builder is a type for constructing manifests.
type builder struct {
// bs is a BlobService used to publish the configuration blob.
bs distribution.BlobService
// configJSON is the raw image configuration that will be published as the
// manifest's config blob.
configJSON []byte
// layers is a list of layer descriptors that gets built by successive
// calls to AppendReference.
layers []distribution.Descriptor
}
// NewManifestBuilder is used to build new manifests for the current schema
// version. It takes a BlobService so it can publish the configuration blob
// as part of the Build process.
func NewManifestBuilder(bs distribution.BlobService, configJSON []byte) distribution.ManifestBuilder {
mb := &builder{
bs: bs,
configJSON: make([]byte, len(configJSON)),
}
copy(mb.configJSON, configJSON)
return mb
}
// Build produces a final manifest from the given references.
func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) {
m := Manifest{
Versioned: SchemaVersion,
Layers: make([]distribution.Descriptor, len(mb.layers)),
}
copy(m.Layers, mb.layers)
configDigest := digest.FromBytes(mb.configJSON)
var err error
m.Config, err = mb.bs.Stat(ctx, configDigest)
switch err {
case nil:
// Override MediaType, since Put always replaces the specified media
// type with application/octet-stream in the descriptor it returns.
m.Config.MediaType = MediaTypeConfig
return FromStruct(m)
case distribution.ErrBlobUnknown:
// nop
default:
return nil, err
}
// Add config to the blob store
m.Config, err = mb.bs.Put(ctx, MediaTypeConfig, mb.configJSON)
// Override MediaType, since Put always replaces the specified media
// type with application/octet-stream in the descriptor it returns.
m.Config.MediaType = MediaTypeConfig
if err != nil {
return nil, err
}
return FromStruct(m)
}
// AppendReference adds a reference to the current ManifestBuilder.
func (mb *builder) AppendReference(d distribution.Describable) error {
mb.layers = append(mb.layers, d.Descriptor())
return nil
}
// References returns the current references added to this builder.
func (mb *builder) References() []distribution.Descriptor {
return mb.layers
}

View File

@ -0,0 +1,128 @@
package schema2
import (
"encoding/json"
"errors"
"fmt"
"github.com/docker/distribution"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/manifest"
)
const (
// MediaTypeManifest specifies the mediaType for the current version.
MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json"
// MediaTypeConfig specifies the mediaType for the image configuration.
MediaTypeConfig = "application/vnd.docker.container.image.v1+json"
// MediaTypeLayer is the mediaType used for layers referenced by the
// manifest.
MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip"
// MediaTypeForeignLayer is the mediaType used for layers that must be
// downloaded from foreign URLs.
MediaTypeForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
)
var (
// SchemaVersion provides a pre-initialized version structure for this
// package's version of the manifest.
SchemaVersion = manifest.Versioned{
SchemaVersion: 2,
MediaType: MediaTypeManifest,
}
)
func init() {
schema2Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
m := new(DeserializedManifest)
err := m.UnmarshalJSON(b)
if err != nil {
return nil, distribution.Descriptor{}, err
}
dgst := digest.FromBytes(b)
return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifest}, err
}
err := distribution.RegisterManifestSchema(MediaTypeManifest, schema2Func)
if err != nil {
panic(fmt.Sprintf("Unable to register manifest: %s", err))
}
}
// Manifest defines a schema2 manifest.
type Manifest struct {
manifest.Versioned
// Config references the image configuration as a blob.
Config distribution.Descriptor `json:"config"`
// Layers lists descriptors for the layers referenced by the
// configuration.
Layers []distribution.Descriptor `json:"layers"`
}
// References returns the descriptors of this manifest's references.
func (m Manifest) References() []distribution.Descriptor {
return m.Layers
}
// Target returns the target of this signed manifest.
func (m Manifest) Target() distribution.Descriptor {
return m.Config
}
// DeserializedManifest wraps Manifest with a copy of the original JSON.
// It satisfies the distribution.Manifest interface.
type DeserializedManifest struct {
Manifest
// canonical is the canonical byte representation of the Manifest.
canonical []byte
}
// FromStruct takes a Manifest structure, marshals it to JSON, and returns a
// DeserializedManifest which contains the manifest and its JSON representation.
func FromStruct(m Manifest) (*DeserializedManifest, error) {
var deserialized DeserializedManifest
deserialized.Manifest = m
var err error
deserialized.canonical, err = json.MarshalIndent(&m, "", " ")
return &deserialized, err
}
// UnmarshalJSON populates a new Manifest struct from JSON data.
func (m *DeserializedManifest) UnmarshalJSON(b []byte) error {
m.canonical = make([]byte, len(b), len(b))
// store manifest in canonical
copy(m.canonical, b)
// Unmarshal canonical JSON into Manifest object
var manifest Manifest
if err := json.Unmarshal(m.canonical, &manifest); err != nil {
return err
}
m.Manifest = manifest
return nil
}
// MarshalJSON returns the contents of canonical. If canonical is empty, an
// error is returned.
func (m *DeserializedManifest) MarshalJSON() ([]byte, error) {
if len(m.canonical) > 0 {
return m.canonical, nil
}
return nil, errors.New("JSON representation not initialized in DeserializedManifest")
}
// Payload returns the raw content of the manifest. The contents can be used to
// calculate the content identifier.
func (m DeserializedManifest) Payload() (string, []byte, error) {
return m.MediaType, m.canonical, nil
}
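A sketch of assembling a schema2 manifest by hand with FromStruct and recovering its content-addressable digest from Payload; the config blob and descriptors are illustrative:
package main

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest/schema2"
)

func main() {
	configJSON := []byte(`{"architecture":"amd64"}`) // illustrative config blob

	m := schema2.Manifest{
		Versioned: schema2.SchemaVersion,
		Config: distribution.Descriptor{
			MediaType: schema2.MediaTypeConfig,
			Digest:    digest.FromBytes(configJSON),
			Size:      int64(len(configJSON)),
		},
		Layers: []distribution.Descriptor{}, // layer descriptors would go here
	}

	dm, err := schema2.FromStruct(m)
	if err != nil {
		panic(err)
	}

	// The manifest digest is the digest of the canonical payload bytes.
	_, payload, _ := dm.Payload()
	fmt.Println(digest.FromBytes(payload))
}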

View File

@ -0,0 +1,12 @@
package manifest
// Versioned provides a struct with the manifest schemaVersion and mediaType. Incoming
// content with unknown schema version can be decoded against this struct to
// check the version.
type Versioned struct {
// SchemaVersion is the image manifest schema that this image follows
SchemaVersion int `json:"schemaVersion"`
// MediaType is the media type of this schema.
MediaType string `json:"mediaType,omitempty"`
}
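As the comment suggests, Versioned is handy for sniffing unknown payloads before choosing a schema package; a brief sketch:
package manifestsketch

import (
	"encoding/json"

	"github.com/docker/distribution/manifest"
)

// schemaVersion peeks at the schemaVersion/mediaType of raw manifest bytes
// without committing to schema1 or schema2 decoding.
func schemaVersion(raw []byte) (int, string, error) {
	var v manifest.Versioned
	if err := json.Unmarshal(raw, &v); err != nil {
		return 0, "", err
	}
	return v.SchemaVersion, v.MediaType, nil
}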

13
vendor/github.com/docker/libtrust/CONTRIBUTING.md generated vendored Normal file
View File

@ -0,0 +1,13 @@
# Contributing to libtrust
Want to hack on libtrust? Awesome! Here are instructions to get you
started.
libtrust is a part of the [Docker](https://www.docker.com) project, and follows
the same rules and principles. If you're already familiar with the way
Docker does things, you'll feel right at home.
Otherwise, go read
[Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md).
Happy hacking!

191
vendor/github.com/docker/libtrust/LICENSE generated vendored Normal file
View File

@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2014 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

3
vendor/github.com/docker/libtrust/MAINTAINERS generated vendored Normal file
View File

@ -0,0 +1,3 @@
Solomon Hykes <solomon@docker.com>
Josh Hawn <josh@docker.com> (github: jlhawn)
Derek McGowan <derek@docker.com> (github: dmcgowan)

18
vendor/github.com/docker/libtrust/README.md generated vendored Normal file
View File

@ -0,0 +1,18 @@
# libtrust
Libtrust is a library for managing authentication and authorization using public key cryptography.
Authentication is handled using the identity attached to the public key.
Libtrust provides multiple methods to prove possession of the private key associated with an identity.
- TLS x509 certificates
- Signature verification
- Key Challenge
Authorization and access control is managed through a distributed trust graph.
Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access.
## Copyright and license
Code and documentation copyright 2014 Docker, Inc. Code released under the Apache 2.0 license.
Docs released under Creative Commons.

175
vendor/github.com/docker/libtrust/certificates.go generated vendored Normal file
View File

@ -0,0 +1,175 @@
package libtrust
import (
"crypto/rand"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"io/ioutil"
"math/big"
"net"
"time"
)
type certTemplateInfo struct {
commonName string
domains []string
ipAddresses []net.IP
isCA bool
clientAuth bool
serverAuth bool
}
func generateCertTemplate(info *certTemplateInfo) *x509.Certificate {
// Generate a certificate template which is valid from the past week to
// 10 years from now. The usage of the certificate depends on the
// specified fields in the given certTemplateInfo object.
var (
keyUsage x509.KeyUsage
extKeyUsage []x509.ExtKeyUsage
)
if info.isCA {
keyUsage = x509.KeyUsageCertSign
}
if info.clientAuth {
extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth)
}
if info.serverAuth {
extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth)
}
return &x509.Certificate{
SerialNumber: big.NewInt(0),
Subject: pkix.Name{
CommonName: info.commonName,
},
NotBefore: time.Now().Add(-time.Hour * 24 * 7),
NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10),
DNSNames: info.domains,
IPAddresses: info.ipAddresses,
IsCA: info.isCA,
KeyUsage: keyUsage,
ExtKeyUsage: extKeyUsage,
BasicConstraintsValid: info.isCA,
}
}
func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) {
pubCertTemplate := generateCertTemplate(subInfo)
privCertTemplate := generateCertTemplate(issInfo)
certDER, err := x509.CreateCertificate(
rand.Reader, pubCertTemplate, privCertTemplate,
pub.CryptoPublicKey(), priv.CryptoPrivateKey(),
)
if err != nil {
return nil, fmt.Errorf("failed to create certificate: %s", err)
}
cert, err = x509.ParseCertificate(certDER)
if err != nil {
return nil, fmt.Errorf("failed to parse certificate: %s", err)
}
return
}
// GenerateSelfSignedServerCert creates a self-signed certificate for the
// given key which is to be used for TLS servers with the given domains and
// IP addresses.
func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) {
info := &certTemplateInfo{
commonName: key.KeyID(),
domains: domains,
ipAddresses: ipAddresses,
serverAuth: true,
}
return generateCert(key.PublicKey(), key, info, info)
}
// GenerateSelfSignedClientCert creates a self-signed certificate for the
// given key which is to be used for TLS clients.
func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) {
info := &certTemplateInfo{
commonName: key.KeyID(),
clientAuth: true,
}
return generateCert(key.PublicKey(), key, info, info)
}
// GenerateCACert creates a certificate which can be used as a trusted
// certificate authority.
func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) {
subjectInfo := &certTemplateInfo{
commonName: trustedKey.KeyID(),
isCA: true,
}
issuerInfo := &certTemplateInfo{
commonName: signer.KeyID(),
}
return generateCert(trustedKey, signer, subjectInfo, issuerInfo)
}
// GenerateCACertPool creates a certificate authority pool to be used for a
// TLS configuration. Any self-signed certificates issued by the specified
// trusted keys will be verified during a TLS handshake
func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) {
certPool := x509.NewCertPool()
for _, trustedKey := range trustedKeys {
cert, err := GenerateCACert(signer, trustedKey)
if err != nil {
return nil, fmt.Errorf("failed to generate CA certificate: %s", err)
}
certPool.AddCert(cert)
}
return certPool, nil
}
// LoadCertificateBundle loads certificates from the given file. The file should be pem encoded
// containing one or more certificates. The expected pem type is "CERTIFICATE".
func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) {
b, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
certificates := []*x509.Certificate{}
var block *pem.Block
block, b = pem.Decode(b)
for ; block != nil; block, b = pem.Decode(b) {
if block.Type == "CERTIFICATE" {
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return nil, err
}
certificates = append(certificates, cert)
} else {
return nil, fmt.Errorf("invalid pem block type: %s", block.Type)
}
}
return certificates, nil
}
// LoadCertificatePool loads a CA pool from the given file. The file should be pem encoded
// containing one or more certificates. The expected pem type is "CERTIFICATE".
func LoadCertificatePool(filename string) (*x509.CertPool, error) {
certs, err := LoadCertificateBundle(filename)
if err != nil {
return nil, err
}
pool := x509.NewCertPool()
for _, cert := range certs {
pool.AddCert(cert)
}
return pool, nil
}
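A hedged sketch of the certificate helpers, again assuming libtrust's GenerateECP256PrivateKey for the throwaway key; the host name and IP are illustrative:
package main

import (
	"fmt"
	"net"

	"github.com/docker/libtrust"
)

func main() {
	// Throwaway key for illustration only.
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}

	// Self-signed server cert for a hypothetical host.
	cert, err := libtrust.GenerateSelfSignedServerCert(
		key,
		[]string{"registry.example.local"},
		[]net.IP{net.ParseIP("127.0.0.1")},
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(cert.Subject.CommonName, cert.NotAfter)
}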

9
vendor/github.com/docker/libtrust/doc.go generated vendored Normal file
View File

@ -0,0 +1,9 @@
/*
Package libtrust provides an interface for managing authentication and
authorization using public key cryptography. Authentication is handled
using the identity attached to the public key and verified through TLS
x509 certificates, a key challenge, or signature. Authorization and
access control is managed through a trust graph distributed between
both remote trust servers and locally cached and managed data.
*/
package libtrust

428
vendor/github.com/docker/libtrust/ec_key.go generated vendored Normal file
View File

@ -0,0 +1,428 @@
package libtrust
import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/x509"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"io"
"math/big"
)
/*
* EC DSA PUBLIC KEY
*/
// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital
// signature algorithms.
type ecPublicKey struct {
*ecdsa.PublicKey
curveName string
signatureAlgorithm *signatureAlgorithm
extended map[string]interface{}
}
func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) {
curve := cryptoPublicKey.Curve
switch {
case curve == elliptic.P256():
return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil
case curve == elliptic.P384():
return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil
case curve == elliptic.P521():
return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil
default:
return nil, errors.New("unsupported elliptic curve")
}
}
// KeyType returns the key type for elliptic curve keys, i.e., "EC".
func (k *ecPublicKey) KeyType() string {
return "EC"
}
// CurveName returns the elliptic curve identifier.
// Possible values are "P-256", "P-384", and "P-521".
func (k *ecPublicKey) CurveName() string {
return k.curveName
}
// KeyID returns a distinct identifier which is unique to this Public Key.
func (k *ecPublicKey) KeyID() string {
return keyIDFromCryptoKey(k)
}
func (k *ecPublicKey) String() string {
return fmt.Sprintf("EC Public Key <%s>", k.KeyID())
}
// Verify verifies the signature of the data in the io.Reader using this
// PublicKey. The alg parameter should identify the digital signature
// algorithm which was used to produce the signature and should be supported
// by this public key. Returns a nil error if the signature is valid.
func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
// For EC keys there is only one supported signature algorithm depending
// on the curve parameters.
if k.signatureAlgorithm.HeaderParam() != alg {
return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg)
}
// signature is the concatenation of (r, s), base64Url encoded.
sigLength := len(signature)
expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3)
if sigLength != expectedOctetLength {
return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength)
}
rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:]
r := new(big.Int).SetBytes(rBytes)
s := new(big.Int).SetBytes(sBytes)
hasher := k.signatureAlgorithm.HashID().New()
_, err := io.Copy(hasher, data)
if err != nil {
return fmt.Errorf("error reading data to sign: %s", err)
}
hash := hasher.Sum(nil)
if !ecdsa.Verify(k.PublicKey, hash, r, s) {
return errors.New("invalid signature")
}
return nil
}
// CryptoPublicKey returns the internal object which can be used as a
// crypto.PublicKey for use with other standard library operations. The type
// is either *rsa.PublicKey or *ecdsa.PublicKey
func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey {
return k.PublicKey
}
func (k *ecPublicKey) toMap() map[string]interface{} {
jwk := make(map[string]interface{})
for k, v := range k.extended {
jwk[k] = v
}
jwk["kty"] = k.KeyType()
jwk["kid"] = k.KeyID()
jwk["crv"] = k.CurveName()
xBytes := k.X.Bytes()
yBytes := k.Y.Bytes()
octetLength := (k.Params().BitSize + 7) >> 3
// MUST include leading zeros in the output so that x, y are each
// *octetLength* bytes long.
xBuf := make([]byte, octetLength-len(xBytes), octetLength)
yBuf := make([]byte, octetLength-len(yBytes), octetLength)
xBuf = append(xBuf, xBytes...)
yBuf = append(yBuf, yBytes...)
jwk["x"] = joseBase64UrlEncode(xBuf)
jwk["y"] = joseBase64UrlEncode(yBuf)
return jwk
}
// MarshalJSON serializes this Public Key using the JWK JSON serialization format for
// elliptic curve keys.
func (k *ecPublicKey) MarshalJSON() (data []byte, err error) {
return json.Marshal(k.toMap())
}
// PEMBlock serializes this Public Key to DER-encoded PKIX format.
func (k *ecPublicKey) PEMBlock() (*pem.Block, error) {
derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey)
if err != nil {
return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err)
}
k.extended["kid"] = k.KeyID() // For display purposes.
return createPemBlock("PUBLIC KEY", derBytes, k.extended)
}
func (k *ecPublicKey) AddExtendedField(field string, value interface{}) {
k.extended[field] = value
}
func (k *ecPublicKey) GetExtendedField(field string) interface{} {
v, ok := k.extended[field]
if !ok {
return nil
}
return v
}
func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) {
// JWK key type (kty) has already been determined to be "EC".
// Need to extract 'crv', 'x', 'y', and 'kid' and check for
// consistency.
// Get the curve identifier value.
crv, err := stringFromMap(jwk, "crv")
if err != nil {
return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err)
}
var (
curve elliptic.Curve
sigAlg *signatureAlgorithm
)
switch {
case crv == "P-256":
curve = elliptic.P256()
sigAlg = es256
case crv == "P-384":
curve = elliptic.P384()
sigAlg = es384
case crv == "P-521":
curve = elliptic.P521()
sigAlg = es512
default:
return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q\n", crv)
}
// Get the X and Y coordinates for the public key point.
xB64Url, err := stringFromMap(jwk, "x")
if err != nil {
return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err)
}
x, err := parseECCoordinate(xB64Url, curve)
if err != nil {
return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err)
}
yB64Url, err := stringFromMap(jwk, "y")
if err != nil {
return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err)
}
y, err := parseECCoordinate(yB64Url, curve)
if err != nil {
return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err)
}
key := &ecPublicKey{
PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y},
curveName: crv, signatureAlgorithm: sigAlg,
}
// Key ID is optional too, but if it exists, it should match the key.
_, ok := jwk["kid"]
if ok {
kid, err := stringFromMap(jwk, "kid")
if err != nil {
return nil, fmt.Errorf("JWK EC Public Key ID: %s", err)
}
if kid != key.KeyID() {
return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid)
}
}
key.extended = jwk
return key, nil
}
/*
* EC DSA PRIVATE KEY
*/
// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature
// algorithms.
type ecPrivateKey struct {
ecPublicKey
*ecdsa.PrivateKey
}
func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) {
publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey)
if err != nil {
return nil, err
}
return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil
}
// PublicKey returns the Public Key data associated with this Private Key.
func (k *ecPrivateKey) PublicKey() PublicKey {
return &k.ecPublicKey
}
func (k *ecPrivateKey) String() string {
return fmt.Sprintf("EC Private Key <%s>", k.KeyID())
}
// Sign signs the data read from the io.Reader using a signature algorithm supported
// by the elliptic curve private key. If the specified hashing algorithm is
// supported by this key, that hash function is used to generate the signature
// otherwise the default hashing algorithm for this key is used. Returns
// the signature and the name of the JWK signature algorithm used, e.g.,
// "ES256", "ES384", "ES512".
func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
// Generate a signature of the data using the internal alg.
// The given hashId is only a suggestion, and since EC keys only support
// one signature/hash algorithm given the curve name, we disregard it for
// the elliptic curve JWK signature implementation.
hasher := k.signatureAlgorithm.HashID().New()
_, err = io.Copy(hasher, data)
if err != nil {
return nil, "", fmt.Errorf("error reading data to sign: %s", err)
}
hash := hasher.Sum(nil)
r, s, err := ecdsa.Sign(rand.Reader, k.PrivateKey, hash)
if err != nil {
return nil, "", fmt.Errorf("error producing signature: %s", err)
}
rBytes, sBytes := r.Bytes(), s.Bytes()
octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3
// MUST include leading zeros in the output
rBuf := make([]byte, octetLength-len(rBytes), octetLength)
sBuf := make([]byte, octetLength-len(sBytes), octetLength)
rBuf = append(rBuf, rBytes...)
sBuf = append(sBuf, sBytes...)
signature = append(rBuf, sBuf...)
alg = k.signatureAlgorithm.HeaderParam()
return
}
// CryptoPrivateKey returns the internal object which can be used as a
// crypto.PrivateKey for use with other standard library operations. The type
// is either *rsa.PrivateKey or *ecdsa.PrivateKey
func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
return k.PrivateKey
}
func (k *ecPrivateKey) toMap() map[string]interface{} {
jwk := k.ecPublicKey.toMap()
dBytes := k.D.Bytes()
// The length of this octet string MUST be ceiling(log-base-2(n)/8)
// octets (where n is the order of the curve). This is because the private
// key d must be in the interval [1, n-1] so the bitlength of d should be
// no larger than the bitlength of n-1. The easiest way to find the octet
// length is to take bitlength(n-1), add 7 to force a carry, and shift this
// bit sequence right by 3, which is essentially dividing by 8 and adding
// 1 if there is any remainder. Thus, the private key value d should be
// output to (bitlength(n-1)+7)>>3 octets.
n := k.ecPublicKey.Params().N
octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
// Create a buffer with the necessary zero-padding.
dBuf := make([]byte, octetLength-len(dBytes), octetLength)
dBuf = append(dBuf, dBytes...)
jwk["d"] = joseBase64UrlEncode(dBuf)
return jwk
}
// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
// elliptic curve keys.
func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) {
return json.Marshal(k.toMap())
}
// PEMBlock serializes this Private Key to DER-encoded PKIX format.
func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) {
derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey)
if err != nil {
return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded PKIX format: %s", err)
}
k.extended["keyID"] = k.KeyID() // For display purposes.
return createPemBlock("EC PRIVATE KEY", derBytes, k.extended)
}
func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) {
dB64Url, err := stringFromMap(jwk, "d")
if err != nil {
return nil, fmt.Errorf("JWK EC Private Key: %s", err)
}
// JWK key type (kty) has already been determined to be "EC".
// Need to extract the public key information, then extract the private
// key value 'd'.
publicKey, err := ecPublicKeyFromMap(jwk)
if err != nil {
return nil, err
}
d, err := parseECPrivateParam(dB64Url, publicKey.Curve)
if err != nil {
return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err)
}
key := &ecPrivateKey{
ecPublicKey: *publicKey,
PrivateKey: &ecdsa.PrivateKey{
PublicKey: *publicKey.PublicKey,
D: d,
},
}
return key, nil
}
/*
* Key Generation Functions.
*/
func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) {
k = new(ecPrivateKey)
k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader)
if err != nil {
return nil, err
}
k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey
k.extended = make(map[string]interface{})
return
}
// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256.
func GenerateECP256PrivateKey() (PrivateKey, error) {
k, err := generateECPrivateKey(elliptic.P256())
if err != nil {
return nil, fmt.Errorf("error generating EC P-256 key: %s", err)
}
k.curveName = "P-256"
k.signatureAlgorithm = es256
return k, nil
}
// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384.
func GenerateECP384PrivateKey() (PrivateKey, error) {
k, err := generateECPrivateKey(elliptic.P384())
if err != nil {
return nil, fmt.Errorf("error generating EC P-384 key: %s", err)
}
k.curveName = "P-384"
k.signatureAlgorithm = es384
return k, nil
}
// GenerateECP521PrivateKey generates a key pair using elliptic curve P-521.
func GenerateECP521PrivateKey() (PrivateKey, error) {
k, err := generateECPrivateKey(elliptic.P521())
if err != nil {
return nil, fmt.Errorf("error generating EC P-521 key: %s", err)
}
k.curveName = "P-521"
k.signatureAlgorithm = es512
return k, nil
}
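
The functions above make up the full EC JWK implementation. A minimal usage sketch follows; it is not part of this vendored file and assumes the package is imported as "github.com/docker/libtrust". It generates a P-256 key, signs a payload, and verifies it with the corresponding public key.

package main

import (
	"bytes"
	"crypto"
	"fmt"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	// Generate a fresh P-256 key pair; its JWS algorithm is ES256.
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}

	payload := []byte("example payload")

	// The hash argument is only a hint for EC keys: the curve fixes the
	// hash (SHA-256 for P-256), so "ES256" is returned as the alg.
	sig, alg, err := key.Sign(bytes.NewReader(payload), crypto.SHA256)
	if err != nil {
		log.Fatal(err)
	}

	// Verify with the public half of the key pair.
	if err := key.PublicKey().Verify(bytes.NewReader(payload), alg, sig); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s signature verified for key %s\n", alg, key.KeyID())
}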

50
vendor/github.com/docker/libtrust/filter.go generated vendored Normal file
View File

@ -0,0 +1,50 @@
package libtrust
import (
"path/filepath"
)
// FilterByHosts filters the list of PublicKeys to only those which contain a
// 'hosts' pattern which matches the given host. If *includeEmpty* is true,
// then keys which do not specify any hosts are also returned.
func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) {
filtered := make([]PublicKey, 0, len(keys))
for _, pubKey := range keys {
var hosts []string
switch v := pubKey.GetExtendedField("hosts").(type) {
case []string:
hosts = v
case []interface{}:
for _, value := range v {
h, ok := value.(string)
if !ok {
continue
}
hosts = append(hosts, h)
}
}
if len(hosts) == 0 {
if includeEmpty {
filtered = append(filtered, pubKey)
}
continue
}
// Check if any hosts match pattern
for _, hostPattern := range hosts {
match, err := filepath.Match(hostPattern, host)
if err != nil {
return nil, err
}
if match {
filtered = append(filtered, pubKey)
continue
}
}
}
return filtered, nil
}
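
FilterByHosts is driven entirely by the optional "hosts" extended field on each key. A short illustrative sketch (same assumed import path as above; the host names are made up):

package main

import (
	"fmt"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	keyA, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}
	keyB, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}

	// Only keyA declares a hosts pattern; keyB has no hosts at all.
	keyA.AddExtendedField("hosts", []string{"*.example.com"})

	keys := []libtrust.PublicKey{keyA.PublicKey(), keyB.PublicKey()}

	// With includeEmpty=false, keys without a hosts field are dropped,
	// so only keyA should match "registry.example.com".
	matched, err := libtrust.FilterByHosts(keys, "registry.example.com", false)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("matched keys:", len(matched)) // expected: 1
}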

56
vendor/github.com/docker/libtrust/hash.go generated vendored Normal file
View File

@ -0,0 +1,56 @@
package libtrust
import (
"crypto"
_ "crypto/sha256" // Registrer SHA224 and SHA256
_ "crypto/sha512" // Registrer SHA384 and SHA512
"fmt"
)
type signatureAlgorithm struct {
algHeaderParam string
hashID crypto.Hash
}
func (h *signatureAlgorithm) HeaderParam() string {
return h.algHeaderParam
}
func (h *signatureAlgorithm) HashID() crypto.Hash {
return h.hashID
}
var (
rs256 = &signatureAlgorithm{"RS256", crypto.SHA256}
rs384 = &signatureAlgorithm{"RS384", crypto.SHA384}
rs512 = &signatureAlgorithm{"RS512", crypto.SHA512}
es256 = &signatureAlgorithm{"ES256", crypto.SHA256}
es384 = &signatureAlgorithm{"ES384", crypto.SHA384}
es512 = &signatureAlgorithm{"ES512", crypto.SHA512}
)
func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) {
switch {
case alg == "RS256":
return rs256, nil
case alg == "RS384":
return rs384, nil
case alg == "RS512":
return rs512, nil
default:
return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg)
}
}
func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm {
switch {
case hashID == crypto.SHA512:
return rs512
case hashID == crypto.SHA384:
return rs384
case hashID == crypto.SHA256:
fallthrough
default:
return rs256
}
}

657
vendor/github.com/docker/libtrust/jsonsign.go generated vendored Normal file
View File

@ -0,0 +1,657 @@
package libtrust
import (
"bytes"
"crypto"
"crypto/x509"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"sort"
"time"
"unicode"
)
var (
// ErrInvalidSignContent is used when the content to be signed is invalid.
ErrInvalidSignContent = errors.New("invalid sign content")
// ErrInvalidJSONContent is used when invalid json is encountered.
ErrInvalidJSONContent = errors.New("invalid json content")
// ErrMissingSignatureKey is used when the specified signature key
// does not exist in the JSON content.
ErrMissingSignatureKey = errors.New("missing signature key")
)
type jsHeader struct {
JWK PublicKey `json:"jwk,omitempty"`
Algorithm string `json:"alg"`
Chain []string `json:"x5c,omitempty"`
}
type jsSignature struct {
Header jsHeader `json:"header"`
Signature string `json:"signature"`
Protected string `json:"protected,omitempty"`
}
type jsSignaturesSorted []jsSignature
func (jsbkid jsSignaturesSorted) Swap(i, j int) { jsbkid[i], jsbkid[j] = jsbkid[j], jsbkid[i] }
func (jsbkid jsSignaturesSorted) Len() int { return len(jsbkid) }
func (jsbkid jsSignaturesSorted) Less(i, j int) bool {
ki, kj := jsbkid[i].Header.JWK.KeyID(), jsbkid[j].Header.JWK.KeyID()
si, sj := jsbkid[i].Signature, jsbkid[j].Signature
if ki == kj {
return si < sj
}
return ki < kj
}
type signKey struct {
PrivateKey
Chain []*x509.Certificate
}
// JSONSignature represents a signature of a json object.
type JSONSignature struct {
payload string
signatures []jsSignature
indent string
formatLength int
formatTail []byte
}
func newJSONSignature() *JSONSignature {
return &JSONSignature{
signatures: make([]jsSignature, 0, 1),
}
}
// Payload returns the encoded payload of the signature. This
// payload should not be signed directly
func (js *JSONSignature) Payload() ([]byte, error) {
return joseBase64UrlDecode(js.payload)
}
func (js *JSONSignature) protectedHeader() (string, error) {
protected := map[string]interface{}{
"formatLength": js.formatLength,
"formatTail": joseBase64UrlEncode(js.formatTail),
"time": time.Now().UTC().Format(time.RFC3339),
}
protectedBytes, err := json.Marshal(protected)
if err != nil {
return "", err
}
return joseBase64UrlEncode(protectedBytes), nil
}
func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) {
buf := make([]byte, len(js.payload)+len(protectedHeader)+1)
copy(buf, protectedHeader)
buf[len(protectedHeader)] = '.'
copy(buf[len(protectedHeader)+1:], js.payload)
return buf, nil
}
// Sign adds a signature using the given private key.
func (js *JSONSignature) Sign(key PrivateKey) error {
protected, err := js.protectedHeader()
if err != nil {
return err
}
signBytes, err := js.signBytes(protected)
if err != nil {
return err
}
sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256)
if err != nil {
return err
}
js.signatures = append(js.signatures, jsSignature{
Header: jsHeader{
JWK: key.PublicKey(),
Algorithm: algorithm,
},
Signature: joseBase64UrlEncode(sigBytes),
Protected: protected,
})
return nil
}
// SignWithChain adds a signature using the given private key
// and setting the x509 chain. The public key of the first element
// in the chain must be the public key corresponding with the sign key.
func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error {
// Ensure key.Chain[0] is public key for key
//key.Chain.PublicKey
//key.PublicKey().CryptoPublicKey()
// Verify chain
protected, err := js.protectedHeader()
if err != nil {
return err
}
signBytes, err := js.signBytes(protected)
if err != nil {
return err
}
sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256)
if err != nil {
return err
}
header := jsHeader{
Chain: make([]string, len(chain)),
Algorithm: algorithm,
}
for i, cert := range chain {
header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw)
}
js.signatures = append(js.signatures, jsSignature{
Header: header,
Signature: joseBase64UrlEncode(sigBytes),
Protected: protected,
})
return nil
}
// Verify verifies all the signatures and returns the list of
// public keys used to sign. Any x509 chains are not checked.
func (js *JSONSignature) Verify() ([]PublicKey, error) {
keys := make([]PublicKey, len(js.signatures))
for i, signature := range js.signatures {
signBytes, err := js.signBytes(signature.Protected)
if err != nil {
return nil, err
}
var publicKey PublicKey
if len(signature.Header.Chain) > 0 {
certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0])
if err != nil {
return nil, err
}
cert, err := x509.ParseCertificate(certBytes)
if err != nil {
return nil, err
}
publicKey, err = FromCryptoPublicKey(cert.PublicKey)
if err != nil {
return nil, err
}
} else if signature.Header.JWK != nil {
publicKey = signature.Header.JWK
} else {
return nil, errors.New("missing public key")
}
sigBytes, err := joseBase64UrlDecode(signature.Signature)
if err != nil {
return nil, err
}
err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes)
if err != nil {
return nil, err
}
keys[i] = publicKey
}
return keys, nil
}
// VerifyChains verifies all the signatures and the chains associated
// with each signature and returns the list of verified chains.
// Signatures without an x509 chain are not checked.
func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) {
chains := make([][]*x509.Certificate, 0, len(js.signatures))
for _, signature := range js.signatures {
signBytes, err := js.signBytes(signature.Protected)
if err != nil {
return nil, err
}
var publicKey PublicKey
if len(signature.Header.Chain) > 0 {
certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0])
if err != nil {
return nil, err
}
cert, err := x509.ParseCertificate(certBytes)
if err != nil {
return nil, err
}
publicKey, err = FromCryptoPublicKey(cert.PublicKey)
if err != nil {
return nil, err
}
intermediates := x509.NewCertPool()
if len(signature.Header.Chain) > 1 {
intermediateChain := signature.Header.Chain[1:]
for i := range intermediateChain {
certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i])
if err != nil {
return nil, err
}
intermediate, err := x509.ParseCertificate(certBytes)
if err != nil {
return nil, err
}
intermediates.AddCert(intermediate)
}
}
verifyOptions := x509.VerifyOptions{
Intermediates: intermediates,
Roots: ca,
}
verifiedChains, err := cert.Verify(verifyOptions)
if err != nil {
return nil, err
}
chains = append(chains, verifiedChains...)
sigBytes, err := joseBase64UrlDecode(signature.Signature)
if err != nil {
return nil, err
}
err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes)
if err != nil {
return nil, err
}
}
}
return chains, nil
}
// JWS returns JSON serialized JWS according to
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2
func (js *JSONSignature) JWS() ([]byte, error) {
if len(js.signatures) == 0 {
return nil, errors.New("missing signature")
}
sort.Sort(jsSignaturesSorted(js.signatures))
jsonMap := map[string]interface{}{
"payload": js.payload,
"signatures": js.signatures,
}
return json.MarshalIndent(jsonMap, "", " ")
}
func notSpace(r rune) bool {
return !unicode.IsSpace(r)
}
func detectJSONIndent(jsonContent []byte) (indent string) {
if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' {
quoteIndex := bytes.IndexRune(jsonContent[1:], '"')
if quoteIndex > 0 {
indent = string(jsonContent[2 : quoteIndex+1])
}
}
return
}
type jsParsedHeader struct {
JWK json.RawMessage `json:"jwk"`
Algorithm string `json:"alg"`
Chain []string `json:"x5c"`
}
type jsParsedSignature struct {
Header jsParsedHeader `json:"header"`
Signature string `json:"signature"`
Protected string `json:"protected"`
}
// ParseJWS parses a JWS serialized JSON object into a JSONSignature.
func ParseJWS(content []byte) (*JSONSignature, error) {
type jsParsed struct {
Payload string `json:"payload"`
Signatures []jsParsedSignature `json:"signatures"`
}
parsed := &jsParsed{}
err := json.Unmarshal(content, parsed)
if err != nil {
return nil, err
}
if len(parsed.Signatures) == 0 {
return nil, errors.New("missing signatures")
}
payload, err := joseBase64UrlDecode(parsed.Payload)
if err != nil {
return nil, err
}
js, err := NewJSONSignature(payload)
if err != nil {
return nil, err
}
js.signatures = make([]jsSignature, len(parsed.Signatures))
for i, signature := range parsed.Signatures {
header := jsHeader{
Algorithm: signature.Header.Algorithm,
}
if signature.Header.Chain != nil {
header.Chain = signature.Header.Chain
}
if signature.Header.JWK != nil {
publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK))
if err != nil {
return nil, err
}
header.JWK = publicKey
}
js.signatures[i] = jsSignature{
Header: header,
Signature: signature.Signature,
Protected: signature.Protected,
}
}
return js, nil
}
// NewJSONSignature returns a new unsigned JWS from a json byte array.
// JSONSignature will need to be signed before serializing or storing.
// Optionally, one or more signatures can be provided as byte buffers,
// containing serialized JWS signatures, to assemble a fully signed JWS
// package. It is the caller's responsibility to ensure uniqueness of the
// provided signatures.
func NewJSONSignature(content []byte, signatures ...[]byte) (*JSONSignature, error) {
var dataMap map[string]interface{}
err := json.Unmarshal(content, &dataMap)
if err != nil {
return nil, err
}
js := newJSONSignature()
js.indent = detectJSONIndent(content)
js.payload = joseBase64UrlEncode(content)
// Find trailing } and whitespace, put in protected header
closeIndex := bytes.LastIndexFunc(content, notSpace)
if content[closeIndex] != '}' {
return nil, ErrInvalidJSONContent
}
lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace)
if content[lastRuneIndex] == ',' {
return nil, ErrInvalidJSONContent
}
js.formatLength = lastRuneIndex + 1
js.formatTail = content[js.formatLength:]
if len(signatures) > 0 {
for _, signature := range signatures {
var parsedJSig jsParsedSignature
if err := json.Unmarshal(signature, &parsedJSig); err != nil {
return nil, err
}
// TODO(stevvooe): A lot of the code below is repeated in
// ParseJWS. It will require more refactoring to fix that.
jsig := jsSignature{
Header: jsHeader{
Algorithm: parsedJSig.Header.Algorithm,
},
Signature: parsedJSig.Signature,
Protected: parsedJSig.Protected,
}
if parsedJSig.Header.Chain != nil {
jsig.Header.Chain = parsedJSig.Header.Chain
}
if parsedJSig.Header.JWK != nil {
publicKey, err := UnmarshalPublicKeyJWK([]byte(parsedJSig.Header.JWK))
if err != nil {
return nil, err
}
jsig.Header.JWK = publicKey
}
js.signatures = append(js.signatures, jsig)
}
}
return js, nil
}
// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or
// struct. JWS will need to be signed before serializing or storing.
func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) {
switch content.(type) {
case map[string]interface{}:
case struct{}:
default:
return nil, errors.New("invalid data type")
}
js := newJSONSignature()
js.indent = " "
payload, err := json.MarshalIndent(content, "", js.indent)
if err != nil {
return nil, err
}
js.payload = joseBase64UrlEncode(payload)
// Remove '\n}' from formatted section, put in protected header
js.formatLength = len(payload) - 2
js.formatTail = payload[js.formatLength:]
return js, nil
}
func readIntFromMap(key string, m map[string]interface{}) (int, bool) {
value, ok := m[key]
if !ok {
return 0, false
}
switch v := value.(type) {
case int:
return v, true
case float64:
return int(v), true
default:
return 0, false
}
}
func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) {
value, ok := m[key]
if !ok {
return "", false
}
v, ok = value.(string)
return
}
// ParsePrettySignature parses a formatted signature into a
// JSON signature. If the signatures are missing the format information
// an error is returned. The formatted signature must be created by
// the PrettySignature method.
func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) {
var contentMap map[string]json.RawMessage
err := json.Unmarshal(content, &contentMap)
if err != nil {
return nil, fmt.Errorf("error unmarshalling content: %s", err)
}
sigMessage, ok := contentMap[signatureKey]
if !ok {
return nil, ErrMissingSignatureKey
}
var signatureBlocks []jsParsedSignature
err = json.Unmarshal([]byte(sigMessage), &signatureBlocks)
if err != nil {
return nil, fmt.Errorf("error unmarshalling signatures: %s", err)
}
js := newJSONSignature()
js.signatures = make([]jsSignature, len(signatureBlocks))
for i, signatureBlock := range signatureBlocks {
protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected)
if err != nil {
return nil, fmt.Errorf("base64 decode error: %s", err)
}
var protectedHeader map[string]interface{}
err = json.Unmarshal(protectedBytes, &protectedHeader)
if err != nil {
return nil, fmt.Errorf("error unmarshalling protected header: %s", err)
}
formatLength, ok := readIntFromMap("formatLength", protectedHeader)
if !ok {
return nil, errors.New("missing formatted length")
}
encodedTail, ok := readStringFromMap("formatTail", protectedHeader)
if !ok {
return nil, errors.New("missing formatted tail")
}
formatTail, err := joseBase64UrlDecode(encodedTail)
if err != nil {
return nil, fmt.Errorf("base64 decode error on tail: %s", err)
}
if js.formatLength == 0 {
js.formatLength = formatLength
} else if js.formatLength != formatLength {
return nil, errors.New("conflicting format length")
}
if len(js.formatTail) == 0 {
js.formatTail = formatTail
} else if bytes.Compare(js.formatTail, formatTail) != 0 {
return nil, errors.New("conflicting format tail")
}
header := jsHeader{
Algorithm: signatureBlock.Header.Algorithm,
Chain: signatureBlock.Header.Chain,
}
if signatureBlock.Header.JWK != nil {
publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK))
if err != nil {
return nil, fmt.Errorf("error unmarshalling public key: %s", err)
}
header.JWK = publicKey
}
js.signatures[i] = jsSignature{
Header: header,
Signature: signatureBlock.Signature,
Protected: signatureBlock.Protected,
}
}
if js.formatLength > len(content) {
return nil, errors.New("invalid format length")
}
formatted := make([]byte, js.formatLength+len(js.formatTail))
copy(formatted, content[:js.formatLength])
copy(formatted[js.formatLength:], js.formatTail)
js.indent = detectJSONIndent(formatted)
js.payload = joseBase64UrlEncode(formatted)
return js, nil
}
// PrettySignature formats a json signature into an easy to read
// single json serialized object.
func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) {
if len(js.signatures) == 0 {
return nil, errors.New("no signatures")
}
payload, err := joseBase64UrlDecode(js.payload)
if err != nil {
return nil, err
}
payload = payload[:js.formatLength]
sort.Sort(jsSignaturesSorted(js.signatures))
var marshalled []byte
var marshallErr error
if js.indent != "" {
marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent)
} else {
marshalled, marshallErr = json.Marshal(js.signatures)
}
if marshallErr != nil {
return nil, marshallErr
}
buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34))
buf.Write(payload)
buf.WriteByte(',')
if js.indent != "" {
buf.WriteByte('\n')
buf.WriteString(js.indent)
buf.WriteByte('"')
buf.WriteString(signatureKey)
buf.WriteString("\": ")
buf.Write(marshalled)
buf.WriteByte('\n')
} else {
buf.WriteByte('"')
buf.WriteString(signatureKey)
buf.WriteString("\":")
buf.Write(marshalled)
}
buf.WriteByte('}')
return buf.Bytes(), nil
}
// Signatures provides the signatures on this JWS as opaque blobs, sorted by
// keyID. These blobs can be stored and reassembled with payloads. Internally,
// they are simply marshaled json web signatures but implementations should
// not rely on this.
func (js *JSONSignature) Signatures() ([][]byte, error) {
sort.Sort(jsSignaturesSorted(js.signatures))
var sb [][]byte
for _, jsig := range js.signatures {
p, err := json.Marshal(jsig)
if err != nil {
return nil, err
}
sb = append(sb, p)
}
return sb, nil
}
// Merge combines the signatures from one or more other signatures into the
// method receiver. If the payloads differ for any argument, an error will be
// returned and the receiver will not be modified.
func (js *JSONSignature) Merge(others ...*JSONSignature) error {
merged := js.signatures
for _, other := range others {
if js.payload != other.payload {
return fmt.Errorf("payloads differ from merge target")
}
merged = append(merged, other.signatures...)
}
js.signatures = merged
return nil
}
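
A hedged end-to-end sketch of the JSONSignature flow above (not part of the vendored file; import path assumed to be "github.com/docker/libtrust"): wrap a JSON document, sign it, serialize it as JWS, parse it back, and verify.

package main

import (
	"fmt"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}

	// Wrap an arbitrary JSON document (here built from a map).
	js, err := libtrust.NewJSONSignatureFromMap(map[string]interface{}{
		"name": "example",
		"tag":  "latest",
	})
	if err != nil {
		log.Fatal(err)
	}

	// Attach a signature using the private key.
	if err := js.Sign(key); err != nil {
		log.Fatal(err)
	}

	// Serialize to the JWS JSON form and parse it back in.
	jws, err := js.JWS()
	if err != nil {
		log.Fatal(err)
	}
	parsed, err := libtrust.ParseJWS(jws)
	if err != nil {
		log.Fatal(err)
	}

	// Verify returns the public keys behind the valid signatures.
	signers, err := parsed.Verify()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("signed by", signers[0].KeyID())
}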

253
vendor/github.com/docker/libtrust/key.go generated vendored Normal file
View File

@ -0,0 +1,253 @@
package libtrust
import (
"crypto"
"crypto/ecdsa"
"crypto/rsa"
"crypto/x509"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"io"
)
// PublicKey is a generic interface for a Public Key.
type PublicKey interface {
// KeyType returns the key type for this key. For elliptic curve keys,
// this value should be "EC". For RSA keys, this value should be "RSA".
KeyType() string
// KeyID returns a distinct identifier which is unique to this Public Key.
// The format generated by this library is a base32 encoding of a 240 bit
// hash of the public key data divided into 12 groups like so:
// ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
KeyID() string
// Verify verifies the signature of the data in the io.Reader using this
// Public Key. The alg parameter should identify the digital signature
// algorithm which was used to produce the signature and should be
// supported by this public key. Returns a nil error if the signature
// is valid.
Verify(data io.Reader, alg string, signature []byte) error
// CryptoPublicKey returns the internal object which can be used as a
// crypto.PublicKey for use with other standard library operations. The type
// is either *rsa.PublicKey or *ecdsa.PublicKey
CryptoPublicKey() crypto.PublicKey
// These public keys can be serialized to the standard JSON encoding for
// JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web
// Algorithms.
MarshalJSON() ([]byte, error)
// These keys can also be serialized to the standard PEM encoding.
PEMBlock() (*pem.Block, error)
// The string representation of a key is its key type and ID.
String() string
AddExtendedField(string, interface{})
GetExtendedField(string) interface{}
}
// PrivateKey is a generic interface for a Private Key.
type PrivateKey interface {
// A PrivateKey contains all fields and methods of a PublicKey of the
// same type. The MarshalJSON method also outputs the private key as a
// JSON Web Key, and the PEMBlock method outputs the private key as a
// PEM block.
PublicKey
// PublicKey returns the PublicKey associated with this PrivateKey.
PublicKey() PublicKey
// Sign signs the data read from the io.Reader using a signature algorithm
// supported by the private key. If the specified hashing algorithm is
// supported by this key, that hash function is used to generate the
// signature, otherwise the default hashing algorithm for this key is
// used. Returns the signature and identifier of the algorithm used.
Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error)
// CryptoPrivateKey returns the internal object which can be used as a
// crypto.PrivateKey for use with other standard library operations. The
// type is either *rsa.PrivateKey or *ecdsa.PrivateKey
CryptoPrivateKey() crypto.PrivateKey
}
// FromCryptoPublicKey returns a libtrust PublicKey representation of the given
// *ecdsa.PublicKey or *rsa.PublicKey. Returns a non-nil error when the given
// key is of an unsupported type.
func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) {
switch cryptoPublicKey := cryptoPublicKey.(type) {
case *ecdsa.PublicKey:
return fromECPublicKey(cryptoPublicKey)
case *rsa.PublicKey:
return fromRSAPublicKey(cryptoPublicKey), nil
default:
return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey)
}
}
// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given
// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given
// key is of an unsupported type.
func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) {
switch cryptoPrivateKey := cryptoPrivateKey.(type) {
case *ecdsa.PrivateKey:
return fromECPrivateKey(cryptoPrivateKey)
case *rsa.PrivateKey:
return fromRSAPrivateKey(cryptoPrivateKey), nil
default:
return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey)
}
}
// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust
// PublicKey or an error if there is a problem with the encoding.
func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) {
pemBlock, _ := pem.Decode(data)
if pemBlock == nil {
return nil, errors.New("unable to find PEM encoded data")
} else if pemBlock.Type != "PUBLIC KEY" {
return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type)
}
return pubKeyFromPEMBlock(pemBlock)
}
// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of
// PEM blocks appended one after the other and returns a slice of PublicKey
// objects that it finds.
func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) {
pubKeys := []PublicKey{}
for {
var pemBlock *pem.Block
pemBlock, data = pem.Decode(data)
if pemBlock == nil {
break
} else if pemBlock.Type != "PUBLIC KEY" {
return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type)
}
pubKey, err := pubKeyFromPEMBlock(pemBlock)
if err != nil {
return nil, err
}
pubKeys = append(pubKeys, pubKey)
}
return pubKeys, nil
}
// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust
// PrivateKey or an error if there is a problem with the encoding.
func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) {
pemBlock, _ := pem.Decode(data)
if pemBlock == nil {
return nil, errors.New("unable to find PEM encoded data")
}
var key PrivateKey
switch {
case pemBlock.Type == "RSA PRIVATE KEY":
rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes)
if err != nil {
return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err)
}
key = fromRSAPrivateKey(rsaPrivateKey)
case pemBlock.Type == "EC PRIVATE KEY":
ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes)
if err != nil {
return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err)
}
key, err = fromECPrivateKey(ecPrivateKey)
if err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type)
}
addPEMHeadersToKey(pemBlock, key.PublicKey())
return key, nil
}
// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic
// Public Key to be used with libtrust.
func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) {
jwk := make(map[string]interface{})
err := json.Unmarshal(data, &jwk)
if err != nil {
return nil, fmt.Errorf(
"decoding JWK Public Key JSON data: %s\n", err,
)
}
// Get the Key Type value.
kty, err := stringFromMap(jwk, "kty")
if err != nil {
return nil, fmt.Errorf("JWK Public Key type: %s", err)
}
switch {
case kty == "EC":
// Call out to unmarshal EC public key.
return ecPublicKeyFromMap(jwk)
case kty == "RSA":
// Call out to unmarshal RSA public key.
return rsaPublicKeyFromMap(jwk)
default:
return nil, fmt.Errorf(
"JWK Public Key type not supported: %q\n", kty,
)
}
}
// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set
// and returns a slice of Public Key objects.
func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) {
rawKeys, err := loadJSONKeySetRaw(data)
if err != nil {
return nil, err
}
pubKeys := make([]PublicKey, 0, len(rawKeys))
for _, rawKey := range rawKeys {
pubKey, err := UnmarshalPublicKeyJWK(rawKey)
if err != nil {
return nil, err
}
pubKeys = append(pubKeys, pubKey)
}
return pubKeys, nil
}
// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic
// Private Key to be used with libtrust.
func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) {
jwk := make(map[string]interface{})
err := json.Unmarshal(data, &jwk)
if err != nil {
return nil, fmt.Errorf(
"decoding JWK Private Key JSON data: %s\n", err,
)
}
// Get the Key Type value.
kty, err := stringFromMap(jwk, "kty")
if err != nil {
return nil, fmt.Errorf("JWK Private Key type: %s", err)
}
switch {
case kty == "EC":
// Call out to unmarshal EC private key.
return ecPrivateKeyFromMap(jwk)
case kty == "RSA":
// Call out to unmarshal RSA private key.
return rsaPrivateKeyFromMap(jwk)
default:
return nil, fmt.Errorf(
"JWK Private Key type not supported: %q\n", kty,
)
}
}
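
The marshal/unmarshal helpers above are symmetric, so a public key can be round-tripped through both JWK and PEM. A small sketch under the same assumptions as the earlier examples:

package main

import (
	"encoding/json"
	"encoding/pem"
	"fmt"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}
	pub := key.PublicKey()

	// JWK round trip: MarshalJSON emits the JWK form that
	// UnmarshalPublicKeyJWK accepts.
	jwkJSON, err := json.Marshal(pub)
	if err != nil {
		log.Fatal(err)
	}
	fromJWK, err := libtrust.UnmarshalPublicKeyJWK(jwkJSON)
	if err != nil {
		log.Fatal(err)
	}

	// PEM round trip via the PEMBlock method and UnmarshalPublicKeyPEM.
	block, err := pub.PEMBlock()
	if err != nil {
		log.Fatal(err)
	}
	fromPEM, err := libtrust.UnmarshalPublicKeyPEM(pem.EncodeToMemory(block))
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(fromJWK.KeyID() == pub.KeyID(), fromPEM.KeyID() == pub.KeyID())
}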

255
vendor/github.com/docker/libtrust/key_files.go generated vendored Normal file
View File

@ -0,0 +1,255 @@
package libtrust
import (
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"io/ioutil"
"os"
"strings"
)
var (
// ErrKeyFileDoesNotExist indicates that the private key file does not exist.
ErrKeyFileDoesNotExist = errors.New("key file does not exist")
)
func readKeyFileBytes(filename string) ([]byte, error) {
data, err := ioutil.ReadFile(filename)
if err != nil {
if os.IsNotExist(err) {
err = ErrKeyFileDoesNotExist
} else {
err = fmt.Errorf("unable to read key file %s: %s", filename, err)
}
return nil, err
}
return data, nil
}
/*
Loading and Saving of Public and Private Keys in either PEM or JWK format.
*/
// LoadKeyFile opens the given filename and attempts to read a Private Key
// encoded in either PEM or JWK format (if .json or .jwk file extension).
func LoadKeyFile(filename string) (PrivateKey, error) {
contents, err := readKeyFileBytes(filename)
if err != nil {
return nil, err
}
var key PrivateKey
if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
key, err = UnmarshalPrivateKeyJWK(contents)
if err != nil {
return nil, fmt.Errorf("unable to decode private key JWK: %s", err)
}
} else {
key, err = UnmarshalPrivateKeyPEM(contents)
if err != nil {
return nil, fmt.Errorf("unable to decode private key PEM: %s", err)
}
}
return key, nil
}
// LoadPublicKeyFile opens the given filename and attempts to read a Public Key
// encoded in either PEM or JWK format (if .json or .jwk file extension).
func LoadPublicKeyFile(filename string) (PublicKey, error) {
contents, err := readKeyFileBytes(filename)
if err != nil {
return nil, err
}
var key PublicKey
if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
key, err = UnmarshalPublicKeyJWK(contents)
if err != nil {
return nil, fmt.Errorf("unable to decode public key JWK: %s", err)
}
} else {
key, err = UnmarshalPublicKeyPEM(contents)
if err != nil {
return nil, fmt.Errorf("unable to decode public key PEM: %s", err)
}
}
return key, nil
}
// SaveKey saves the given key to a file using the provided filename.
// This process will overwrite any existing file at the provided location.
func SaveKey(filename string, key PrivateKey) error {
var encodedKey []byte
var err error
if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
// Encode in JSON Web Key format.
encodedKey, err = json.MarshalIndent(key, "", " ")
if err != nil {
return fmt.Errorf("unable to encode private key JWK: %s", err)
}
} else {
// Encode in PEM format.
pemBlock, err := key.PEMBlock()
if err != nil {
return fmt.Errorf("unable to encode private key PEM: %s", err)
}
encodedKey = pem.EncodeToMemory(pemBlock)
}
err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600))
if err != nil {
return fmt.Errorf("unable to write private key file %s: %s", filename, err)
}
return nil
}
// SavePublicKey saves the given public key to the file.
func SavePublicKey(filename string, key PublicKey) error {
var encodedKey []byte
var err error
if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
// Encode in JSON Web Key format.
encodedKey, err = json.MarshalIndent(key, "", " ")
if err != nil {
return fmt.Errorf("unable to encode public key JWK: %s", err)
}
} else {
// Encode in PEM format.
pemBlock, err := key.PEMBlock()
if err != nil {
return fmt.Errorf("unable to encode public key PEM: %s", err)
}
encodedKey = pem.EncodeToMemory(pemBlock)
}
err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644))
if err != nil {
return fmt.Errorf("unable to write public key file %s: %s", filename, err)
}
return nil
}
// Public Key Set files
type jwkSet struct {
Keys []json.RawMessage `json:"keys"`
}
// LoadKeySetFile loads a key set
func LoadKeySetFile(filename string) ([]PublicKey, error) {
if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
return loadJSONKeySetFile(filename)
}
// Must be a PEM format file
return loadPEMKeySetFile(filename)
}
func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) {
if len(data) == 0 {
// This is okay, just return an empty slice.
return []json.RawMessage{}, nil
}
keySet := jwkSet{}
err := json.Unmarshal(data, &keySet)
if err != nil {
return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err)
}
return keySet.Keys, nil
}
func loadJSONKeySetFile(filename string) ([]PublicKey, error) {
contents, err := readKeyFileBytes(filename)
if err != nil && err != ErrKeyFileDoesNotExist {
return nil, err
}
return UnmarshalPublicKeyJWKSet(contents)
}
func loadPEMKeySetFile(filename string) ([]PublicKey, error) {
data, err := readKeyFileBytes(filename)
if err != nil && err != ErrKeyFileDoesNotExist {
return nil, err
}
return UnmarshalPublicKeyPEMBundle(data)
}
// AddKeySetFile adds a key to a key set
func AddKeySetFile(filename string, key PublicKey) error {
if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
return addKeySetJSONFile(filename, key)
}
// Must be a PEM format file
return addKeySetPEMFile(filename, key)
}
func addKeySetJSONFile(filename string, key PublicKey) error {
encodedKey, err := json.Marshal(key)
if err != nil {
return fmt.Errorf("unable to encode trusted client key: %s", err)
}
contents, err := readKeyFileBytes(filename)
if err != nil && err != ErrKeyFileDoesNotExist {
return err
}
rawEntries, err := loadJSONKeySetRaw(contents)
if err != nil {
return err
}
rawEntries = append(rawEntries, json.RawMessage(encodedKey))
entriesWrapper := jwkSet{Keys: rawEntries}
encodedEntries, err := json.MarshalIndent(entriesWrapper, "", " ")
if err != nil {
return fmt.Errorf("unable to encode trusted client keys: %s", err)
}
err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644))
if err != nil {
return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err)
}
return nil
}
func addKeySetPEMFile(filename string, key PublicKey) error {
// Encode to PEM, open file for appending, write PEM.
file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644))
if err != nil {
return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err)
}
defer file.Close()
pemBlock, err := key.PEMBlock()
if err != nil {
return fmt.Errorf("unable to encoded trusted key: %s", err)
}
_, err = file.Write(pem.EncodeToMemory(pemBlock))
if err != nil {
return fmt.Errorf("unable to write trusted keys file: %s", err)
}
return nil
}
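
A sketch of the file helpers above, using a temporary directory (the paths and the ioutil usage are illustrative and match the Go idioms of this vendored code's era): the file extension selects the encoding, ".json"/".jwk" for JWK and anything else for PEM.

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"

	"github.com/docker/libtrust"
)

func main() {
	dir, err := ioutil.TempDir("", "libtrust-keys")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}

	// ".json" selects JWK encoding; ".pem" falls through to PEM.
	jwkPath := filepath.Join(dir, "key.json")
	pemPath := filepath.Join(dir, "key.pem")
	if err := libtrust.SaveKey(jwkPath, key); err != nil {
		log.Fatal(err)
	}
	if err := libtrust.SaveKey(pemPath, key); err != nil {
		log.Fatal(err)
	}

	fromJWK, err := libtrust.LoadKeyFile(jwkPath)
	if err != nil {
		log.Fatal(err)
	}
	fromPEM, err := libtrust.LoadKeyFile(pemPath)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(fromJWK.KeyID() == key.KeyID(), fromPEM.KeyID() == key.KeyID())
}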

175
vendor/github.com/docker/libtrust/key_manager.go generated vendored Normal file
View File

@ -0,0 +1,175 @@
package libtrust
import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net"
"os"
"path"
"sync"
)
// ClientKeyManager manages client keys on the filesystem
type ClientKeyManager struct {
key PrivateKey
clientFile string
clientDir string
clientLock sync.RWMutex
clients []PublicKey
configLock sync.Mutex
configs []*tls.Config
}
// NewClientKeyManager loads a new manager from a set of key files
// and is managed by the given private key.
func NewClientKeyManager(trustKey PrivateKey, clientFile, clientDir string) (*ClientKeyManager, error) {
m := &ClientKeyManager{
key: trustKey,
clientFile: clientFile,
clientDir: clientDir,
}
if err := m.loadKeys(); err != nil {
return nil, err
}
// TODO Start watching file and directory
return m, nil
}
func (c *ClientKeyManager) loadKeys() (err error) {
// Load authorized keys file
var clients []PublicKey
if c.clientFile != "" {
clients, err = LoadKeySetFile(c.clientFile)
if err != nil {
return fmt.Errorf("unable to load authorized keys: %s", err)
}
}
// Add clients from authorized keys directory
files, err := ioutil.ReadDir(c.clientDir)
if err != nil && !os.IsNotExist(err) {
return fmt.Errorf("unable to open authorized keys directory: %s", err)
}
for _, f := range files {
if !f.IsDir() {
publicKey, err := LoadPublicKeyFile(path.Join(c.clientDir, f.Name()))
if err != nil {
return fmt.Errorf("unable to load authorized key file: %s", err)
}
clients = append(clients, publicKey)
}
}
c.clientLock.Lock()
c.clients = clients
c.clientLock.Unlock()
return nil
}
// RegisterTLSConfig registers a tls configuration with the manager
// such that any changes to the keys may be reflected in
// the tls client CA pool
func (c *ClientKeyManager) RegisterTLSConfig(tlsConfig *tls.Config) error {
c.clientLock.RLock()
certPool, err := GenerateCACertPool(c.key, c.clients)
if err != nil {
return fmt.Errorf("CA pool generation error: %s", err)
}
c.clientLock.RUnlock()
tlsConfig.ClientCAs = certPool
c.configLock.Lock()
c.configs = append(c.configs, tlsConfig)
c.configLock.Unlock()
return nil
}
// NewIdentityAuthTLSConfig creates a tls.Config for the server to use for
// libtrust identity authentication for the domain specified
func NewIdentityAuthTLSConfig(trustKey PrivateKey, clients *ClientKeyManager, addr string, domain string) (*tls.Config, error) {
tlsConfig := newTLSConfig()
tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
if err := clients.RegisterTLSConfig(tlsConfig); err != nil {
return nil, err
}
// Generate cert
ips, domains, err := parseAddr(addr)
if err != nil {
return nil, err
}
// add domain that it expects clients to use
domains = append(domains, domain)
x509Cert, err := GenerateSelfSignedServerCert(trustKey, domains, ips)
if err != nil {
return nil, fmt.Errorf("certificate generation error: %s", err)
}
tlsConfig.Certificates = []tls.Certificate{{
Certificate: [][]byte{x509Cert.Raw},
PrivateKey: trustKey.CryptoPrivateKey(),
Leaf: x509Cert,
}}
return tlsConfig, nil
}
// NewCertAuthTLSConfig creates a tls.Config for the server to use for
// certificate authentication
func NewCertAuthTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) {
tlsConfig := newTLSConfig()
cert, err := tls.LoadX509KeyPair(certPath, keyPath)
if err != nil {
return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", certPath, keyPath, err)
}
tlsConfig.Certificates = []tls.Certificate{cert}
// Verify client certificates against a CA?
if caPath != "" {
certPool := x509.NewCertPool()
file, err := ioutil.ReadFile(caPath)
if err != nil {
return nil, fmt.Errorf("Couldn't read CA certificate: %s", err)
}
certPool.AppendCertsFromPEM(file)
tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
tlsConfig.ClientCAs = certPool
}
return tlsConfig, nil
}
func newTLSConfig() *tls.Config {
return &tls.Config{
NextProtos: []string{"http/1.1"},
// Avoid fallback on insecure SSL protocols
MinVersion: tls.VersionTLS10,
}
}
// parseAddr parses an address into an array of IPs and domains
func parseAddr(addr string) ([]net.IP, []string, error) {
host, _, err := net.SplitHostPort(addr)
if err != nil {
return nil, nil, err
}
var domains []string
var ips []net.IP
ip := net.ParseIP(host)
if ip != nil {
ips = []net.IP{ip}
} else {
domains = []string{host}
}
return ips, domains, nil
}
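
A hedged sketch of the certificate-auth helper above. The CA, certificate, and key file names are hypothetical and would have to exist on disk, so treat this as an outline rather than a ready-to-run server.

package main

import (
	"log"
	"net/http"

	"github.com/docker/libtrust"
)

func main() {
	// All three paths are placeholders; pass "" for the CA path to skip
	// client-certificate verification.
	tlsConfig, err := libtrust.NewCertAuthTLSConfig(
		"ca.pem",          // CA bundle used to verify client certificates
		"server-cert.pem", // server certificate
		"server-key.pem",  // server private key
	)
	if err != nil {
		log.Fatal(err)
	}

	srv := &http.Server{Addr: ":8443", TLSConfig: tlsConfig}
	// The certificates already live in TLSConfig, so the file arguments
	// to ListenAndServeTLS are left empty.
	log.Fatal(srv.ListenAndServeTLS("", ""))
}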

427
vendor/github.com/docker/libtrust/rsa_key.go generated vendored Normal file
View File

@ -0,0 +1,427 @@
package libtrust
import (
"crypto"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"io"
"math/big"
)
/*
* RSA DSA PUBLIC KEY
*/
// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms.
type rsaPublicKey struct {
*rsa.PublicKey
extended map[string]interface{}
}
func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey {
return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}}
}
// KeyType returns the JWK key type for RSA keys, i.e., "RSA".
func (k *rsaPublicKey) KeyType() string {
return "RSA"
}
// KeyID returns a distinct identifier which is unique to this Public Key.
func (k *rsaPublicKey) KeyID() string {
return keyIDFromCryptoKey(k)
}
func (k *rsaPublicKey) String() string {
return fmt.Sprintf("RSA Public Key <%s>", k.KeyID())
}
// Verify verifies the signature of the data in the io.Reader using this Public Key.
// The alg parameter should be the name of the JWA digital signature algorithm
// which was used to produce the signature and should be supported by this
// public key. Returns a nil error if the signature is valid.
func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
// Verify the signature of the given data, returning a non-nil error if it is not valid.
sigAlg, err := rsaSignatureAlgorithmByName(alg)
if err != nil {
return fmt.Errorf("unable to verify Signature: %s", err)
}
hasher := sigAlg.HashID().New()
_, err = io.Copy(hasher, data)
if err != nil {
return fmt.Errorf("error reading data to sign: %s", err)
}
hash := hasher.Sum(nil)
err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature)
if err != nil {
return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err)
}
return nil
}
// CryptoPublicKey returns the internal object which can be used as a
// crypto.PublicKey for use with other standard library operations. The type
// is either *rsa.PublicKey or *ecdsa.PublicKey
func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey {
return k.PublicKey
}
func (k *rsaPublicKey) toMap() map[string]interface{} {
jwk := make(map[string]interface{})
for k, v := range k.extended {
jwk[k] = v
}
jwk["kty"] = k.KeyType()
jwk["kid"] = k.KeyID()
jwk["n"] = joseBase64UrlEncode(k.N.Bytes())
jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E))
return jwk
}
// MarshalJSON serializes this Public Key using the JWK JSON serialization format for
// RSA keys.
func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) {
return json.Marshal(k.toMap())
}
// PEMBlock serializes this Public Key to DER-encoded PKIX format.
func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) {
derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey)
if err != nil {
return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err)
}
k.extended["kid"] = k.KeyID() // For display purposes.
return createPemBlock("PUBLIC KEY", derBytes, k.extended)
}
func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) {
k.extended[field] = value
}
func (k *rsaPublicKey) GetExtendedField(field string) interface{} {
v, ok := k.extended[field]
if !ok {
return nil
}
return v
}
func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) {
// JWK key type (kty) has already been determined to be "RSA".
// Need to extract 'n', 'e', and 'kid' and check for
// consistency.
// Get the modulus parameter N.
nB64Url, err := stringFromMap(jwk, "n")
if err != nil {
return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err)
}
n, err := parseRSAModulusParam(nB64Url)
if err != nil {
return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err)
}
// Get the public exponent E.
eB64Url, err := stringFromMap(jwk, "e")
if err != nil {
return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err)
}
e, err := parseRSAPublicExponentParam(eB64Url)
if err != nil {
return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err)
}
key := &rsaPublicKey{
PublicKey: &rsa.PublicKey{N: n, E: e},
}
// Key ID is optional, but if it exists, it should match the key.
_, ok := jwk["kid"]
if ok {
kid, err := stringFromMap(jwk, "kid")
if err != nil {
return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err)
}
if kid != key.KeyID() {
return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid)
}
}
if _, ok := jwk["d"]; ok {
return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent")
}
key.extended = jwk
return key, nil
}
/*
* RSA DSA PRIVATE KEY
*/
// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms.
type rsaPrivateKey struct {
rsaPublicKey
*rsa.PrivateKey
}
func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey {
return &rsaPrivateKey{
*fromRSAPublicKey(&cryptoPrivateKey.PublicKey),
cryptoPrivateKey,
}
}
// PublicKey returns the Public Key data associated with this Private Key.
func (k *rsaPrivateKey) PublicKey() PublicKey {
return &k.rsaPublicKey
}
func (k *rsaPrivateKey) String() string {
return fmt.Sprintf("RSA Private Key <%s>", k.KeyID())
}
// Sign signs the data read from the io.Reader using a signature algorithm supported
// by the RSA private key. If the specified hashing algorithm is supported by
// this key, that hash function is used to generate the signature; otherwise
// the default hashing algorithm for this key is used. Returns the signature
// and the name of the JWK signature algorithm used, e.g., "RS256", "RS384",
// "RS512".
func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
// Generate a signature of the data using the internal alg.
sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID)
hasher := sigAlg.HashID().New()
_, err = io.Copy(hasher, data)
if err != nil {
return nil, "", fmt.Errorf("error reading data to sign: %s", err)
}
hash := hasher.Sum(nil)
signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash)
if err != nil {
return nil, "", fmt.Errorf("error producing signature: %s", err)
}
alg = sigAlg.HeaderParam()
return
}
// CryptoPrivateKey returns the internal object which can be used as a
// crypto.PrivateKey for use with other standard library operations. The type
// is either *rsa.PrivateKey or *ecdsa.PrivateKey
func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
return k.PrivateKey
}
func (k *rsaPrivateKey) toMap() map[string]interface{} {
k.Precompute() // Make sure the precomputed values are stored.
jwk := k.rsaPublicKey.toMap()
jwk["d"] = joseBase64UrlEncode(k.D.Bytes())
jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes())
jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes())
jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes())
jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes())
jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes())
otherPrimes := k.Primes[2:]
if len(otherPrimes) > 0 {
otherPrimesInfo := make([]interface{}, len(otherPrimes))
for i, r := range otherPrimes {
otherPrimeInfo := make(map[string]string, 3)
otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes())
crtVal := k.Precomputed.CRTValues[i]
otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes())
otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes())
otherPrimesInfo[i] = otherPrimeInfo
}
jwk["oth"] = otherPrimesInfo
}
return jwk
}
// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
// RSA keys.
func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) {
return json.Marshal(k.toMap())
}
// PEMBlock serializes this Private Key to DER-encoded PKIX format.
func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) {
derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey)
k.extended["keyID"] = k.KeyID() // For display purposes.
return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended)
}
func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) {
// The JWA spec for RSA Private Keys (draft rfc section 5.3.2) states that
// only the private key exponent 'd' is REQUIRED, the others are just for
// signature/decryption optimizations and SHOULD be included when the JWK
// is produced. We MAY choose to accept a JWK which only includes 'd', but
// we choose not to accept it without the extra
// fields. Only the 'oth' field will be optional (for multi-prime keys).
privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d")
if err != nil {
return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err)
}
firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p")
if err != nil {
return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
}
secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q")
if err != nil {
return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
}
firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp")
if err != nil {
return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
}
secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq")
if err != nil {
return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
}
crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi")
if err != nil {
return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
}
var oth interface{}
if _, ok := jwk["oth"]; ok {
oth = jwk["oth"]
delete(jwk, "oth")
}
// JWK key type (kty) has already been determined to be "RSA".
// Need to extract the public key information, then extract the private
// key values.
publicKey, err := rsaPublicKeyFromMap(jwk)
if err != nil {
return nil, err
}
privateKey := &rsa.PrivateKey{
PublicKey: *publicKey.PublicKey,
D: privateExponent,
Primes: []*big.Int{firstPrimeFactor, secondPrimeFactor},
Precomputed: rsa.PrecomputedValues{
Dp: firstFactorCRT,
Dq: secondFactorCRT,
Qinv: crtCoeff,
},
}
if oth != nil {
// Should be an array of more JSON objects.
otherPrimesInfo, ok := oth.([]interface{})
if !ok {
return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array")
}
numOtherPrimeFactors := len(otherPrimesInfo)
if numOtherPrimeFactors == 0 {
return nil, errors.New("JWK RSA Privake Key: Invalid other primes info: must be absent or non-empty")
}
otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors)
productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor)
crtValues := make([]rsa.CRTValue, numOtherPrimeFactors)
for i, val := range otherPrimesInfo {
otherPrimeinfo, ok := val.(map[string]interface{})
if !ok {
return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object")
}
otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r")
if err != nil {
return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
}
otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d")
if err != nil {
return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
}
otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t")
if err != nil {
return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
}
crtValue := crtValues[i]
crtValue.Exp = otherFactorCRT
crtValue.Coeff = otherCrtCoeff
crtValue.R = productOfPrimes
otherPrimeFactors[i] = otherPrimeFactor
productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor)
}
privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...)
privateKey.Precomputed.CRTValues = crtValues
}
key := &rsaPrivateKey{
rsaPublicKey: *publicKey,
PrivateKey: privateKey,
}
return key, nil
}
/*
* Key Generation Functions.
*/
func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) {
k = new(rsaPrivateKey)
k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits)
if err != nil {
return nil, err
}
k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey
k.extended = make(map[string]interface{})
return
}
// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA.
func GenerateRSA2048PrivateKey() (PrivateKey, error) {
k, err := generateRSAPrivateKey(2048)
if err != nil {
return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err)
}
return k, nil
}
// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA.
func GenerateRSA3072PrivateKey() (PrivateKey, error) {
k, err := generateRSAPrivateKey(3072)
if err != nil {
return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err)
}
return k, nil
}
// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA.
func GenerateRSA4096PrivateKey() (PrivateKey, error) {
k, err := generateRSAPrivateKey(4096)
if err != nil {
return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err)
}
return k, nil
}
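// A minimal usage sketch of the generation helpers above (it relies on the
// "encoding/json" and "fmt" imports already used in this file; the file paths
// are placeholders). SaveKey and SavePublicKey are defined in util.go.
func exampleGenerateAndSaveKey() error {
	key, err := GenerateRSA2048PrivateKey()
	if err != nil {
		return fmt.Errorf("generate key: %s", err)
	}
	// The concrete key types implement json.Marshaler, so this yields the JWK form.
	jwkJSON, err := json.Marshal(key)
	if err != nil {
		return fmt.Errorf("marshal JWK: %s", err)
	}
	fmt.Printf("JWK for key %s: %s\n", key.KeyID(), jwkJSON)
	if err := SaveKey("/tmp/trust-key.pem", key); err != nil {
		return fmt.Errorf("save private key: %s", err)
	}
	return SavePublicKey("/tmp/trust-key.pub.pem", key.PublicKey())
}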

363
vendor/github.com/docker/libtrust/util.go generated vendored Normal file
View File

@ -0,0 +1,363 @@
package libtrust
import (
"bytes"
"crypto"
"crypto/elliptic"
"crypto/tls"
"crypto/x509"
"encoding/base32"
"encoding/base64"
"encoding/binary"
"encoding/pem"
"errors"
"fmt"
"math/big"
"net/url"
"os"
"path/filepath"
"strings"
"time"
)
// LoadOrCreateTrustKey will load a PrivateKey from the specified path, generating
// and saving a new EC P-256 key (plus its public key file) if the file does not exist.
func LoadOrCreateTrustKey(trustKeyPath string) (PrivateKey, error) {
if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil {
return nil, err
}
trustKey, err := LoadKeyFile(trustKeyPath)
if err == ErrKeyFileDoesNotExist {
trustKey, err = GenerateECP256PrivateKey()
if err != nil {
return nil, fmt.Errorf("error generating key: %s", err)
}
if err := SaveKey(trustKeyPath, trustKey); err != nil {
return nil, fmt.Errorf("error saving key file: %s", err)
}
dir, file := filepath.Split(trustKeyPath)
if err := SavePublicKey(filepath.Join(dir, "public-"+file), trustKey.PublicKey()); err != nil {
return nil, fmt.Errorf("error saving public key file: %s", err)
}
} else if err != nil {
return nil, fmt.Errorf("error loading key file: %s", err)
}
return trustKey, nil
}
// NewIdentityAuthTLSClientConfig returns a tls.Config configured to use identity
// based authentication from the specified dockerUrl, the rootConfigPath and
// the server name to which it is connecting.
// If trustUnknownHosts is true it will automatically add the host to the
// known-hosts.json in rootConfigPath.
func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, rootConfigPath string, serverName string) (*tls.Config, error) {
tlsConfig := newTLSConfig()
trustKeyPath := filepath.Join(rootConfigPath, "key.json")
knownHostsPath := filepath.Join(rootConfigPath, "known-hosts.json")
u, err := url.Parse(dockerUrl)
if err != nil {
return nil, fmt.Errorf("unable to parse machine url")
}
if u.Scheme == "unix" {
return nil, nil
}
addr := u.Host
proto := "tcp"
trustKey, err := LoadOrCreateTrustKey(trustKeyPath)
if err != nil {
return nil, fmt.Errorf("unable to load trust key: %s", err)
}
knownHosts, err := LoadKeySetFile(knownHostsPath)
if err != nil {
return nil, fmt.Errorf("could not load trusted hosts file: %s", err)
}
allowedHosts, err := FilterByHosts(knownHosts, addr, false)
if err != nil {
return nil, fmt.Errorf("error filtering hosts: %s", err)
}
certPool, err := GenerateCACertPool(trustKey, allowedHosts)
if err != nil {
return nil, fmt.Errorf("Could not create CA pool: %s", err)
}
tlsConfig.ServerName = serverName
tlsConfig.RootCAs = certPool
x509Cert, err := GenerateSelfSignedClientCert(trustKey)
if err != nil {
return nil, fmt.Errorf("certificate generation error: %s", err)
}
tlsConfig.Certificates = []tls.Certificate{{
Certificate: [][]byte{x509Cert.Raw},
PrivateKey: trustKey.CryptoPrivateKey(),
Leaf: x509Cert,
}}
tlsConfig.InsecureSkipVerify = true
testConn, err := tls.Dial(proto, addr, tlsConfig)
if err != nil {
return nil, fmt.Errorf("tls Handshake error: %s", err)
}
opts := x509.VerifyOptions{
Roots: tlsConfig.RootCAs,
CurrentTime: time.Now(),
DNSName: tlsConfig.ServerName,
Intermediates: x509.NewCertPool(),
}
certs := testConn.ConnectionState().PeerCertificates
for i, cert := range certs {
if i == 0 {
continue
}
opts.Intermediates.AddCert(cert)
}
if _, err := certs[0].Verify(opts); err != nil {
if _, ok := err.(x509.UnknownAuthorityError); ok {
if trustUnknownHosts {
pubKey, err := FromCryptoPublicKey(certs[0].PublicKey)
if err != nil {
return nil, fmt.Errorf("error extracting public key from cert: %s", err)
}
pubKey.AddExtendedField("hosts", []string{addr})
if err := AddKeySetFile(knownHostsPath, pubKey); err != nil {
return nil, fmt.Errorf("error adding machine to known hosts: %s", err)
}
} else {
return nil, fmt.Errorf("unable to connect. unknown host: %s", addr)
}
}
}
testConn.Close()
tlsConfig.InsecureSkipVerify = false
return tlsConfig, nil
}
// joseBase64UrlEncode encodes the given data using the standard base64 url
// encoding format but with all trailing '=' characters omitted in accordance
// with the jose specification.
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
func joseBase64UrlEncode(b []byte) string {
return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
}
// joseBase64UrlDecode decodes the given string using the standard base64 url
// decoder but first adds the appropriate number of trailing '=' characters in
// accordance with the jose specification.
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
func joseBase64UrlDecode(s string) ([]byte, error) {
s = strings.Replace(s, "\n", "", -1)
s = strings.Replace(s, " ", "", -1)
switch len(s) % 4 {
case 0:
case 2:
s += "=="
case 3:
s += "="
default:
return nil, errors.New("illegal base64url string")
}
return base64.URLEncoding.DecodeString(s)
}
func keyIDEncode(b []byte) string {
s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=")
var buf bytes.Buffer
var i int
for i = 0; i < len(s)/4-1; i++ {
start := i * 4
end := start + 4
buf.WriteString(s[start:end] + ":")
}
buf.WriteString(s[i*4:])
return buf.String()
}
func keyIDFromCryptoKey(pubKey PublicKey) string {
// Generate and return a 'libtrust' fingerprint of the public key.
// For an RSA key this should be:
// SHA256(DER encoded ASN1)
// Then truncated to 240 bits and encoded into 12 base32 groups like so:
// ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey())
if err != nil {
return ""
}
hasher := crypto.SHA256.New()
hasher.Write(derBytes)
return keyIDEncode(hasher.Sum(nil)[:30])
}
func stringFromMap(m map[string]interface{}, key string) (string, error) {
val, ok := m[key]
if !ok {
return "", fmt.Errorf("%q value not specified", key)
}
str, ok := val.(string)
if !ok {
return "", fmt.Errorf("%q value must be a string", key)
}
delete(m, key)
return str, nil
}
func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) {
curveByteLen := (curve.Params().BitSize + 7) >> 3
cBytes, err := joseBase64UrlDecode(cB64Url)
if err != nil {
return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
}
cByteLength := len(cBytes)
if cByteLength != curveByteLen {
return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen)
}
return new(big.Int).SetBytes(cBytes), nil
}
func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) {
dBytes, err := joseBase64UrlDecode(dB64Url)
if err != nil {
return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
}
// The length of this octet string MUST be ceiling(log-base-2(n)/8)
// octets (where n is the order of the curve). This is because the private
// key d must be in the interval [1, n-1] so the bitlength of d should be
// no larger than the bitlength of n-1. The easiest way to find the octet
// length is to take bitlength(n-1), add 7 to force a carry, and shift this
// bit sequence right by 3, which is essentially dividing by 8 and adding
// 1 if there is any remainder. Thus, the private key value d should be
// output to (bitlength(n-1)+7)>>3 octets.
n := curve.Params().N
octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
dByteLength := len(dBytes)
if dByteLength != octetLength {
return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength)
}
return new(big.Int).SetBytes(dBytes), nil
}
func parseRSAModulusParam(nB64Url string) (*big.Int, error) {
nBytes, err := joseBase64UrlDecode(nB64Url)
if err != nil {
return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
}
return new(big.Int).SetBytes(nBytes), nil
}
func serializeRSAPublicExponentParam(e int) []byte {
// We MUST use the minimum number of octets to represent E.
// E is supposed to be 65537 for performance and security reasons
// and is what golang's rsa package generates, but it might be
// different if imported from some other generator.
buf := make([]byte, 4)
binary.BigEndian.PutUint32(buf, uint32(e))
var i int
for i = 0; i < len(buf); i++ { // scan only the 4 bytes in buf for the first non-zero octet
if buf[i] != 0 {
break
}
}
return buf[i:]
}
func parseRSAPublicExponentParam(eB64Url string) (int, error) {
eBytes, err := joseBase64UrlDecode(eB64Url)
if err != nil {
return 0, fmt.Errorf("invalid base64 URL encoding: %s", err)
}
// Only the minimum number of bytes were used to represent E, but
// binary.BigEndian.Uint32 expects at least 4 bytes, so we need
// to add zero padding if necessary.
byteLen := len(eBytes)
buf := make([]byte, 4-byteLen, 4)
eBytes = append(buf, eBytes...)
return int(binary.BigEndian.Uint32(eBytes)), nil
}
func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) {
b64Url, err := stringFromMap(m, key)
if err != nil {
return nil, err
}
paramBytes, err := joseBase64UrlDecode(b64Url)
if err != nil {
return nil, fmt.Errorf("invaled base64 URL encoding: %s", err)
}
return new(big.Int).SetBytes(paramBytes), nil
}
func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) {
pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}}
for k, v := range headers {
switch val := v.(type) {
case string:
pemBlock.Headers[k] = val
case []string:
if k == "hosts" {
pemBlock.Headers[k] = strings.Join(val, ",")
} else {
// Return error, non-encodable type
}
default:
// Return error, non-encodable type
}
}
return pemBlock, nil
}
func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) {
cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes)
if err != nil {
return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err)
}
pubKey, err := FromCryptoPublicKey(cryptoPublicKey)
if err != nil {
return nil, err
}
addPEMHeadersToKey(pemBlock, pubKey)
return pubKey, nil
}
func addPEMHeadersToKey(pemBlock *pem.Block, pubKey PublicKey) {
for key, value := range pemBlock.Headers {
var safeVal interface{}
if key == "hosts" {
safeVal = strings.Split(value, ",")
} else {
safeVal = value
}
pubKey.AddExtendedField(key, safeVal)
}
}

70
vendor/github.com/emicklei/go-restful/.gitignore generated vendored Normal file
View File

@ -0,0 +1,70 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
restful.html
*.out
tmp.prof
go-restful.test
examples/restful-basic-authentication
examples/restful-encoding-filter
examples/restful-filters
examples/restful-hello-world
examples/restful-resource-functions
examples/restful-serve-static
examples/restful-user-service
*.DS_Store
examples/restful-user-resource
examples/restful-multi-containers
examples/restful-form-handling
examples/restful-CORS-filter
examples/restful-options-filter
examples/restful-curly-router
examples/restful-cpuprofiler-service
examples/restful-pre-post-filters
curly.prof
examples/restful-NCSA-logging
examples/restful-html-template
s.html
restful-path-tail

163
vendor/github.com/emicklei/go-restful/CHANGES.md generated vendored Normal file
View File

@ -0,0 +1,163 @@
Change history of go-restful
=
2016-02-14
- take the quality factor of the Accept header media type into account when deciding the content type of the response
- add constructors for custom entity accessors for xml and json
2015-09-27
- rename new WriteStatusAnd... to WriteHeaderAnd... for consistency
2015-09-25
- fixed problem with changing Header after WriteHeader (issue 235)
2015-09-14
- changed behavior of WriteHeader (immediate write) and WriteEntity (no status write)
- added support for custom EntityReaderWriters.
2015-08-06
- add support for reading entities from compressed request content
- use sync.Pool for compressors of http response and request body
- add Description to Parameter for documentation in Swagger UI
2015-03-20
- add configurable logging
2015-03-18
- if not specified, the Operation is derived from the Route function
2015-03-17
- expose Parameter creation functions
- make trace logger an interface
- fix OPTIONSFilter
- customize rendering of ServiceError
- JSR311 router now handles wildcards
- add Notes to Route
2014-11-27
- (api add) PrettyPrint per response. (as proposed in #167)
2014-11-12
- (api add) ApiVersion(.) for documentation in Swagger UI
2014-11-10
- (api change) struct fields tagged with "description" show up in Swagger UI
2014-10-31
- (api change) ReturnsError -> Returns
- (api add) RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder
- fix swagger nested structs
- sort Swagger response messages by code
2014-10-23
- (api add) ReturnsError allows you to document Http codes in swagger
- fixed problem with greedy CurlyRouter
- (api add) Access-Control-Max-Age in CORS
- add tracing functionality (injectable) for debugging purposes
- support JSON parse 64bit int
- fix empty parameters for swagger
- WebServicesUrl is now optional for swagger
- fixed duplicate AccessControlAllowOrigin in CORS
- (api change) expose ServeMux in container
- (api add) added AllowedDomains in CORS
- (api add) ParameterNamed for detailed documentation
2014-04-16
- (api add) expose constructor of Request for testing.
2014-06-27
- (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification).
- (api add) SetCacheReadEntity allows control over whether or not the request body is cached (default true for compatibility reasons).
2014-07-03
- (api add) CORS can be configured with a list of allowed domains
2014-03-12
- (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter)
2014-02-26
- (api add) Request now provides information about the matched Route, see method SelectedRoutePath
2014-02-17
- (api change) renamed parameter constants (go-lint checks)
2014-01-10
- (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier
2014-01-07
- (api change) Write* methods in Response now return the error or nil.
- added example of serving HTML from a Go template.
- fixed comparing Allowed headers in CORS (is now case-insensitive)
2013-11-13
- (api add) Response knows how many bytes are written to the response body.
2013-10-29
- (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace. This may be a security issue as it exposes sourcecode information.
2013-10-04
- (api add) Response knows what HTTP status has been written
- (api add) Request can have attributes (map of string->interface, also called request-scoped variables
2013-09-12
- (api change) Router interface simplified
- Implemented CurlyRouter, a Router that does not use or allow regular expressions in paths
2013-08-05
- add OPTIONS support
- add CORS support
2013-08-27
- fixed some reported issues (see github)
- (api change) deprecated use of WriteError; use WriteErrorString instead
2014-04-15
- (fix) v1.0.1 tag: fix Issue 111: WriteErrorString
2013-08-08
- (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer.
- (api add) the swagger package has been extended to have a UI per container.
- if panic is detected then a small stack trace is printed (thanks to runner-mei)
- (api add) WriteErrorString to Response
Important API changes:
- (api remove) package variable DoNotRecover no longer works ; use restful.DefaultContainer.DoNotRecover(true) instead.
- (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead.
2013-07-06
- (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled by default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature.
2013-06-19
- (improve) DoNotRecover option, moved request body closer, improved ReadEntity
2013-06-03
- (api change) removed Dispatcher interface, hide PathExpression
- changed receiver names of type functions to be more idiomatic Go
2013-06-02
- (optimize) Cache the RegExp compilation of Paths.
2013-05-22
- (api add) Added support for request/response filter functions
2013-05-18
- (api add) Added feature to change the default Http Request Dispatch function (travis cline)
- (api change) Moved Swagger Webservice to swagger package (see example restful-user)
[2012-11-14 .. 2013-05-18>
- See https://github.com/emicklei/go-restful/commits
2012-11-14
- Initial commit

22
vendor/github.com/emicklei/go-restful/LICENSE generated vendored Normal file
View File

@ -0,0 +1,22 @@
Copyright (c) 2012,2013 Ernest Micklei
MIT License
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

74
vendor/github.com/emicklei/go-restful/README.md generated vendored Normal file
View File

@ -0,0 +1,74 @@
go-restful
==========
package for building REST-style Web Services using Google Go
REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping:
- GET = Retrieve a representation of a resource
- POST = Create if you are sending content to the server to create a subordinate of the specified resource collection, using some server-side algorithm.
- PUT = Create if you are sending the full content of the specified resource (URI).
- PUT = Update if you are updating the full content of the specified resource.
- DELETE = Delete if you are requesting the server to delete the resource
- PATCH = Update partial content of a resource
- OPTIONS = Get information about the communication options for the request URI
### Example
```Go
ws := new(restful.WebService)
ws.
Path("/users").
Consumes(restful.MIME_XML, restful.MIME_JSON).
Produces(restful.MIME_JSON, restful.MIME_XML)
ws.Route(ws.GET("/{user-id}").To(u.findUser).
Doc("get a user").
Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
Writes(User{}))
...
func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
id := request.PathParameter("user-id")
...
}
```
[Full API of a UserResource](https://github.com/emicklei/go-restful/tree/master/examples/restful-user-resource.go)
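One way to serve this WebService is a minimal sketch like the following (it assumes the default container, which registers itself on Go's `http.DefaultServeMux`; the port is a placeholder):
```Go
package main

import (
	"log"
	"net/http"

	"github.com/emicklei/go-restful"
)

func main() {
	ws := new(restful.WebService)
	// ... configure ws with Path, Consumes/Produces and Routes as shown above ...
	restful.Add(ws)                              // register with the DefaultContainer
	log.Fatal(http.ListenAndServe(":8080", nil)) // DefaultContainer serves via http.DefaultServeMux
}
```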
### Features
- Routes for request &#8594; function mapping with path parameter (e.g. {id}) support
- Configurable router:
- Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions (See RouterJSR311 which is used by default)
- Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. /meetings/{id} or /static/{subpath:*}, See CurlyRouter)
- Request API for reading structs from JSON/XML and accessing parameters (path, query, header)
- Response API for writing structs to JSON/XML and setting headers
- Filters for intercepting the request &#8594; response flow on Service or Route level (see the filter sketch after this list)
- Request-scoped variables using attributes
- Containers for WebServices on different HTTP endpoints
- Content encoding (gzip,deflate) of request and response payloads
- Automatic responses on OPTIONS (using a filter)
- Automatic CORS request handling (using a filter)
- API declaration for Swagger UI (see swagger package)
- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...)
- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...)
- Configurable (trace) logging
- Customizable encoding using EntityReaderWriter registration
- Customizable gzip/deflate readers and writers using CompressorProvider registration
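As referenced in the Filters item above, a request/response filter is just a function that receives a `FilterChain` continuation. A hedged sketch (the function name and log output are arbitrary) of a global logging filter:
```Go
func requestLogger(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
	log.Printf("%s %s", req.Request.Method, req.Request.URL)
	chain.ProcessFilter(req, resp) // continue with the next filter or, finally, the route function
}

// restful.Filter(requestLogger) // container-wide, on the DefaultContainer
// ws.Filter(requestLogger)      // or scoped to a single WebService
```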
### Resources
- [Documentation on godoc.org](http://godoc.org/github.com/emicklei/go-restful)
- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples)
- [Example posted on blog](http://ernestmicklei.com/2012/11/24/go-restful-first-working-example/)
- [Design explained on blog](http://ernestmicklei.com/2012/11/11/go-restful-api-design/)
- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful)
- [gopkg.in](https://gopkg.in/emicklei/go-restful.v1)
- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora)
[![Build Status](https://drone.io/github.com/emicklei/go-restful/status.png)](https://drone.io/github.com/emicklei/go-restful/latest)
(c) 2012 - 2015, http://ernestmicklei.com. MIT License
Type ```git shortlog -s``` for a full list of contributors.

1
vendor/github.com/emicklei/go-restful/Srcfile generated vendored Normal file
View File

@ -0,0 +1 @@
{"SkipDirs": ["examples"]}

10
vendor/github.com/emicklei/go-restful/bench_test.sh generated vendored Normal file
View File

@ -0,0 +1,10 @@
#go test -run=none -file bench_test.go -test.bench . -cpuprofile=bench_test.out
go test -c
./go-restful.test -test.run=none -test.cpuprofile=tmp.prof -test.bench=BenchmarkMany
./go-restful.test -test.run=none -test.cpuprofile=curly.prof -test.bench=BenchmarkManyCurly
#go tool pprof go-restful.test tmp.prof
go tool pprof go-restful.test curly.prof

123
vendor/github.com/emicklei/go-restful/compress.go generated vendored Normal file
View File

@ -0,0 +1,123 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bufio"
"compress/gzip"
"compress/zlib"
"errors"
"io"
"net"
"net/http"
"strings"
)
// OBSOLETE : use restful.DefaultContainer.EnableContentEncoding(true) to change this setting.
var EnableContentEncoding = false
// CompressingResponseWriter is a http.ResponseWriter that can perform content encoding (gzip and zlib)
type CompressingResponseWriter struct {
writer http.ResponseWriter
compressor io.WriteCloser
encoding string
}
// Header is part of http.ResponseWriter interface
func (c *CompressingResponseWriter) Header() http.Header {
return c.writer.Header()
}
// WriteHeader is part of http.ResponseWriter interface
func (c *CompressingResponseWriter) WriteHeader(status int) {
c.writer.WriteHeader(status)
}
// Write is part of http.ResponseWriter interface
// It is passed through the compressor
func (c *CompressingResponseWriter) Write(bytes []byte) (int, error) {
if c.isCompressorClosed() {
return -1, errors.New("Compressing error: tried to write data using closed compressor")
}
return c.compressor.Write(bytes)
}
// CloseNotify is part of http.CloseNotifier interface
func (c *CompressingResponseWriter) CloseNotify() <-chan bool {
return c.writer.(http.CloseNotifier).CloseNotify()
}
// Close the underlying compressor
func (c *CompressingResponseWriter) Close() error {
if c.isCompressorClosed() {
return errors.New("Compressing error: tried to close already closed compressor")
}
c.compressor.Close()
if ENCODING_GZIP == c.encoding {
currentCompressorProvider.ReleaseGzipWriter(c.compressor.(*gzip.Writer))
}
if ENCODING_DEFLATE == c.encoding {
currentCompressorProvider.ReleaseZlibWriter(c.compressor.(*zlib.Writer))
}
// gc hint needed?
c.compressor = nil
return nil
}
func (c *CompressingResponseWriter) isCompressorClosed() bool {
return nil == c.compressor
}
// Hijack implements the http.Hijacker interface.
// This is especially useful when combining Container.EnableContentEncoding
// with websockets (for instance gorilla/websocket).
func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
hijacker, ok := c.writer.(http.Hijacker)
if !ok {
return nil, nil, errors.New("ResponseWriter doesn't support Hijacker interface")
}
return hijacker.Hijack()
}
// wantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested.
func wantsCompressedResponse(httpRequest *http.Request) (bool, string) {
header := httpRequest.Header.Get(HEADER_AcceptEncoding)
gi := strings.Index(header, ENCODING_GZIP)
zi := strings.Index(header, ENCODING_DEFLATE)
// use in order of appearance
if gi == -1 {
return zi != -1, ENCODING_DEFLATE
} else if zi == -1 {
return gi != -1, ENCODING_GZIP
} else {
if gi < zi {
return true, ENCODING_GZIP
}
return true, ENCODING_DEFLATE
}
}
// NewCompressingResponseWriter creates a CompressingResponseWriter for a known encoding = {gzip,deflate}
func NewCompressingResponseWriter(httpWriter http.ResponseWriter, encoding string) (*CompressingResponseWriter, error) {
httpWriter.Header().Set(HEADER_ContentEncoding, encoding)
c := new(CompressingResponseWriter)
c.writer = httpWriter
var err error
if ENCODING_GZIP == encoding {
w := currentCompressorProvider.AcquireGzipWriter()
w.Reset(httpWriter)
c.compressor = w
c.encoding = ENCODING_GZIP
} else if ENCODING_DEFLATE == encoding {
w := currentCompressorProvider.AcquireZlibWriter()
w.Reset(httpWriter)
c.compressor = w
c.encoding = ENCODING_DEFLATE
} else {
return nil, errors.New("Unknown encoding:" + encoding)
}
return c, err
}

View File

@ -0,0 +1,103 @@
package restful
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"compress/gzip"
"compress/zlib"
)
// BoundedCachedCompressors is a CompressorProvider that uses a cache with a fixed amount
// of writers and readers (resources).
// If a new resource is acquired and all are in use, it will return a new unmanaged resource.
type BoundedCachedCompressors struct {
gzipWriters chan *gzip.Writer
gzipReaders chan *gzip.Reader
zlibWriters chan *zlib.Writer
writersCapacity int
readersCapacity int
}
// NewBoundedCachedCompressors returns a new BoundedCachedCompressors with a pre-filled cache.
func NewBoundedCachedCompressors(writersCapacity, readersCapacity int) *BoundedCachedCompressors {
b := &BoundedCachedCompressors{
gzipWriters: make(chan *gzip.Writer, writersCapacity),
gzipReaders: make(chan *gzip.Reader, readersCapacity),
zlibWriters: make(chan *zlib.Writer, writersCapacity),
writersCapacity: writersCapacity,
readersCapacity: readersCapacity,
}
for ix := 0; ix < writersCapacity; ix++ {
b.gzipWriters <- newGzipWriter()
b.zlibWriters <- newZlibWriter()
}
for ix := 0; ix < readersCapacity; ix++ {
b.gzipReaders <- newGzipReader()
}
return b
}
// AcquireGzipWriter returns a resettable *gzip.Writer. Needs to be released.
func (b *BoundedCachedCompressors) AcquireGzipWriter() *gzip.Writer {
var writer *gzip.Writer
select {
case writer, _ = <-b.gzipWriters:
default:
// return a new unmanaged one
writer = newGzipWriter()
}
return writer
}
// ReleaseGzipWriter accepts a writer (does not have to be one that was cached)
// only when the cache has room for it. It will ignore it otherwise.
func (b *BoundedCachedCompressors) ReleaseGzipWriter(w *gzip.Writer) {
// forget the unmanaged ones
if len(b.gzipWriters) < b.writersCapacity {
b.gzipWriters <- w
}
}
// AcquireGzipReader returns a *gzip.Reader. Needs to be released.
func (b *BoundedCachedCompressors) AcquireGzipReader() *gzip.Reader {
var reader *gzip.Reader
select {
case reader, _ = <-b.gzipReaders:
default:
// return a new unmanaged one
reader = newGzipReader()
}
return reader
}
// ReleaseGzipReader accepts a reader (does not have to be one that was cached)
// only when the cache has room for it. It will ignore it otherwise.
func (b *BoundedCachedCompressors) ReleaseGzipReader(r *gzip.Reader) {
// forget the unmanaged ones
if len(b.gzipReaders) < b.readersCapacity {
b.gzipReaders <- r
}
}
// AcquireZlibWriter returns a resettable *zlib.Writer. Needs to be released.
func (b *BoundedCachedCompressors) AcquireZlibWriter() *zlib.Writer {
var writer *zlib.Writer
select {
case writer, _ = <-b.zlibWriters:
default:
// return a new unmanaged one
writer = newZlibWriter()
}
return writer
}
// ReleaseZlibWriter accepts a writer (does not have to be one that was cached)
// only when the cache has room for it. It will ignore it otherwise.
func (b *BoundedCachedCompressors) ReleaseZlibWriter(w *zlib.Writer) {
// forget the unmanaged ones
if len(b.zlibWriters) < b.writersCapacity {
b.zlibWriters <- w
}
}

View File

@ -0,0 +1,91 @@
package restful
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"compress/gzip"
"compress/zlib"
"sync"
)
// SyncPoolCompessors is a CompressorProvider that uses the standard sync.Pool.
type SyncPoolCompessors struct {
GzipWriterPool *sync.Pool
GzipReaderPool *sync.Pool
ZlibWriterPool *sync.Pool
}
// NewSyncPoolCompessors returns a new ("empty") SyncPoolCompessors.
func NewSyncPoolCompessors() *SyncPoolCompessors {
return &SyncPoolCompessors{
GzipWriterPool: &sync.Pool{
New: func() interface{} { return newGzipWriter() },
},
GzipReaderPool: &sync.Pool{
New: func() interface{} { return newGzipReader() },
},
ZlibWriterPool: &sync.Pool{
New: func() interface{} { return newZlibWriter() },
},
}
}
func (s *SyncPoolCompessors) AcquireGzipWriter() *gzip.Writer {
return s.GzipWriterPool.Get().(*gzip.Writer)
}
func (s *SyncPoolCompessors) ReleaseGzipWriter(w *gzip.Writer) {
s.GzipWriterPool.Put(w)
}
func (s *SyncPoolCompessors) AcquireGzipReader() *gzip.Reader {
return s.GzipReaderPool.Get().(*gzip.Reader)
}
func (s *SyncPoolCompessors) ReleaseGzipReader(r *gzip.Reader) {
s.GzipReaderPool.Put(r)
}
func (s *SyncPoolCompessors) AcquireZlibWriter() *zlib.Writer {
return s.ZlibWriterPool.Get().(*zlib.Writer)
}
func (s *SyncPoolCompessors) ReleaseZlibWriter(w *zlib.Writer) {
s.ZlibWriterPool.Put(w)
}
func newGzipWriter() *gzip.Writer {
// create with an empty bytes writer; it will be replaced before using the gzipWriter
writer, err := gzip.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed)
if err != nil {
panic(err.Error())
}
return writer
}
func newGzipReader() *gzip.Reader {
// create with an empty reader (but with GZIP header); it will be replaced before using the gzipReader
// we can safely use currentCompressorProvider because it is set on package initialization.
w := currentCompressorProvider.AcquireGzipWriter()
defer currentCompressorProvider.ReleaseGzipWriter(w)
b := new(bytes.Buffer)
w.Reset(b)
w.Flush()
w.Close()
reader, err := gzip.NewReader(bytes.NewReader(b.Bytes()))
if err != nil {
panic(err.Error())
}
return reader
}
func newZlibWriter() *zlib.Writer {
writer, err := zlib.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed)
if err != nil {
panic(err.Error())
}
return writer
}

53
vendor/github.com/emicklei/go-restful/compressors.go generated vendored Normal file
View File

@ -0,0 +1,53 @@
package restful
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"compress/gzip"
"compress/zlib"
)
type CompressorProvider interface {
// Returns a *gzip.Writer which needs to be released later.
// Before using it, call Reset().
AcquireGzipWriter() *gzip.Writer
// Releases an acquired *gzip.Writer.
ReleaseGzipWriter(w *gzip.Writer)
// Returns a *gzip.Reader which needs to be released later.
AcquireGzipReader() *gzip.Reader
// Releases an acquired *gzip.Reader.
ReleaseGzipReader(w *gzip.Reader)
// Returns a *zlib.Writer which needs to be released later.
// Before using it, call Reset().
AcquireZlibWriter() *zlib.Writer
// Releases an acquired *zlib.Writer.
ReleaseZlibWriter(w *zlib.Writer)
}
// currentCompressorProvider is the actual provider of compressors (zlib or gzip).
var currentCompressorProvider CompressorProvider
func init() {
currentCompressorProvider = NewSyncPoolCompessors()
}
// CurrentCompressorProvider returns the current CompressorProvider.
// It is initialized using a SyncPoolCompessors.
func CurrentCompressorProvider() CompressorProvider {
return currentCompressorProvider
}
// SetCompressorProvider sets the actual provider of compressors (zlib or gzip).
func SetCompressorProvider(p CompressorProvider) {
if p == nil {
panic("cannot set compressor provider to nil")
}
currentCompressorProvider = p
}
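// A minimal usage sketch of the provider hook above: an application may swap in
// the bounded cache variant instead of the default sync.Pool based provider.
// The capacities (20 writers, 20 readers) are arbitrary placeholders.
func useBoundedCompressors() {
	SetCompressorProvider(NewBoundedCachedCompressors(20, 20))
}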

30
vendor/github.com/emicklei/go-restful/constants.go generated vendored Normal file
View File

@ -0,0 +1,30 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
const (
MIME_XML = "application/xml" // Accept or Content-Type used in Consumes() and/or Produces()
MIME_JSON = "application/json" // Accept or Content-Type used in Consumes() and/or Produces()
MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default
HEADER_Allow = "Allow"
HEADER_Accept = "Accept"
HEADER_Origin = "Origin"
HEADER_ContentType = "Content-Type"
HEADER_LastModified = "Last-Modified"
HEADER_AcceptEncoding = "Accept-Encoding"
HEADER_ContentEncoding = "Content-Encoding"
HEADER_AccessControlExposeHeaders = "Access-Control-Expose-Headers"
HEADER_AccessControlRequestMethod = "Access-Control-Request-Method"
HEADER_AccessControlRequestHeaders = "Access-Control-Request-Headers"
HEADER_AccessControlAllowMethods = "Access-Control-Allow-Methods"
HEADER_AccessControlAllowOrigin = "Access-Control-Allow-Origin"
HEADER_AccessControlAllowCredentials = "Access-Control-Allow-Credentials"
HEADER_AccessControlAllowHeaders = "Access-Control-Allow-Headers"
HEADER_AccessControlMaxAge = "Access-Control-Max-Age"
ENCODING_GZIP = "gzip"
ENCODING_DEFLATE = "deflate"
)

361
vendor/github.com/emicklei/go-restful/container.go generated vendored Normal file
View File

@ -0,0 +1,361 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"errors"
"fmt"
"net/http"
"os"
"runtime"
"strings"
"sync"
"github.com/emicklei/go-restful/log"
)
// Container holds a collection of WebServices and a http.ServeMux to dispatch http requests.
// The requests are further dispatched to routes of WebServices using a RouteSelector
type Container struct {
webServicesLock sync.RWMutex
webServices []*WebService
ServeMux *http.ServeMux
isRegisteredOnRoot bool
containerFilters []FilterFunction
doNotRecover bool // default is false
recoverHandleFunc RecoverHandleFunction
serviceErrorHandleFunc ServiceErrorHandleFunction
router RouteSelector // default is a RouterJSR311, CurlyRouter is the faster alternative
contentEncodingEnabled bool // default is false
}
// NewContainer creates a new Container using a new ServeMux and default router (RouterJSR311)
func NewContainer() *Container {
return &Container{
webServices: []*WebService{},
ServeMux: http.NewServeMux(),
isRegisteredOnRoot: false,
containerFilters: []FilterFunction{},
doNotRecover: false,
recoverHandleFunc: logStackOnRecover,
serviceErrorHandleFunc: writeServiceError,
router: RouterJSR311{},
contentEncodingEnabled: false}
}
// RecoverHandleFunction declares functions that can be used to handle a panic situation.
// The first argument is what recover() returns. The second must be used to communicate an error response.
type RecoverHandleFunction func(interface{}, http.ResponseWriter)
// RecoverHandler changes the default function (logStackOnRecover) to be called
// when a panic is detected. DoNotRecover must have its default value (=false).
func (c *Container) RecoverHandler(handler RecoverHandleFunction) {
c.recoverHandleFunc = handler
}
// ServiceErrorHandleFunction declares functions that can be used to handle a service error situation.
// The first argument is the service error, the second is the request that resulted in the error and
// the third must be used to communicate an error response.
type ServiceErrorHandleFunction func(ServiceError, *Request, *Response)
// ServiceErrorHandler changes the default function (writeServiceError) to be called
// when a ServiceError is detected.
func (c *Container) ServiceErrorHandler(handler ServiceErrorHandleFunction) {
c.serviceErrorHandleFunc = handler
}
// DoNotRecover controls whether panics will be caught to return HTTP 500.
// If set to true, Route functions are responsible for handling any error situation.
// Default value is false = recover from panics. This has performance implications.
func (c *Container) DoNotRecover(doNot bool) {
c.doNotRecover = doNot
}
// Router changes the default Router (currently RouterJSR311)
func (c *Container) Router(aRouter RouteSelector) {
c.router = aRouter
}
// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses.
func (c *Container) EnableContentEncoding(enabled bool) {
c.contentEncodingEnabled = enabled
}
// Add a WebService to the Container. It will detect duplicate root paths and exit in that case.
func (c *Container) Add(service *WebService) *Container {
c.webServicesLock.Lock()
defer c.webServicesLock.Unlock()
// if rootPath was not set then lazy initialize it
if len(service.rootPath) == 0 {
service.Path("/")
}
// cannot have duplicate root paths
for _, each := range c.webServices {
if each.RootPath() == service.RootPath() {
log.Printf("[restful] WebService with duplicate root path detected:['%v']", each)
os.Exit(1)
}
}
// If not registered on root then add specific mapping
if !c.isRegisteredOnRoot {
c.isRegisteredOnRoot = c.addHandler(service, c.ServeMux)
}
c.webServices = append(c.webServices, service)
return c
}
// addHandler may set a new HandleFunc for the serveMux
// this function must run inside the critical region protected by the webServicesLock.
// returns true if the function was registered on root ("/")
func (c *Container) addHandler(service *WebService, serveMux *http.ServeMux) bool {
pattern := fixedPrefixPath(service.RootPath())
// check if root path registration is needed
if "/" == pattern || "" == pattern {
serveMux.HandleFunc("/", c.dispatch)
return true
}
// detect if registration already exists
alreadyMapped := false
for _, each := range c.webServices {
if each.RootPath() == service.RootPath() {
alreadyMapped = true
break
}
}
if !alreadyMapped {
serveMux.HandleFunc(pattern, c.dispatch)
if !strings.HasSuffix(pattern, "/") {
serveMux.HandleFunc(pattern+"/", c.dispatch)
}
}
return false
}
func (c *Container) Remove(ws *WebService) error {
if c.ServeMux == http.DefaultServeMux {
errMsg := fmt.Sprintf("[restful] cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws)
log.Printf(errMsg)
return errors.New(errMsg)
}
c.webServicesLock.Lock()
defer c.webServicesLock.Unlock()
// build a new ServeMux and re-register all WebServices
newServeMux := http.NewServeMux()
newServices := []*WebService{}
newIsRegisteredOnRoot := false
for _, each := range c.webServices {
if each.rootPath != ws.rootPath {
// If not registered on root then add specific mapping
if !newIsRegisteredOnRoot {
newIsRegisteredOnRoot = c.addHandler(each, newServeMux)
}
newServices = append(newServices, each)
}
}
c.webServices, c.ServeMux, c.isRegisteredOnRoot = newServices, newServeMux, newIsRegisteredOnRoot
return nil
}
// logStackOnRecover is the default RecoverHandleFunction and is called
// when DoNotRecover is false and the recoverHandleFunc is not set for the container.
// Default implementation logs the stacktrace and writes the stacktrace on the response.
// This may be a security issue as it exposes sourcecode information.
func logStackOnRecover(panicReason interface{}, httpWriter http.ResponseWriter) {
var buffer bytes.Buffer
buffer.WriteString(fmt.Sprintf("[restful] recover from panic situation: - %v\r\n", panicReason))
for i := 2; ; i += 1 {
_, file, line, ok := runtime.Caller(i)
if !ok {
break
}
buffer.WriteString(fmt.Sprintf(" %s:%d\r\n", file, line))
}
log.Print(buffer.String())
httpWriter.WriteHeader(http.StatusInternalServerError)
httpWriter.Write(buffer.Bytes())
}
// writeServiceError is the default ServiceErrorHandleFunction and is called
// when a ServiceError is returned during route selection. Default implementation
// calls resp.WriteErrorString(err.Code, err.Message)
func writeServiceError(err ServiceError, req *Request, resp *Response) {
resp.WriteErrorString(err.Code, err.Message)
}
// Dispatch the incoming Http Request to a matching WebService.
func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
writer := httpWriter
// CompressingResponseWriter should be closed after all operations are done
defer func() {
if compressWriter, ok := writer.(*CompressingResponseWriter); ok {
compressWriter.Close()
}
}()
// Install panic recovery unless told otherwise
if !c.doNotRecover { // catch all for 500 response
defer func() {
if r := recover(); r != nil {
c.recoverHandleFunc(r, writer)
return
}
}()
}
// Install closing the request body (if any)
defer func() {
if nil != httpRequest.Body {
httpRequest.Body.Close()
}
}()
// Detect if compression is needed
// assume without compression, test for override
if c.contentEncodingEnabled {
doCompress, encoding := wantsCompressedResponse(httpRequest)
if doCompress {
var err error
writer, err = NewCompressingResponseWriter(httpWriter, encoding)
if err != nil {
log.Print("[restful] unable to install compressor: ", err)
httpWriter.WriteHeader(http.StatusInternalServerError)
return
}
}
}
// Find best match Route ; err is non nil if no match was found
var webService *WebService
var route *Route
var err error
func() {
c.webServicesLock.RLock()
defer c.webServicesLock.RUnlock()
webService, route, err = c.router.SelectRoute(
c.webServices,
httpRequest)
}()
if err != nil {
// a non-200 response has already been written
// run container filters anyway ; they should not touch the response...
chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
switch err.(type) {
case ServiceError:
ser := err.(ServiceError)
c.serviceErrorHandleFunc(ser, req, resp)
}
// TODO
}}
chain.ProcessFilter(NewRequest(httpRequest), NewResponse(writer))
return
}
wrappedRequest, wrappedResponse := route.wrapRequestResponse(writer, httpRequest)
// pass through filters (if any)
if len(c.containerFilters)+len(webService.filters)+len(route.Filters) > 0 {
// compose filter chain
allFilters := []FilterFunction{}
allFilters = append(allFilters, c.containerFilters...)
allFilters = append(allFilters, webService.filters...)
allFilters = append(allFilters, route.Filters...)
chain := FilterChain{Filters: allFilters, Target: func(req *Request, resp *Response) {
// handle request by route after passing all filters
route.Function(wrappedRequest, wrappedResponse)
}}
chain.ProcessFilter(wrappedRequest, wrappedResponse)
} else {
// no filters, handle request by route
route.Function(wrappedRequest, wrappedResponse)
}
}
// fixedPrefixPath returns the fixed part of the pathspec; it may include template vars {}
func fixedPrefixPath(pathspec string) string {
varBegin := strings.Index(pathspec, "{")
if -1 == varBegin {
return pathspec
}
return pathspec[:varBegin]
}
// ServeHTTP implements net/http.Handler therefore a Container can be a Handler in a http.Server
func (c Container) ServeHTTP(httpwriter http.ResponseWriter, httpRequest *http.Request) {
c.ServeMux.ServeHTTP(httpwriter, httpRequest)
}
// Handle registers the handler for the given pattern. If a handler already exists for pattern, Handle panics.
func (c Container) Handle(pattern string, handler http.Handler) {
c.ServeMux.Handle(pattern, handler)
}
// HandleWithFilter registers the handler for the given pattern.
// Container's filter chain is applied for handler.
// If a handler already exists for pattern, HandleWithFilter panics.
func (c *Container) HandleWithFilter(pattern string, handler http.Handler) {
f := func(httpResponse http.ResponseWriter, httpRequest *http.Request) {
if len(c.containerFilters) == 0 {
handler.ServeHTTP(httpResponse, httpRequest)
return
}
chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
handler.ServeHTTP(httpResponse, httpRequest)
}}
chain.ProcessFilter(NewRequest(httpRequest), NewResponse(httpResponse))
}
c.Handle(pattern, http.HandlerFunc(f))
}
// Filter appends a container FilterFunction. These are called before dispatching
// a http.Request to a WebService from the container
func (c *Container) Filter(filter FilterFunction) {
c.containerFilters = append(c.containerFilters, filter)
}
// RegisteredWebServices returns the collections of added WebServices
func (c Container) RegisteredWebServices() []*WebService {
c.webServicesLock.RLock()
defer c.webServicesLock.RUnlock()
result := make([]*WebService, len(c.webServices))
for ix := range c.webServices {
result[ix] = c.webServices[ix]
}
return result
}
// computeAllowedMethods returns a list of HTTP methods that are valid for a Request
func (c Container) computeAllowedMethods(req *Request) []string {
// Go through all RegisteredWebServices() and all its Routes to collect the options
methods := []string{}
requestPath := req.Request.URL.Path
for _, ws := range c.RegisteredWebServices() {
matches := ws.pathExpr.Matcher.FindStringSubmatch(requestPath)
if matches != nil {
finalMatch := matches[len(matches)-1]
for _, rt := range ws.Routes() {
matches := rt.pathExpr.Matcher.FindStringSubmatch(finalMatch)
if matches != nil {
lastMatch := matches[len(matches)-1]
if lastMatch == "" || lastMatch == "/" { // do not include if value is neither empty nor /.
methods = append(methods, rt.Method)
}
}
}
}
}
// methods = append(methods, "OPTIONS") not sure about this
return methods
}
// newBasicRequestResponse creates a pair of Request,Response from its http versions.
// It is basic because no parameter or (produces) content-type information is given.
func newBasicRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) {
resp := NewResponse(httpWriter)
resp.requestAccept = httpRequest.Header.Get(HEADER_Accept)
return NewRequest(httpRequest), resp
}
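// A minimal usage sketch: a Container is an http.Handler (see ServeHTTP above),
// so it can be passed straight to http.ListenAndServe. The address is a
// placeholder and ws stands for any configured *WebService.
func serveContainer(ws *WebService) error {
	container := NewContainer()
	container.Router(CurlyRouter{}) // optional: swap the default RouterJSR311 for the faster CurlyRouter
	container.Add(ws)
	return http.ListenAndServe(":8080", container)
}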

202
vendor/github.com/emicklei/go-restful/cors_filter.go generated vendored Normal file
View File

@ -0,0 +1,202 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"regexp"
"strconv"
"strings"
)
// CrossOriginResourceSharing is used to create a Container Filter that implements CORS.
// Cross-origin resource sharing (CORS) is a mechanism that allows JavaScript on a web page
// to make XMLHttpRequests to another domain, not the domain the JavaScript originated from.
//
// http://en.wikipedia.org/wiki/Cross-origin_resource_sharing
// http://enable-cors.org/server.html
// http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request
type CrossOriginResourceSharing struct {
ExposeHeaders []string // list of Header names
AllowedHeaders []string // list of Header names
AllowedDomains []string // list of allowed values for Http Origin. An allowed value can be a regular expression to support subdomain matching. If empty all are allowed.
AllowedMethods []string
MaxAge int // number of seconds before requiring new Options request
CookiesAllowed bool
Container *Container
allowedOriginPatterns []*regexp.Regexp // internal field for origin regexp check.
}
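// A minimal usage sketch of this filter; the allowed domain and header values
// are placeholders.
func enableCORS(c *Container) {
	cors := CrossOriginResourceSharing{
		AllowedHeaders: []string{"Content-Type", "Accept"},
		AllowedDomains: []string{"http://example.com"},
		CookiesAllowed: false,
		Container:      c,
	}
	c.Filter(cors.Filter)
}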
// Filter is a filter function that implements the CORS flow as documented on http://enable-cors.org/server.html
// and http://www.html5rocks.com/static/images/cors_server_flowchart.png
func (c CrossOriginResourceSharing) Filter(req *Request, resp *Response, chain *FilterChain) {
origin := req.Request.Header.Get(HEADER_Origin)
if len(origin) == 0 {
if trace {
traceLogger.Print("no Http header Origin set")
}
chain.ProcessFilter(req, resp)
return
}
if !c.isOriginAllowed(origin) { // check whether this origin is allowed
if trace {
traceLogger.Printf("HTTP Origin:%s is not part of %v, neither matches any part of %v", origin, c.AllowedDomains, c.allowedOriginPatterns)
}
chain.ProcessFilter(req, resp)
return
}
if req.Request.Method != "OPTIONS" {
c.doActualRequest(req, resp)
chain.ProcessFilter(req, resp)
return
}
if acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod); acrm != "" {
c.doPreflightRequest(req, resp)
} else {
c.doActualRequest(req, resp)
chain.ProcessFilter(req, resp)
return
}
}
func (c CrossOriginResourceSharing) doActualRequest(req *Request, resp *Response) {
c.setOptionsHeaders(req, resp)
// continue processing the response
}
func (c *CrossOriginResourceSharing) doPreflightRequest(req *Request, resp *Response) {
if len(c.AllowedMethods) == 0 {
if c.Container == nil {
c.AllowedMethods = DefaultContainer.computeAllowedMethods(req)
} else {
c.AllowedMethods = c.Container.computeAllowedMethods(req)
}
}
acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod)
if !c.isValidAccessControlRequestMethod(acrm, c.AllowedMethods) {
if trace {
traceLogger.Printf("Http header %s:%s is not in %v",
HEADER_AccessControlRequestMethod,
acrm,
c.AllowedMethods)
}
return
}
acrhs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders)
if len(acrhs) > 0 {
for _, each := range strings.Split(acrhs, ",") {
if !c.isValidAccessControlRequestHeader(strings.Trim(each, " ")) {
if trace {
traceLogger.Printf("Http header %s:%s is not in %v",
HEADER_AccessControlRequestHeaders,
acrhs,
c.AllowedHeaders)
}
return
}
}
}
resp.AddHeader(HEADER_AccessControlAllowMethods, strings.Join(c.AllowedMethods, ","))
resp.AddHeader(HEADER_AccessControlAllowHeaders, acrhs)
c.setOptionsHeaders(req, resp)
// return http 200 response, no body
}
func (c CrossOriginResourceSharing) setOptionsHeaders(req *Request, resp *Response) {
c.checkAndSetExposeHeaders(resp)
c.setAllowOriginHeader(req, resp)
c.checkAndSetAllowCredentials(resp)
if c.MaxAge > 0 {
resp.AddHeader(HEADER_AccessControlMaxAge, strconv.Itoa(c.MaxAge))
}
}
func (c CrossOriginResourceSharing) isOriginAllowed(origin string) bool {
if len(origin) == 0 {
return false
}
if len(c.AllowedDomains) == 0 {
return true
}
allowed := false
for _, domain := range c.AllowedDomains {
if domain == origin {
allowed = true
break
}
}
if !allowed {
if len(c.allowedOriginPatterns) == 0 {
// compile allowed domains to allowed origin patterns
allowedOriginRegexps, err := compileRegexps(c.AllowedDomains)
if err != nil {
return false
}
c.allowedOriginPatterns = allowedOriginRegexps
}
for _, pattern := range c.allowedOriginPatterns {
if allowed = pattern.MatchString(origin); allowed {
break
}
}
}
return allowed
}
func (c CrossOriginResourceSharing) setAllowOriginHeader(req *Request, resp *Response) {
origin := req.Request.Header.Get(HEADER_Origin)
if c.isOriginAllowed(origin) {
resp.AddHeader(HEADER_AccessControlAllowOrigin, origin)
}
}
func (c CrossOriginResourceSharing) checkAndSetExposeHeaders(resp *Response) {
if len(c.ExposeHeaders) > 0 {
resp.AddHeader(HEADER_AccessControlExposeHeaders, strings.Join(c.ExposeHeaders, ","))
}
}
func (c CrossOriginResourceSharing) checkAndSetAllowCredentials(resp *Response) {
if c.CookiesAllowed {
resp.AddHeader(HEADER_AccessControlAllowCredentials, "true")
}
}
func (c CrossOriginResourceSharing) isValidAccessControlRequestMethod(method string, allowedMethods []string) bool {
for _, each := range allowedMethods {
if each == method {
return true
}
}
return false
}
func (c CrossOriginResourceSharing) isValidAccessControlRequestHeader(header string) bool {
for _, each := range c.AllowedHeaders {
if strings.ToLower(each) == strings.ToLower(header) {
return true
}
}
return false
}
// Take a list of strings and compile them into a list of regular expressions.
func compileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) {
regexps := []*regexp.Regexp{}
for _, regexpStr := range regexpStrings {
r, err := regexp.Compile(regexpStr)
if err != nil {
return regexps, err
}
regexps = append(regexps, r)
}
return regexps, nil
}

2
vendor/github.com/emicklei/go-restful/coverage.sh generated vendored Normal file
View File

@ -0,0 +1,2 @@
go test -coverprofile=coverage.out
go tool cover -html=coverage.out

162
vendor/github.com/emicklei/go-restful/curly.go generated vendored Normal file
View File

@ -0,0 +1,162 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"net/http"
"regexp"
"sort"
"strings"
)
// CurlyRouter expects Routes with paths that contain zero or more parameters in curly brackets.
type CurlyRouter struct{}
// SelectRoute is part of the Router interface and returns the best match
// for the WebService and its Route for the given Request.
func (c CurlyRouter) SelectRoute(
webServices []*WebService,
httpRequest *http.Request) (selectedService *WebService, selected *Route, err error) {
requestTokens := tokenizePath(httpRequest.URL.Path)
detectedService := c.detectWebService(requestTokens, webServices)
if detectedService == nil {
if trace {
traceLogger.Printf("no WebService was found to match URL path:%s\n", httpRequest.URL.Path)
}
return nil, nil, NewError(http.StatusNotFound, "404: Page Not Found")
}
candidateRoutes := c.selectRoutes(detectedService, requestTokens)
if len(candidateRoutes) == 0 {
if trace {
traceLogger.Printf("no Route in WebService with path %s was found to match URL path:%s\n", detectedService.rootPath, httpRequest.URL.Path)
}
return detectedService, nil, NewError(http.StatusNotFound, "404: Page Not Found")
}
selectedRoute, err := c.detectRoute(candidateRoutes, httpRequest)
if selectedRoute == nil {
return detectedService, nil, err
}
return detectedService, selectedRoute, nil
}
// selectRoutes returns a collection of Route from a WebService that matches the path tokens from the request.
func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) []Route {
candidates := &sortableCurlyRoutes{[]*curlyRoute{}}
for _, each := range ws.routes {
matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens)
if matches {
candidates.add(&curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers?
}
}
sort.Sort(sort.Reverse(candidates))
return candidates.routes()
}
// matchesRouteByPathTokens computes whether the route matches, how many parameters match, and how many static path elements there are.
func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []string) (matches bool, paramCount int, staticCount int) {
if len(routeTokens) < len(requestTokens) {
// proceed in matching only if last routeToken is wildcard
count := len(routeTokens)
if count == 0 || !strings.HasSuffix(routeTokens[count-1], "*}") {
return false, 0, 0
}
// proceed
}
for i, routeToken := range routeTokens {
if i == len(requestTokens) {
// reached end of request path
return false, 0, 0
}
requestToken := requestTokens[i]
if strings.HasPrefix(routeToken, "{") {
paramCount++
if colon := strings.Index(routeToken, ":"); colon != -1 {
// match by regex
matchesToken, matchesRemainder := c.regularMatchesPathToken(routeToken, colon, requestToken)
if !matchesToken {
return false, 0, 0
}
if matchesRemainder {
break
}
}
} else { // no { prefix
if requestToken != routeToken {
return false, 0, 0
}
staticCount++
}
}
return true, paramCount, staticCount
}
// regularMatchesPathToken tests whether the regular expression part of routeToken matches the requestToken or all remaining tokens
// format routeToken is {someVar:someExpression}, e.g. {zipcode:[\d][\d][\d][\d][A-Z][A-Z]}
func (c CurlyRouter) regularMatchesPathToken(routeToken string, colon int, requestToken string) (matchesToken bool, matchesRemainder bool) {
regPart := routeToken[colon+1 : len(routeToken)-1]
if regPart == "*" {
if trace {
traceLogger.Printf("wildcard parameter detected in route token %s that matches %s\n", routeToken, requestToken)
}
return true, true
}
matched, err := regexp.MatchString(regPart, requestToken)
return (matched && err == nil), false
}
// detectRoute selects from a list of Route the first match by inspecting both the Accept and Content-Type
// headers of the Request. See also RouterJSR311 in jsr311.go
func (c CurlyRouter) detectRoute(candidateRoutes []Route, httpRequest *http.Request) (*Route, error) {
// tracing is done inside detectRoute
return RouterJSR311{}.detectRoute(candidateRoutes, httpRequest)
}
// detectWebService returns the best matching webService given the list of path tokens.
// see also computeWebserviceScore
func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService {
var best *WebService
score := -1
for _, each := range webServices {
matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens)
if matches && (eachScore > score) {
best = each
score = eachScore
}
}
return best
}
// computeWebserviceScore returns whether tokens match and
// the weighted score of the longest matching consecutive tokens from the beginning.
func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) {
if len(tokens) > len(requestTokens) {
return false, 0
}
score := 0
for i := 0; i < len(tokens); i++ {
each := requestTokens[i]
other := tokens[i]
if len(each) == 0 && len(other) == 0 {
score++
continue
}
if len(other) > 0 && strings.HasPrefix(other, "{") {
// no empty match
if len(each) == 0 {
return false, score
}
score += 1
} else {
// not a parameter
if each != other {
return false, score
}
score += (len(tokens) - i) * 10 //fuzzy
}
}
return true, score
}
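A minimal usage sketch (not part of this diff) for the CurlyRouter above, assuming the vendored import path github.com/emicklei/go-restful; the WebService and Container helpers it uses (ws.GET, Container.Add, Container.Router) come from other files of the package, not from this hunk. The {path:*} parameter exercises the wildcard handling in regularMatchesPathToken.

package main

import (
	"log"
	"net/http"

	"github.com/emicklei/go-restful"
)

func main() {
	ws := new(restful.WebService)
	ws.Path("/static").Produces(restful.MIME_JSON)
	// {path:*} captures the remaining tail of the request path
	ws.Route(ws.GET("/{path:*}").To(func(req *restful.Request, resp *restful.Response) {
		resp.WriteEntity(map[string]string{"tail": req.PathParameter("path")})
	}))

	container := restful.NewContainer()
	container.Router(restful.CurlyRouter{})
	container.Add(ws)
	log.Fatal(http.ListenAndServe(":8080", container))
}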

54
vendor/github.com/emicklei/go-restful/curly_route.go generated vendored Normal file
View File

@ -0,0 +1,54 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
// curlyRoute exists for sorting Routes by the CurlyRouter based on number of parameters and number of static path elements.
type curlyRoute struct {
route Route
paramCount int
staticCount int
}
type sortableCurlyRoutes struct {
candidates []*curlyRoute
}
func (s *sortableCurlyRoutes) add(route *curlyRoute) {
s.candidates = append(s.candidates, route)
}
func (s *sortableCurlyRoutes) routes() (routes []Route) {
for _, each := range s.candidates {
routes = append(routes, each.route) // TODO change return type
}
return routes
}
func (s *sortableCurlyRoutes) Len() int {
return len(s.candidates)
}
func (s *sortableCurlyRoutes) Swap(i, j int) {
s.candidates[i], s.candidates[j] = s.candidates[j], s.candidates[i]
}
func (s *sortableCurlyRoutes) Less(i, j int) bool {
ci := s.candidates[i]
cj := s.candidates[j]
// primary key
if ci.staticCount < cj.staticCount {
return true
}
if ci.staticCount > cj.staticCount {
return false
}
// secondary key
if ci.paramCount < cj.paramCount {
return true
}
if ci.paramCount > cj.paramCount {
return false
}
return ci.route.Path < cj.route.Path
}

196
vendor/github.com/emicklei/go-restful/doc.go generated vendored Normal file
View File

@ -0,0 +1,196 @@
/*
Package restful, a lean package for creating REST-style WebServices without magic.
WebServices and Routes
A WebService has a collection of Route objects that dispatch incoming Http Requests to function calls.
Typically, a WebService has a root path (e.g. /users) and defines common MIME types for its routes.
WebServices must be added to a container (see below) in order to handle Http requests from a server.
A Route is defined by an HTTP method, a URL path and (optionally) the MIME types it consumes (Content-Type) and produces (Accept).
This package has the logic to find the best matching Route and if found, call its Function.
ws := new(restful.WebService)
ws.
Path("/users").
Consumes(restful.MIME_JSON, restful.MIME_XML).
Produces(restful.MIME_JSON, restful.MIME_XML)
ws.Route(ws.GET("/{user-id}").To(u.findUser)) // u is a UserResource
...
// GET http://localhost:8080/users/1
func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
id := request.PathParameter("user-id")
...
}
The (*Request, *Response) arguments provide functions for reading information from the request and writing information back to the response.
See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-user-resource.go with a full implementation.
Regular expression matching Routes
A Route parameter can be specified using the format "uri/{var[:regexp]}" or the special version "uri/{var:*}" for matching the tail of the path.
For example, /persons/{name:[A-Z][A-Z]} can be used to restrict values for the parameter "name" to only contain capital alphabetic characters.
Regular expressions must use the standard Go syntax as described in the regexp package. (https://code.google.com/p/re2/wiki/Syntax)
This feature requires the use of a CurlyRouter.
Containers
A Container holds a collection of WebServices, Filters and a http.ServeMux for multiplexing http requests.
Using the statements "restful.Add(...) and restful.Filter(...)" will register WebServices and Filters to the Default Container.
The Default container of go-restful uses the http.DefaultServeMux.
You can create your own Container and create a new http.Server for that particular container.
container := restful.NewContainer()
server := &http.Server{Addr: ":8081", Handler: container}
Filters
A filter dynamically intercepts requests and responses to transform or use the information contained in the requests or responses.
You can use filters to perform generic logging, measurement, authentication, redirect, set response headers etc.
In the restful package there are three hooks into the request,response flow where filters can be added.
Each filter must define a FilterFunction:
func (req *restful.Request, resp *restful.Response, chain *restful.FilterChain)
Use the following statement to pass the request,response pair to the next filter or RouteFunction
chain.ProcessFilter(req, resp)
Container Filters
These are processed before any registered WebService.
// install a (global) filter for the default container (processed before any webservice)
restful.Filter(globalLogging)
WebService Filters
These are processed before any Route of a WebService.
// install a webservice filter (processed before any route)
ws.Filter(webserviceLogging).Filter(measureTime)
Route Filters
These are processed before calling the function associated with the Route.
// install 2 chained route filters (processed before calling findUser)
ws.Route(ws.GET("/{user-id}").Filter(routeLogging).Filter(NewCountFilter().routeCounter).To(findUser))
See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-filters.go with full implementations.
Response Encoding
Two encodings are supported: gzip and deflate. To enable this for all responses:
restful.DefaultContainer.EnableContentEncoding(true)
If a Http request includes the Accept-Encoding header then the response content will be compressed using the specified encoding.
Alternatively, you can create a Filter that performs the encoding and install it per WebService or Route.
See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-encoding-filter.go
OPTIONS support
By installing a pre-defined container filter, your Webservice(s) can respond to the OPTIONS Http request.
Filter(OPTIONSFilter())
CORS
By installing the filter of a CrossOriginResourceSharing (CORS), your WebService(s) can handle CORS requests.
cors := CrossOriginResourceSharing{ExposeHeaders: []string{"X-My-Header"}, CookiesAllowed: false, Container: DefaultContainer}
Filter(cors.Filter)
Error Handling
Unexpected things happen. If a request cannot be processed because of a failure, your service needs to tell via the response what happened and why.
For this reason HTTP status codes exist and it is important to use the correct code in every exceptional situation.
400: Bad Request
If path or query parameters are not valid (content or type) then use http.StatusBadRequest.
404: Not Found
Despite a valid URI, the resource requested may not be available
500: Internal Server Error
If the application logic could not process the request (or write the response) then use http.StatusInternalServerError.
405: Method Not Allowed
The request has a valid URL but the method (GET,PUT,POST,...) is not allowed.
406: Not Acceptable
The request does not have or has an unknown Accept Header set for this operation.
415: Unsupported Media Type
The request does not have or has an unknown Content-Type Header set for this operation.
ServiceError
In addition to setting the correct (error) Http status code, you can choose to write a ServiceError message on the response.
Performance options
This package has several options that affect the performance of your service. It is important to understand them and how you can change it.
restful.DefaultContainer.Router(CurlyRouter{})
The default router is the RouterJSR311 which is an implementation of its spec (http://jsr311.java.net/nonav/releases/1.1/spec/spec.html).
However, it uses regular expressions for all its routes which, depending on your use case, may consume a significant amount of time.
The CurlyRouter implementation is a more lightweight alternative that still allows you to use wildcards and expressions, but only where needed.
restful.DefaultContainer.DoNotRecover(true)
DoNotRecover controls whether panics will be caught to return HTTP 500.
If set to true, Route functions are responsible for handling any error situation.
Default value is false; it will recover from panics. This has performance implications.
restful.SetCacheReadEntity(false)
SetCacheReadEntity controls whether the request body ([]byte) is cached such that ReadEntity is repeatable.
If you expect to read large amounts of payload data, and you do not use this feature, you should set it to false.
restful.SetCompressorProvider(NewBoundedCachedCompressors(20, 20))
If content encoding is enabled then the default strategy for getting new gzip/zlib writers and readers is to use a sync.Pool.
Because writers are expensive structures, performance is even more improved when using a preloaded cache. You can also inject your own implementation.
Trouble shooting
This package has the means to produce detail logging of the complete Http request matching process and filter invocation.
Enabling this feature requires you to set an implementation of restful.StdLogger (e.g. log.Logger) instance such as:
restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile))
Logging
The restful.SetLogger() method allows you to override the logger used by the package. By default restful
uses the standard library `log` package and logs to stdout. Different logging packages are supported as
long as they conform to `StdLogger` interface defined in the `log` sub-package, writing an adapter for your
preferred package is simple.
Resources
[project]: https://github.com/emicklei/go-restful
[examples]: https://github.com/emicklei/go-restful/blob/master/examples
[design]: http://ernestmicklei.com/2012/11/11/go-restful-api-design/
[showcases]: https://github.com/emicklei/mora, https://github.com/emicklei/landskape
(c) 2012-2015, http://ernestmicklei.com. MIT License
*/
package restful
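The performance and troubleshooting options listed above can be collected in one place; a minimal sketch (not part of this diff) using only the calls named in the doc comment:

package main

import (
	"log"
	"os"

	"github.com/emicklei/go-restful"
)

func main() {
	// switch the default container to the lighter CurlyRouter
	restful.DefaultContainer.Router(restful.CurlyRouter{})
	// let route functions handle their own panics (no recover, no automatic 500)
	restful.DefaultContainer.DoNotRecover(true)
	// gzip/deflate responses when the client asks for it
	restful.DefaultContainer.EnableContentEncoding(true)
	// do not cache request bodies for repeated ReadEntity calls
	restful.SetCacheReadEntity(false)
	// reuse a bounded cache of gzip/zlib writers and readers
	restful.SetCompressorProvider(restful.NewBoundedCachedCompressors(20, 20))
	// detailed trace logging of request matching and filter invocation
	restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile))
}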

View File

@ -0,0 +1,163 @@
package restful
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"encoding/json"
"encoding/xml"
"strings"
"sync"
)
// EntityReaderWriter can read and write values using an encoding such as JSON,XML.
type EntityReaderWriter interface {
// Read a serialized version of the value from the request.
// The Request may have a decompressing reader. Depends on Content-Encoding.
Read(req *Request, v interface{}) error
// Write a serialized version of the value on the response.
// The Response may have a compressing writer. Depends on Accept-Encoding.
// status should be a valid Http Status code
Write(resp *Response, status int, v interface{}) error
}
// entityAccessRegistry is a singleton
var entityAccessRegistry = &entityReaderWriters{
protection: new(sync.RWMutex),
accessors: map[string]EntityReaderWriter{},
}
// entityReaderWriters associates MIME to an EntityReaderWriter
type entityReaderWriters struct {
protection *sync.RWMutex
accessors map[string]EntityReaderWriter
}
func init() {
RegisterEntityAccessor(MIME_JSON, NewEntityAccessorJSON(MIME_JSON))
RegisterEntityAccessor(MIME_XML, NewEntityAccessorXML(MIME_XML))
}
// RegisterEntityAccessor adds or overrides the ReaderWriter for encoding content with this MIME type.
func RegisterEntityAccessor(mime string, erw EntityReaderWriter) {
entityAccessRegistry.protection.Lock()
defer entityAccessRegistry.protection.Unlock()
entityAccessRegistry.accessors[mime] = erw
}
// NewEntityAccessorJSON returns a new EntityReaderWriter for accessing JSON content.
// This package is already initialized with such an accessor using the MIME_JSON contentType.
func NewEntityAccessorJSON(contentType string) EntityReaderWriter {
return entityJSONAccess{ContentType: contentType}
}
// NewEntityAccessorXML returns a new EntityReaderWriter for accessing XML content.
// This package is already initialized with such an accessor using the MIME_XML contentType.
func NewEntityAccessorXML(contentType string) EntityReaderWriter {
return entityXMLAccess{ContentType: contentType}
}
// accessorAt returns the registered ReaderWriter for this MIME type.
func (r *entityReaderWriters) accessorAt(mime string) (EntityReaderWriter, bool) {
r.protection.RLock()
defer r.protection.RUnlock()
er, ok := r.accessors[mime]
if !ok {
// retry with reverse lookup
// more expensive but we are in an exceptional situation anyway
for k, v := range r.accessors {
if strings.Contains(mime, k) {
return v, true
}
}
}
return er, ok
}
// entityXMLAccess is a EntityReaderWriter for XML encoding
type entityXMLAccess struct {
// This is used for setting the Content-Type header when writing
ContentType string
}
// Read unmarshals the value from XML
func (e entityXMLAccess) Read(req *Request, v interface{}) error {
return xml.NewDecoder(req.Request.Body).Decode(v)
}
// Write marshals the value to XML and sets the Content-Type header.
func (e entityXMLAccess) Write(resp *Response, status int, v interface{}) error {
return writeXML(resp, status, e.ContentType, v)
}
// writeXML marshals the value to XML and sets the Content-Type header.
func writeXML(resp *Response, status int, contentType string, v interface{}) error {
if v == nil {
resp.WriteHeader(status)
// do not write a nil representation
return nil
}
if resp.prettyPrint {
// pretty output must be created and written explicitly
output, err := xml.MarshalIndent(v, " ", " ")
if err != nil {
return err
}
resp.Header().Set(HEADER_ContentType, contentType)
resp.WriteHeader(status)
_, err = resp.Write([]byte(xml.Header))
if err != nil {
return err
}
_, err = resp.Write(output)
return err
}
// not-so-pretty
resp.Header().Set(HEADER_ContentType, contentType)
resp.WriteHeader(status)
return xml.NewEncoder(resp).Encode(v)
}
// entityJSONAccess is a EntityReaderWriter for JSON encoding
type entityJSONAccess struct {
// This is used for setting the Content-Type header when writing
ContentType string
}
// Read unmarshals the value from JSON
func (e entityJSONAccess) Read(req *Request, v interface{}) error {
decoder := json.NewDecoder(req.Request.Body)
decoder.UseNumber()
return decoder.Decode(v)
}
// Write marshals the value to JSON and sets the Content-Type header.
func (e entityJSONAccess) Write(resp *Response, status int, v interface{}) error {
return writeJSON(resp, status, e.ContentType, v)
}
// writeJSON marshals the value to JSON and sets the Content-Type header.
func writeJSON(resp *Response, status int, contentType string, v interface{}) error {
if v == nil {
resp.WriteHeader(status)
// do not write a nil representation
return nil
}
if resp.prettyPrint {
// pretty output must be created and written explicitly
output, err := json.MarshalIndent(v, " ", " ")
if err != nil {
return err
}
resp.Header().Set(HEADER_ContentType, contentType)
resp.WriteHeader(status)
_, err = resp.Write(output)
return err
}
// not-so-pretty
resp.Header().Set(HEADER_ContentType, contentType)
resp.WriteHeader(status)
return json.NewEncoder(resp).Encode(v)
}
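A hedged sketch of a custom EntityReaderWriter registered through RegisterEntityAccessor above; the text/plain MIME type, the package name and the assertion to *string are illustrative assumptions, not part of the vendored code.

package example

import (
	"errors"
	"fmt"
	"io/ioutil"

	"github.com/emicklei/go-restful"
)

// plainTextAccess is a hypothetical accessor for text/plain bodies.
type plainTextAccess struct{}

func (plainTextAccess) Read(req *restful.Request, v interface{}) error {
	data, err := ioutil.ReadAll(req.Request.Body)
	if err != nil {
		return err
	}
	s, ok := v.(*string)
	if !ok {
		return errors.New("plainTextAccess can only read into a *string")
	}
	*s = string(data)
	return nil
}

func (plainTextAccess) Write(resp *restful.Response, status int, v interface{}) error {
	resp.Header().Set("Content-Type", "text/plain")
	resp.WriteHeader(status)
	_, err := fmt.Fprint(resp, v)
	return err
}

func init() {
	// routes that Consume or Produce "text/plain" will now use this accessor
	restful.RegisterEntityAccessor("text/plain", plainTextAccess{})
}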

26
vendor/github.com/emicklei/go-restful/filter.go generated vendored Normal file
View File

@ -0,0 +1,26 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
// FilterChain is a request scoped object to process one or more filters before calling the target RouteFunction.
type FilterChain struct {
Filters []FilterFunction // ordered list of FilterFunction
Index int // index into filters that is currently in progress
Target RouteFunction // function to call after passing all filters
}
// ProcessFilter passes the request,response pair to the next Filter, or to the Target function once all Filters have been called.
// Each filter can decide to proceed to the next Filter or handle the Response itself.
func (f *FilterChain) ProcessFilter(request *Request, response *Response) {
if f.Index < len(f.Filters) {
f.Index++
f.Filters[f.Index-1](request, response, f)
} else {
f.Target(request, response)
}
}
// FilterFunction definitions must call ProcessFilter on the FilterChain to pass on the control and eventually call the RouteFunction
type FilterFunction func(*Request, *Response, *FilterChain)
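A sketch of a FilterFunction with the signature above; the timing and log format are illustrative. It could be installed with restful.Filter(timedLogging) for the default container, or with ws.Filter / routeBuilder.Filter for narrower scopes.

package example

import (
	"log"
	"time"

	"github.com/emicklei/go-restful"
)

// timedLogging is a hypothetical global filter: it forwards the request and
// logs method, path, status code and elapsed time afterwards.
func timedLogging(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
	start := time.Now()
	chain.ProcessFilter(req, resp) // next filter or the RouteFunction
	log.Printf("%s %s -> %d (%v)", req.Request.Method, req.Request.URL.Path, resp.StatusCode(), time.Since(start))
}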

9
vendor/github.com/emicklei/go-restful/install.sh generated vendored Normal file
View File

@ -0,0 +1,9 @@
cd examples
ls *.go | xargs -I {} go build -o /tmp/ignore {}
cd ..
go fmt ...swagger && \
go test -test.v ...swagger && \
go install ...swagger && \
go fmt ...restful && \
go test -test.v ...restful && \
go install ...restful

247
vendor/github.com/emicklei/go-restful/jsr311.go generated vendored Normal file
View File

@ -0,0 +1,247 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"errors"
"fmt"
"net/http"
"sort"
)
// RouterJSR311 implements the flow for matching Requests to Routes (and consequently Resource Functions)
// as specified by the JSR311 http://jsr311.java.net/nonav/releases/1.1/spec/spec.html.
// RouterJSR311 implements the Router interface.
// Concept of locators is not implemented.
type RouterJSR311 struct{}
// SelectRoute is part of the Router interface and returns the best match
// for the WebService and its Route for the given Request.
func (r RouterJSR311) SelectRoute(
webServices []*WebService,
httpRequest *http.Request) (selectedService *WebService, selectedRoute *Route, err error) {
// Identify the root resource class (WebService)
dispatcher, finalMatch, err := r.detectDispatcher(httpRequest.URL.Path, webServices)
if err != nil {
return nil, nil, NewError(http.StatusNotFound, "")
}
// Obtain the set of candidate methods (Routes)
routes := r.selectRoutes(dispatcher, finalMatch)
if len(routes) == 0 {
return dispatcher, nil, NewError(http.StatusNotFound, "404: Page Not Found")
}
// Identify the method (Route) that will handle the request
route, ok := r.detectRoute(routes, httpRequest)
return dispatcher, route, ok
}
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) {
// http method
methodOk := []Route{}
for _, each := range routes {
if httpRequest.Method == each.Method {
methodOk = append(methodOk, each)
}
}
if len(methodOk) == 0 {
if trace {
traceLogger.Printf("no Route found (in %d routes) that matches HTTP method %s\n", len(routes), httpRequest.Method)
}
return nil, NewError(http.StatusMethodNotAllowed, "405: Method Not Allowed")
}
inputMediaOk := methodOk
// content-type
contentType := httpRequest.Header.Get(HEADER_ContentType)
inputMediaOk = []Route{}
for _, each := range methodOk {
if each.matchesContentType(contentType) {
inputMediaOk = append(inputMediaOk, each)
}
}
if len(inputMediaOk) == 0 {
if trace {
traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(methodOk), contentType)
}
return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type")
}
// accept
outputMediaOk := []Route{}
accept := httpRequest.Header.Get(HEADER_Accept)
if accept == "" {
accept = "*/*"
}
for _, each := range inputMediaOk {
if each.matchesAccept(accept) {
outputMediaOk = append(outputMediaOk, each)
}
}
if len(outputMediaOk) == 0 {
if trace {
traceLogger.Printf("no Route found (from %d) that matches HTTP Accept: %s\n", len(inputMediaOk), accept)
}
return nil, NewError(http.StatusNotAcceptable, "406: Not Acceptable")
}
return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil
}
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
// n/m > n/* > */*
func (r RouterJSR311) bestMatchByMedia(routes []Route, contentType string, accept string) *Route {
// TODO
return &routes[0]
}
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 2)
func (r RouterJSR311) selectRoutes(dispatcher *WebService, pathRemainder string) []Route {
filtered := &sortableRouteCandidates{}
for _, each := range dispatcher.Routes() {
pathExpr := each.pathExpr
matches := pathExpr.Matcher.FindStringSubmatch(pathRemainder)
if matches != nil {
lastMatch := matches[len(matches)-1]
if len(lastMatch) == 0 || lastMatch == "/" { // only include if the remainder is empty or a single slash
filtered.candidates = append(filtered.candidates,
routeCandidate{each, len(matches) - 1, pathExpr.LiteralCount, pathExpr.VarCount})
}
}
}
if len(filtered.candidates) == 0 {
if trace {
traceLogger.Printf("WebService on path %s has no routes that match URL path remainder:%s\n", dispatcher.rootPath, pathRemainder)
}
return []Route{}
}
sort.Sort(sort.Reverse(filtered))
// select other routes from candidates whose expression matches the path remainder
matchingRoutes := []Route{filtered.candidates[0].route}
for c := 1; c < len(filtered.candidates); c++ {
each := filtered.candidates[c]
if each.route.pathExpr.Matcher.MatchString(pathRemainder) {
matchingRoutes = append(matchingRoutes, each.route)
}
}
return matchingRoutes
}
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 1)
func (r RouterJSR311) detectDispatcher(requestPath string, dispatchers []*WebService) (*WebService, string, error) {
filtered := &sortableDispatcherCandidates{}
for _, each := range dispatchers {
matches := each.pathExpr.Matcher.FindStringSubmatch(requestPath)
if matches != nil {
filtered.candidates = append(filtered.candidates,
dispatcherCandidate{each, matches[len(matches)-1], len(matches), each.pathExpr.LiteralCount, each.pathExpr.VarCount})
}
}
if len(filtered.candidates) == 0 {
if trace {
traceLogger.Printf("no WebService was found to match URL path:%s\n", requestPath)
}
return nil, "", errors.New("not found")
}
sort.Sort(sort.Reverse(filtered))
return filtered.candidates[0].dispatcher, filtered.candidates[0].finalMatch, nil
}
// Types and functions to support the sorting of Routes
type routeCandidate struct {
route Route
matchesCount int // the number of capturing groups
literalCount int // the number of literal characters (means those not resulting from template variable substitution)
nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not ([^ /]+?))
}
func (r routeCandidate) expressionToMatch() string {
return r.route.pathExpr.Source
}
func (r routeCandidate) String() string {
return fmt.Sprintf("(m=%d,l=%d,n=%d)", r.matchesCount, r.literalCount, r.nonDefaultCount)
}
type sortableRouteCandidates struct {
candidates []routeCandidate
}
func (rcs *sortableRouteCandidates) Len() int {
return len(rcs.candidates)
}
func (rcs *sortableRouteCandidates) Swap(i, j int) {
rcs.candidates[i], rcs.candidates[j] = rcs.candidates[j], rcs.candidates[i]
}
func (rcs *sortableRouteCandidates) Less(i, j int) bool {
ci := rcs.candidates[i]
cj := rcs.candidates[j]
// primary key
if ci.literalCount < cj.literalCount {
return true
}
if ci.literalCount > cj.literalCount {
return false
}
// secondary key
if ci.matchesCount < cj.matchesCount {
return true
}
if ci.matchesCount > cj.matchesCount {
return false
}
// tertiary key
if ci.nonDefaultCount < cj.nonDefaultCount {
return true
}
if ci.nonDefaultCount > cj.nonDefaultCount {
return false
}
// quaternary key ("source" is interpreted as Path)
return ci.route.Path < cj.route.Path
}
// Types and functions to support the sorting of Dispatchers
type dispatcherCandidate struct {
dispatcher *WebService
finalMatch string
matchesCount int // the number of capturing groups
literalCount int // the number of literal characters (means those not resulting from template variable substitution)
nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not ([^ /]+?))
}
type sortableDispatcherCandidates struct {
candidates []dispatcherCandidate
}
func (dc *sortableDispatcherCandidates) Len() int {
return len(dc.candidates)
}
func (dc *sortableDispatcherCandidates) Swap(i, j int) {
dc.candidates[i], dc.candidates[j] = dc.candidates[j], dc.candidates[i]
}
func (dc *sortableDispatcherCandidates) Less(i, j int) bool {
ci := dc.candidates[i]
cj := dc.candidates[j]
// primary key
if ci.matchesCount < cj.matchesCount {
return true
}
if ci.matchesCount > cj.matchesCount {
return false
}
// secondary key
if ci.literalCount < cj.literalCount {
return true
}
if ci.literalCount > cj.literalCount {
return false
}
// tertiary key
return ci.nonDefaultCount < cj.nonDefaultCount
}

31
vendor/github.com/emicklei/go-restful/log/log.go generated vendored Normal file
View File

@ -0,0 +1,31 @@
package log
import (
stdlog "log"
"os"
)
// StdLogger corresponds to a minimal subset of the interface satisfied by the stdlib log.Logger
type StdLogger interface {
Print(v ...interface{})
Printf(format string, v ...interface{})
}
var Logger StdLogger
func init() {
// default Logger
SetLogger(stdlog.New(os.Stderr, "[restful] ", stdlog.LstdFlags|stdlog.Lshortfile))
}
func SetLogger(customLogger StdLogger) {
Logger = customLogger
}
func Print(v ...interface{}) {
Logger.Print(v...)
}
func Printf(format string, v ...interface{}) {
Logger.Printf(format, v...)
}

32
vendor/github.com/emicklei/go-restful/logger.go generated vendored Normal file
View File

@ -0,0 +1,32 @@
package restful
// Copyright 2014 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"github.com/emicklei/go-restful/log"
)
var trace bool = false
var traceLogger log.StdLogger
func init() {
traceLogger = log.Logger // use the package logger by default
}
// TraceLogger enables detailed logging of Http request matching and filter invocation. Tracing is disabled by default.
// You may call EnableTracing() directly to enable trace logging to the package-wide logger.
func TraceLogger(logger log.StdLogger) {
traceLogger = logger
EnableTracing(logger != nil)
}
// SetLogger exposes the setter for the global logger on the top-level package
func SetLogger(customLogger log.StdLogger) {
log.SetLogger(customLogger)
}
// EnableTracing can be used to Trace logging on and off.
func EnableTracing(enabled bool) {
trace = enabled
}
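A sketch of wiring the two logging hooks above; any value implementing Print/Printf works, and the standard library *log.Logger is assumed here.

package example

import (
	stdlog "log"
	"os"

	"github.com/emicklei/go-restful"
)

func init() {
	// replace the package-wide logger (it writes to stderr by default)
	restful.SetLogger(stdlog.New(os.Stderr, "[api] ", stdlog.LstdFlags))
	// send request-matching traces to stdout; passing a non-nil logger also enables tracing
	restful.TraceLogger(stdlog.New(os.Stdout, "[trace] ", stdlog.LstdFlags|stdlog.Lshortfile))
}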

45
vendor/github.com/emicklei/go-restful/mime.go generated vendored Normal file
View File

@ -0,0 +1,45 @@
package restful
import (
"strconv"
"strings"
)
type mime struct {
media string
quality float64
}
// insertMime adds a mime to a list and keeps it sorted by quality.
func insertMime(l []mime, e mime) []mime {
for i, each := range l {
// if the new mime has a higher quality than the current one, insert it before
if e.quality > each.quality {
left := append([]mime{}, l[0:i]...)
return append(append(left, e), l[i:]...)
}
}
return append(l, e)
}
// sortedMimes returns a list of mime sorted (desc) by its specified quality.
func sortedMimes(accept string) (sorted []mime) {
for _, each := range strings.Split(accept, ",") {
typeAndQuality := strings.Split(strings.Trim(each, " "), ";")
if len(typeAndQuality) == 1 {
sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0})
} else {
// take factor
parts := strings.Split(typeAndQuality[1], "=")
if len(parts) == 2 {
f, err := strconv.ParseFloat(parts[1], 64)
if err != nil {
traceLogger.Printf("unable to parse quality in %s, %v", each, err)
} else {
sorted = insertMime(sorted, mime{typeAndQuality[0], f})
}
}
}
}
return
}
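A sketch of an in-package test for the quality sorting above; sortedMimes is unexported, so this would have to live next to mime.go in package restful, and the Accept value is illustrative.

package restful

import "testing"

func TestSortedMimesOrdersByQuality(t *testing.T) {
	// application/json has the implicit quality 1.0 and must sort before text/html;q=0.8
	sorted := sortedMimes("text/html;q=0.8, application/json")
	if len(sorted) != 2 || sorted[0].media != "application/json" || sorted[1].media != "text/html" {
		t.Fatalf("unexpected order: %#v", sorted)
	}
}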

View File

@ -0,0 +1,26 @@
package restful
import "strings"
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method
// and provides the response with a set of allowed methods for the request URL Path.
// As for any filter, you can also install it for a particular WebService within a Container.
// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS).
func (c *Container) OPTIONSFilter(req *Request, resp *Response, chain *FilterChain) {
if "OPTIONS" != req.Request.Method {
chain.ProcessFilter(req, resp)
return
}
resp.AddHeader(HEADER_Allow, strings.Join(c.computeAllowedMethods(req), ","))
}
// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method
// and provides the response with a set of allowed methods for the request URL Path.
// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS).
func OPTIONSFilter() FilterFunction {
return DefaultContainer.OPTIONSFilter
}
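A sketch of installing the OPTIONS filter above, either on the default container or on a dedicated one; restful.Filter, NewContainer and Container.Filter are defined elsewhere in the package and assumed here.

package main

import (
	"log"
	"net/http"

	"github.com/emicklei/go-restful"
)

func main() {
	// on the default container
	restful.Filter(restful.OPTIONSFilter())

	// or: a dedicated container with its own OPTIONS filter
	c := restful.NewContainer()
	c.Filter(c.OPTIONSFilter)

	log.Fatal(http.ListenAndServe(":8080", c))
}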

114
vendor/github.com/emicklei/go-restful/parameter.go generated vendored Normal file
View File

@ -0,0 +1,114 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
const (
// PathParameterKind = indicator of Request parameter type "path"
PathParameterKind = iota
// QueryParameterKind = indicator of Request parameter type "query"
QueryParameterKind
// BodyParameterKind = indicator of Request parameter type "body"
BodyParameterKind
// HeaderParameterKind = indicator of Request parameter type "header"
HeaderParameterKind
// FormParameterKind = indicator of Request parameter type "form"
FormParameterKind
)
// Parameter is for documenting the parameter used in a Http Request
// ParameterData kinds are Path, Query and Body
type Parameter struct {
data *ParameterData
}
// ParameterData represents the state of a Parameter.
// It is made public to make it accessible to e.g. the Swagger package.
type ParameterData struct {
Name, Description, DataType, DataFormat string
Kind int
Required bool
AllowableValues map[string]string
AllowMultiple bool
DefaultValue string
}
// Data returns the state of the Parameter
func (p *Parameter) Data() ParameterData {
return *p.data
}
// Kind returns the parameter type indicator (see const for valid values)
func (p *Parameter) Kind() int {
return p.data.Kind
}
func (p *Parameter) bePath() *Parameter {
p.data.Kind = PathParameterKind
return p
}
func (p *Parameter) beQuery() *Parameter {
p.data.Kind = QueryParameterKind
return p
}
func (p *Parameter) beBody() *Parameter {
p.data.Kind = BodyParameterKind
return p
}
func (p *Parameter) beHeader() *Parameter {
p.data.Kind = HeaderParameterKind
return p
}
func (p *Parameter) beForm() *Parameter {
p.data.Kind = FormParameterKind
return p
}
// Required sets the required field and returns the receiver
func (p *Parameter) Required(required bool) *Parameter {
p.data.Required = required
return p
}
// AllowMultiple sets the allowMultiple field and returns the receiver
func (p *Parameter) AllowMultiple(multiple bool) *Parameter {
p.data.AllowMultiple = multiple
return p
}
// AllowableValues sets the allowableValues field and returns the receiver
func (p *Parameter) AllowableValues(values map[string]string) *Parameter {
p.data.AllowableValues = values
return p
}
// DataType sets the dataType field and returns the receiver
func (p *Parameter) DataType(typeName string) *Parameter {
p.data.DataType = typeName
return p
}
// DataFormat sets the dataFormat field for Swagger UI
func (p *Parameter) DataFormat(formatName string) *Parameter {
p.data.DataFormat = formatName
return p
}
// DefaultValue sets the default value field and returns the receiver
func (p *Parameter) DefaultValue(stringRepresentation string) *Parameter {
p.data.DefaultValue = stringRepresentation
return p
}
// Description sets the description value field and returns the receiver
func (p *Parameter) Description(doc string) *Parameter {
p.data.Description = doc
return p
}
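A sketch of documenting parameters with the builder methods above; the WebService helper constructors (ws.PathParameter, ws.QueryParameter), ws.GET and restful.Add live in other files of the package and are assumed here, and the route itself is illustrative.

package main

import "github.com/emicklei/go-restful"

func findUser(req *restful.Request, resp *restful.Response) {
	resp.WriteEntity(map[string]string{"id": req.PathParameter("user-id")})
}

func main() {
	ws := new(restful.WebService)
	ws.Path("/users").Produces(restful.MIME_JSON)
	ws.Route(ws.GET("/{user-id}").To(findUser).
		Doc("get a user by its id").
		Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
		Param(ws.QueryParameter("verbose", "include extra fields").
			DataType("boolean").DefaultValue("false").Required(false)))
	restful.Add(ws)
}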

View File

@ -0,0 +1,69 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"fmt"
"regexp"
"strings"
)
// PathExpression holds a compiled path expression (RegExp) needed to match against
// Http request paths and to extract path parameter values.
type pathExpression struct {
LiteralCount int // the number of literal characters (means those not resulting from template variable substitution)
VarCount int // the number of named parameters (enclosed by {}) in the path
Matcher *regexp.Regexp
Source string // Path as defined by the RouteBuilder
tokens []string
}
// NewPathExpression creates a PathExpression from the input URL path.
// Returns an error if the path is invalid.
func newPathExpression(path string) (*pathExpression, error) {
expression, literalCount, varCount, tokens := templateToRegularExpression(path)
compiled, err := regexp.Compile(expression)
if err != nil {
return nil, err
}
return &pathExpression{literalCount, varCount, compiled, expression, tokens}, nil
}
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-370003.7.3
func templateToRegularExpression(template string) (expression string, literalCount int, varCount int, tokens []string) {
var buffer bytes.Buffer
buffer.WriteString("^")
//tokens = strings.Split(template, "/")
tokens = tokenizePath(template)
for _, each := range tokens {
if each == "" {
continue
}
buffer.WriteString("/")
if strings.HasPrefix(each, "{") {
// check for regular expression in variable
colon := strings.Index(each, ":")
if colon != -1 {
// extract expression
paramExpr := strings.TrimSpace(each[colon+1 : len(each)-1])
if paramExpr == "*" { // special case
buffer.WriteString("(.*)")
} else {
buffer.WriteString(fmt.Sprintf("(%s)", paramExpr)) // between colon and closing moustache
}
} else {
// plain var
buffer.WriteString("([^/]+?)")
}
varCount += 1
} else {
literalCount += len(each)
encoded := each // TODO URI encode
buffer.WriteString(regexp.QuoteMeta(encoded))
}
}
return strings.TrimRight(buffer.String(), "/") + "(/.*)?$", literalCount, varCount, tokens
}

131
vendor/github.com/emicklei/go-restful/request.go generated vendored Normal file
View File

@ -0,0 +1,131 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"compress/zlib"
"io/ioutil"
"net/http"
)
var defaultRequestContentType string
var doCacheReadEntityBytes = true
// Request is a wrapper for a http Request that provides convenience methods
type Request struct {
Request *http.Request
bodyContent *[]byte // to cache the request body for multiple reads of ReadEntity
pathParameters map[string]string
attributes map[string]interface{} // for storing request-scoped values
selectedRoutePath string // root path + route path that matched the request, e.g. /meetings/{id}/attendees
}
func NewRequest(httpRequest *http.Request) *Request {
return &Request{
Request: httpRequest,
pathParameters: map[string]string{},
attributes: map[string]interface{}{},
} // empty parameters, attributes
}
// If ContentType is missing or */* is given then fall back to this type, otherwise
// an "Unable to unmarshal content of type:" response is returned.
// Valid values are restful.MIME_JSON and restful.MIME_XML
// Example:
// restful.DefaultRequestContentType(restful.MIME_JSON)
func DefaultRequestContentType(mime string) {
defaultRequestContentType = mime
}
// SetCacheReadEntity controls whether the request body ([]byte) is cached such that ReadEntity is repeatable.
// Default is true (for backward compatibility). For better performance, you should set it to false if you don't need it.
func SetCacheReadEntity(doCache bool) {
doCacheReadEntityBytes = doCache
}
// PathParameter accesses the Path parameter value by its name
func (r *Request) PathParameter(name string) string {
return r.pathParameters[name]
}
// PathParameters accesses the Path parameter values
func (r *Request) PathParameters() map[string]string {
return r.pathParameters
}
// QueryParameter returns the (first) Query parameter value by its name
func (r *Request) QueryParameter(name string) string {
return r.Request.FormValue(name)
}
// BodyParameter parses the body of the request (once for typically a POST or a PUT) and returns the value of the given name or an error.
func (r *Request) BodyParameter(name string) (string, error) {
err := r.Request.ParseForm()
if err != nil {
return "", err
}
return r.Request.PostFormValue(name), nil
}
// HeaderParameter returns the HTTP Header value of a Header name or empty if missing
func (r *Request) HeaderParameter(name string) string {
return r.Request.Header.Get(name)
}
// ReadEntity checks the Content-Type header and reads the content into the entityPointer.
func (r *Request) ReadEntity(entityPointer interface{}) (err error) {
contentType := r.Request.Header.Get(HEADER_ContentType)
contentEncoding := r.Request.Header.Get(HEADER_ContentEncoding)
// OLD feature, cache the body for reads
if doCacheReadEntityBytes {
if r.bodyContent == nil {
data, err := ioutil.ReadAll(r.Request.Body)
if err != nil {
return err
}
r.bodyContent = &data
}
r.Request.Body = ioutil.NopCloser(bytes.NewReader(*r.bodyContent))
}
// check if the request body needs decompression
if ENCODING_GZIP == contentEncoding {
gzipReader := currentCompressorProvider.AcquireGzipReader()
defer currentCompressorProvider.ReleaseGzipReader(gzipReader)
gzipReader.Reset(r.Request.Body)
r.Request.Body = gzipReader
} else if ENCODING_DEFLATE == contentEncoding {
zlibReader, err := zlib.NewReader(r.Request.Body)
if err != nil {
return err
}
r.Request.Body = zlibReader
}
// lookup the EntityReader
entityReader, ok := entityAccessRegistry.accessorAt(contentType)
if !ok {
return NewError(http.StatusBadRequest, "Unable to unmarshal content of type:"+contentType)
}
return entityReader.Read(r, entityPointer)
}
// SetAttribute adds or replaces the attribute with the given value.
func (r *Request) SetAttribute(name string, value interface{}) {
r.attributes[name] = value
}
// Attribute returns the value associated to the given name. Returns nil if absent.
func (r Request) Attribute(name string) interface{} {
return r.attributes[name]
}
// SelectedRoutePath root path + route path that matched the request, e.g. /meetings/{id}/attendees
func (r Request) SelectedRoutePath() string {
return r.selectedRoutePath
}
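A sketch of a handler using the Request helpers above (PathParameter, ReadEntity); ws.POST and restful.Add come from other files of the package, the Response helpers (WriteErrorString, WriteHeaderAndEntity) are added in the next file, and the user type is illustrative.

package main

import (
	"log"
	"net/http"

	"github.com/emicklei/go-restful"
)

type user struct {
	Name string `json:"name" xml:"name"`
}

func createUser(req *restful.Request, resp *restful.Response) {
	groupID := req.PathParameter("group-id")

	u := new(user)
	if err := req.ReadEntity(u); err != nil {
		// an unknown Content-Type or an undecodable body ends up here
		resp.WriteErrorString(http.StatusBadRequest, err.Error())
		return
	}
	resp.AddHeader("X-Group-Id", groupID)
	resp.WriteHeaderAndEntity(http.StatusCreated, u)
}

func main() {
	ws := new(restful.WebService)
	ws.Path("/groups").Consumes(restful.MIME_JSON, restful.MIME_XML).Produces(restful.MIME_JSON)
	ws.Route(ws.POST("/{group-id}/users").To(createUser))
	restful.Add(ws)
	log.Fatal(http.ListenAndServe(":8080", nil))
}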

235
vendor/github.com/emicklei/go-restful/response.go generated vendored Normal file
View File

@ -0,0 +1,235 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"errors"
"net/http"
)
// DEPRECATED, use DefaultResponseContentType(mime)
var DefaultResponseMimeType string
// PrettyPrintResponses controls the indentation feature of XML and JSON serialization
var PrettyPrintResponses = true
// Response is a wrapper on the actual http ResponseWriter
// It provides several convenience methods to prepare and write response content.
type Response struct {
http.ResponseWriter
requestAccept string // mime-type what the Http Request says it wants to receive
routeProduces []string // mime-types what the Route says it can produce
statusCode int // HTTP status code that has been written explicitly (if zero then net/http has written 200)
contentLength int // number of bytes written for the response body
prettyPrint bool // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses.
err error // err property is kept when WriteError is called
}
// Creates a new response based on a http ResponseWriter.
func NewResponse(httpWriter http.ResponseWriter) *Response {
return &Response{httpWriter, "", []string{}, http.StatusOK, 0, PrettyPrintResponses, nil} // empty content-types
}
// If Accept header matching fails, fall back to this type.
// Valid values are restful.MIME_JSON and restful.MIME_XML
// Example:
// restful.DefaultResponseContentType(restful.MIME_JSON)
func DefaultResponseContentType(mime string) {
DefaultResponseMimeType = mime
}
// InternalServerError writes the StatusInternalServerError header.
// DEPRECATED, use WriteErrorString(http.StatusInternalServerError,reason)
func (r Response) InternalServerError() Response {
r.WriteHeader(http.StatusInternalServerError)
return r
}
// PrettyPrint changes whether this response must produce pretty (line-by-line, indented) JSON or XML output.
func (r *Response) PrettyPrint(bePretty bool) {
r.prettyPrint = bePretty
}
// AddHeader is a shortcut for .Header().Add(header,value)
func (r Response) AddHeader(header string, value string) Response {
r.Header().Add(header, value)
return r
}
// SetRequestAccepts tells the response what Mime-type(s) the HTTP request said it wants to accept. Exposed for testing.
func (r *Response) SetRequestAccepts(mime string) {
r.requestAccept = mime
}
// EntityWriter returns the registered EntityWriter that the entity (requested resource)
// can write according to what the request wants (Accept) and what the Route can produce or what the restful defaults say.
// If called before WriteEntity and WriteHeader then a false return value can be used to write a 406: Not Acceptable.
func (r *Response) EntityWriter() (EntityReaderWriter, bool) {
sorted := sortedMimes(r.requestAccept)
for _, eachAccept := range sorted {
for _, eachProduce := range r.routeProduces {
if eachProduce == eachAccept.media {
if w, ok := entityAccessRegistry.accessorAt(eachAccept.media); ok {
return w, true
}
}
}
if eachAccept.media == "*/*" {
for _, each := range r.routeProduces {
if w, ok := entityAccessRegistry.accessorAt(each); ok {
return w, true
}
}
}
}
// if requestAccept is empty
writer, ok := entityAccessRegistry.accessorAt(r.requestAccept)
if !ok {
// if not registered then fallback to the defaults (if set)
if DefaultResponseMimeType == MIME_JSON {
return entityAccessRegistry.accessorAt(MIME_JSON)
}
if DefaultResponseMimeType == MIME_XML {
return entityAccessRegistry.accessorAt(MIME_XML)
}
// Fallback to whatever the route says it can produce.
// https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
for _, each := range r.routeProduces {
if w, ok := entityAccessRegistry.accessorAt(each); ok {
return w, true
}
}
if trace {
traceLogger.Printf("no registered EntityReaderWriter found for %s", r.requestAccept)
}
}
return writer, ok
}
// WriteEntity calls WriteHeaderAndEntity with Http Status OK (200)
func (r *Response) WriteEntity(value interface{}) error {
return r.WriteHeaderAndEntity(http.StatusOK, value)
}
// WriteHeaderAndEntity marshals the value using the representation denoted by the Accept Header and the registered EntityWriters.
// If no Accept header is specified (or */*) then respond with the Content-Type as specified by the first in the Route.Produces.
// If an Accept header is specified then respond with the Content-Type as specified by the first in the Route.Produces that is matched with the Accept header.
// If the value is nil then no response is sent except for the Http status. You may want to call WriteHeader(http.StatusNotFound) instead.
// If there is no writer available that can represent the value in the requested MIME type then Http Status NotAcceptable is written.
// Current implementation ignores any q-parameters in the Accept Header.
// Returns an error if the value could not be written on the response.
func (r *Response) WriteHeaderAndEntity(status int, value interface{}) error {
writer, ok := r.EntityWriter()
if !ok {
r.WriteHeader(http.StatusNotAcceptable)
return nil
}
return writer.Write(r, status, value)
}
// WriteAsXml is a convenience method for writing a value in xml (requires Xml tags on the value)
// It uses the standard encoding/xml package for marshalling the value; it does not use a registered EntityReaderWriter.
func (r *Response) WriteAsXml(value interface{}) error {
return writeXML(r, http.StatusOK, MIME_XML, value)
}
// WriteHeaderAndXml is a convenience method for writing a status and value in xml (requires Xml tags on the value)
// It uses the standard encoding/xml package for marshalling the value; it does not use a registered EntityReaderWriter.
func (r *Response) WriteHeaderAndXml(status int, value interface{}) error {
return writeXML(r, status, MIME_XML, value)
}
// WriteAsJson is a convenience method for writing a value in json.
// It uses the standard encoding/json package for marshalling the value; it does not use a registered EntityReaderWriter.
func (r *Response) WriteAsJson(value interface{}) error {
return writeJSON(r, http.StatusOK, MIME_JSON, value)
}
// WriteJson is a convenience method for writing a value in Json with a given Content-Type.
// It uses the standard encoding/json package for marshalling the value; it does not use a registered EntityReaderWriter.
func (r *Response) WriteJson(value interface{}, contentType string) error {
return writeJSON(r, http.StatusOK, contentType, value)
}
// WriteHeaderAndJson is a convenience method for writing the status and a value in Json with a given Content-Type.
// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
func (r *Response) WriteHeaderAndJson(status int, value interface{}, contentType string) error {
return writeJSON(r, status, contentType, value)
}
// WriteError write the http status and the error string on the response.
func (r *Response) WriteError(httpStatus int, err error) error {
r.err = err
return r.WriteErrorString(httpStatus, err.Error())
}
// WriteServiceError is a convenience method for responding with a status and a ServiceError
func (r *Response) WriteServiceError(httpStatus int, err ServiceError) error {
r.err = err
return r.WriteHeaderAndEntity(httpStatus, err)
}
// WriteErrorString is a convenience method for an error status with the actual error
func (r *Response) WriteErrorString(httpStatus int, errorReason string) error {
if r.err == nil {
// if not called from WriteError
r.err = errors.New(errorReason)
}
r.WriteHeader(httpStatus)
if _, err := r.Write([]byte(errorReason)); err != nil {
return err
}
return nil
}
// Flush implements http.Flusher interface, which sends any buffered data to the client.
func (r *Response) Flush() {
if f, ok := r.ResponseWriter.(http.Flusher); ok {
f.Flush()
} else if trace {
traceLogger.Printf("ResponseWriter %v doesn't support Flush", r)
}
}
// WriteHeader is overridden to remember the Status Code that has been written.
// Changes to the Header of the response have no effect after this.
func (r *Response) WriteHeader(httpStatus int) {
r.statusCode = httpStatus
r.ResponseWriter.WriteHeader(httpStatus)
}
// StatusCode returns the code that has been written using WriteHeader.
func (r Response) StatusCode() int {
if 0 == r.statusCode {
// no status code has been written yet; assume OK
return http.StatusOK
}
return r.statusCode
}
// Write writes the data to the connection as part of an HTTP reply.
// Write is part of http.ResponseWriter interface.
func (r *Response) Write(bytes []byte) (int, error) {
written, err := r.ResponseWriter.Write(bytes)
r.contentLength += written
return written, err
}
// ContentLength returns the number of bytes written for the response content.
// Note that this value is only correct if all data is written through the Response using its Write* methods.
// Data written directly using the underlying http.ResponseWriter is not accounted for.
func (r Response) ContentLength() int {
return r.contentLength
}
// CloseNotify is part of http.CloseNotifier interface
func (r Response) CloseNotify() <-chan bool {
return r.ResponseWriter.(http.CloseNotifier).CloseNotify()
}
// Error returns the err created by WriteError
func (r Response) Error() error {
return r.err
}
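A sketch of the Response error helpers above; NewError and the ServiceError type are defined elsewhere in the package, and the package, route and field names are illustrative.

package example

import (
	"net/http"

	"github.com/emicklei/go-restful"
)

func getUser(req *restful.Request, resp *restful.Response) {
	id := req.PathParameter("user-id")
	if id == "" {
		// NewError builds a ServiceError carrying both code and message;
		// WriteServiceError writes it as an entity with the given status.
		resp.WriteServiceError(http.StatusBadRequest,
			restful.NewError(http.StatusBadRequest, "user-id is required"))
		return
	}
	resp.AddHeader("X-User-Id", id)
	resp.WriteEntity(map[string]string{"id": id})
}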

183
vendor/github.com/emicklei/go-restful/route.go generated vendored Normal file
View File

@ -0,0 +1,183 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"net/http"
"strings"
)
// RouteFunction declares the signature of a function that can be bound to a Route.
type RouteFunction func(*Request, *Response)
// Route binds a HTTP Method,Path,Consumes combination to a RouteFunction.
type Route struct {
Method string
Produces []string
Consumes []string
Path string // webservice root path + described path
Function RouteFunction
Filters []FilterFunction
// cached values for dispatching
relativePath string
pathParts []string
pathExpr *pathExpression // cached compilation of relativePath as RegExp
// documentation
Doc string
Notes string
Operation string
ParameterDocs []*Parameter
ResponseErrors map[int]ResponseError
ReadSample, WriteSample interface{} // structs that model an example request or response payload
}
// Initialize for Route
func (r *Route) postBuild() {
r.pathParts = tokenizePath(r.Path)
}
// Create Request and Response from their http versions
func (r *Route) wrapRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) {
params := r.extractParameters(httpRequest.URL.Path)
wrappedRequest := NewRequest(httpRequest)
wrappedRequest.pathParameters = params
wrappedRequest.selectedRoutePath = r.Path
wrappedResponse := NewResponse(httpWriter)
wrappedResponse.requestAccept = httpRequest.Header.Get(HEADER_Accept)
wrappedResponse.routeProduces = r.Produces
return wrappedRequest, wrappedResponse
}
// dispatchWithFilters calls the function after passing through its own filters
func (r *Route) dispatchWithFilters(wrappedRequest *Request, wrappedResponse *Response) {
if len(r.Filters) > 0 {
chain := FilterChain{Filters: r.Filters, Target: r.Function}
chain.ProcessFilter(wrappedRequest, wrappedResponse)
} else {
// unfiltered
r.Function(wrappedRequest, wrappedResponse)
}
}
// Return whether the mimeType matches what this Route can produce.
func (r Route) matchesAccept(mimeTypesWithQuality string) bool {
parts := strings.Split(mimeTypesWithQuality, ",")
for _, each := range parts {
var withoutQuality string
if strings.Contains(each, ";") {
withoutQuality = strings.Split(each, ";")[0]
} else {
withoutQuality = each
}
// trim before compare
withoutQuality = strings.Trim(withoutQuality, " ")
if withoutQuality == "*/*" {
return true
}
for _, producibleType := range r.Produces {
if producibleType == "*/*" || producibleType == withoutQuality {
return true
}
}
}
return false
}
// Return whether this Route can consume content with a type specified by mimeTypes (can be empty).
func (r Route) matchesContentType(mimeTypes string) bool {
if len(r.Consumes) == 0 {
// did not specify what it can consume ; any media type (“*/*”) is assumed
return true
}
if len(mimeTypes) == 0 {
// idempotent methods with (most likely or guaranteed) empty content match a missing Content-Type
m := r.Method
if m == "GET" || m == "HEAD" || m == "OPTIONS" || m == "DELETE" || m == "TRACE" {
return true
}
// proceed with default
mimeTypes = MIME_OCTET
}
parts := strings.Split(mimeTypes, ",")
for _, each := range parts {
var contentType string
if strings.Contains(each, ";") {
contentType = strings.Split(each, ";")[0]
} else {
contentType = each
}
// trim before compare
contentType = strings.Trim(contentType, " ")
for _, consumeableType := range r.Consumes {
if consumeableType == "*/*" || consumeableType == contentType {
return true
}
}
}
return false
}
// Extract the parameters from the request url path
func (r Route) extractParameters(urlPath string) map[string]string {
urlParts := tokenizePath(urlPath)
pathParameters := map[string]string{}
for i, key := range r.pathParts {
var value string
if i >= len(urlParts) {
value = ""
} else {
value = urlParts[i]
}
if strings.HasPrefix(key, "{") { // path-parameter
if colon := strings.Index(key, ":"); colon != -1 {
// extract by regex
regPart := key[colon+1 : len(key)-1]
keyPart := key[1:colon]
if regPart == "*" {
pathParameters[keyPart] = untokenizePath(i, urlParts)
break
} else {
pathParameters[keyPart] = value
}
} else {
// without enclosing {}
pathParameters[key[1:len(key)-1]] = value
}
}
}
return pathParameters
}
// Untokenize back into a URL path using the slash separator
func untokenizePath(offset int, parts []string) string {
var buffer bytes.Buffer
for p := offset; p < len(parts); p++ {
buffer.WriteString(parts[p])
// do not end
if p < len(parts)-1 {
buffer.WriteString("/")
}
}
return buffer.String()
}
// Tokenize a URL path using the slash separator; the result does not have empty tokens
func tokenizePath(path string) []string {
if "/" == path {
return []string{}
}
return strings.Split(strings.Trim(path, "/"), "/")
}
// for debugging
func (r Route) String() string {
return r.Method + " " + r.Path
}

240
vendor/github.com/emicklei/go-restful/route_builder.go generated vendored Normal file
View File

@ -0,0 +1,240 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"os"
"reflect"
"runtime"
"strings"
"github.com/emicklei/go-restful/log"
)
// RouteBuilder is a helper to construct Routes.
type RouteBuilder struct {
rootPath string
currentPath string
produces []string
consumes []string
httpMethod string // required
function RouteFunction // required
filters []FilterFunction
// documentation
doc string
notes string
operation string
readSample, writeSample interface{}
parameters []*Parameter
errorMap map[int]ResponseError
}
// Do evaluates each argument with the RouteBuilder itself.
// This allows you to follow DRY principles without breaking the fluent programming style.
// Example:
// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500))
//
// func Returns500(b *RouteBuilder) {
// b.Returns(500, "Internal Server Error", restful.ServiceError{})
// }
func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder {
for _, each := range oneArgBlocks {
each(b)
}
return b
}
// To bind the route to a function.
// If this route is matched with the incoming Http Request then call this function with the *Request,*Response pair. Required.
func (b *RouteBuilder) To(function RouteFunction) *RouteBuilder {
b.function = function
return b
}
// Method specifies what HTTP method to match. Required.
func (b *RouteBuilder) Method(method string) *RouteBuilder {
b.httpMethod = method
return b
}
// Produces specifies what MIME types can be produced ; the matched one will appear in the Content-Type Http header.
func (b *RouteBuilder) Produces(mimeTypes ...string) *RouteBuilder {
b.produces = mimeTypes
return b
}
// Consumes specifies what MIME types can be consumed; the Content-Type Http header must match one of these
func (b *RouteBuilder) Consumes(mimeTypes ...string) *RouteBuilder {
b.consumes = mimeTypes
return b
}
// Path specifies the relative (w.r.t WebService root path) URL path to match. Default is "/".
func (b *RouteBuilder) Path(subPath string) *RouteBuilder {
b.currentPath = subPath
return b
}
// Doc tells what this route is all about. Optional.
func (b *RouteBuilder) Doc(documentation string) *RouteBuilder {
b.doc = documentation
return b
}
// A verbose explanation of the operation behavior. Optional.
func (b *RouteBuilder) Notes(notes string) *RouteBuilder {
b.notes = notes
return b
}
// Reads tells what resource type will be read from the request payload. Optional.
// A parameter of type "body" is added, required is set to true, and the dataType is set to the qualified name of the sample's type.
func (b *RouteBuilder) Reads(sample interface{}) *RouteBuilder {
b.readSample = sample
typeAsName := reflect.TypeOf(sample).String()
bodyParameter := &Parameter{&ParameterData{Name: "body"}}
bodyParameter.beBody()
bodyParameter.Required(true)
bodyParameter.DataType(typeAsName)
b.Param(bodyParameter)
return b
}
// ParameterNamed returns a Parameter already known to the RouteBuilder. Returns nil if no such Parameter exists.
// Use this to modify or extend information for the Parameter (through its Data()).
func (b RouteBuilder) ParameterNamed(name string) (p *Parameter) {
for _, each := range b.parameters {
if each.Data().Name == name {
return each
}
}
return p
}
// Writes tells what resource type will be written as the response payload. Optional.
func (b *RouteBuilder) Writes(sample interface{}) *RouteBuilder {
b.writeSample = sample
return b
}
// Param allows you to document the parameters of the Route. It adds a new Parameter (does not check for duplicates).
func (b *RouteBuilder) Param(parameter *Parameter) *RouteBuilder {
if b.parameters == nil {
b.parameters = []*Parameter{}
}
b.parameters = append(b.parameters, parameter)
return b
}
// Operation allows you to document what the actual method/function call of the Route is.
// Unless called, the operation name is derived from the RouteFunction set using To(..).
func (b *RouteBuilder) Operation(name string) *RouteBuilder {
b.operation = name
return b
}
// ReturnsError is deprecated, use Returns instead.
func (b *RouteBuilder) ReturnsError(code int, message string, model interface{}) *RouteBuilder {
log.Print("ReturnsError is deprecated, use Returns instead.")
return b.Returns(code, message, model)
}
// Returns allows you to document what responses (errors or regular) can be expected.
// The model parameter is optional ; either pass a struct instance or use nil if not applicable.
func (b *RouteBuilder) Returns(code int, message string, model interface{}) *RouteBuilder {
err := ResponseError{
Code: code,
Message: message,
Model: model,
}
// lazy init because there is no NewRouteBuilder (yet)
if b.errorMap == nil {
b.errorMap = map[int]ResponseError{}
}
b.errorMap[code] = err
return b
}
type ResponseError struct {
Code int
Message string
Model interface{}
}
func (b *RouteBuilder) servicePath(path string) *RouteBuilder {
b.rootPath = path
return b
}
// Filter appends a FilterFunction to the end of filters for this Route to build.
func (b *RouteBuilder) Filter(filter FilterFunction) *RouteBuilder {
b.filters = append(b.filters, filter)
return b
}
// copyDefaults sets Produces to rootProduces if no specific Produces were set,
// and Consumes to rootConsumes if no specific Consumes were set.
func (b *RouteBuilder) copyDefaults(rootProduces, rootConsumes []string) {
if len(b.produces) == 0 {
b.produces = rootProduces
}
if len(b.consumes) == 0 {
b.consumes = rootConsumes
}
}
// Build creates a new Route using the specification details collected by the RouteBuilder
func (b *RouteBuilder) Build() Route {
pathExpr, err := newPathExpression(b.currentPath)
if err != nil {
log.Printf("[restful] Invalid path:%s because:%v", b.currentPath, err)
os.Exit(1)
}
if b.function == nil {
log.Printf("[restful] No function specified for route:" + b.currentPath)
os.Exit(1)
}
operationName := b.operation
if len(operationName) == 0 && b.function != nil {
// extract from definition
operationName = nameOfFunction(b.function)
}
route := Route{
Method: b.httpMethod,
Path: concatPath(b.rootPath, b.currentPath),
Produces: b.produces,
Consumes: b.consumes,
Function: b.function,
Filters: b.filters,
relativePath: b.currentPath,
pathExpr: pathExpr,
Doc: b.doc,
Notes: b.notes,
Operation: operationName,
ParameterDocs: b.parameters,
ResponseErrors: b.errorMap,
ReadSample: b.readSample,
WriteSample: b.writeSample}
route.postBuild()
return route
}
func concatPath(path1, path2 string) string {
return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/")
}
// nameOfFunction returns the short name of the function f for documentation.
// It uses a runtime feature for debugging ; its value may change for later Go versions.
func nameOfFunction(f interface{}) string {
fun := runtime.FuncForPC(reflect.ValueOf(f).Pointer())
tokenized := strings.Split(fun.Name(), ".")
last := tokenized[len(tokenized)-1]
last = strings.TrimSuffix(last, ")·fm") // < Go 1.5
last = strings.TrimSuffix(last, ")-fm") // Go 1.5
last = strings.TrimSuffix(last, "·fm") // < Go 1.5
last = strings.TrimSuffix(last, "-fm") // Go 1.5
return last
}
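A short sketch of how these builder methods are typically combined when registering a route. The Book type, the createBook handler, and the shared Returns500 helper are made up for illustration, and the sketch assumes the library's Request.ReadEntity helper for decoding the request body.

package main

import (
	"net/http"

	"github.com/emicklei/go-restful"
)

type Book struct {
	Title string `json:"title"`
}

// Returns500 can be shared across routes via Do(..), as described above.
func Returns500(b *restful.RouteBuilder) {
	b.Returns(http.StatusInternalServerError, "Internal Server Error", restful.ServiceError{})
}

func createBook(req *restful.Request, resp *restful.Response) {
	book := new(Book)
	if err := req.ReadEntity(book); err != nil {
		resp.WriteErrorString(http.StatusBadRequest, err.Error())
		return
	}
	resp.WriteAsJson(book)
}

func main() {
	ws := new(restful.WebService)
	ws.Path("/books").Consumes(restful.MIME_JSON).Produces(restful.MIME_JSON)
	ws.Route(ws.POST("/").To(createBook).
		Doc("create a book").
		Reads(Book{}). // adds a required "body" parameter of type Book
		Do(Returns500))
	restful.Add(ws)
	http.ListenAndServe(":8080", nil)
}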

18
vendor/github.com/emicklei/go-restful/router.go generated vendored Normal file
View File

@ -0,0 +1,18 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import "net/http"
// A RouteSelector finds the best matching Route given the input HTTP Request
type RouteSelector interface {
// SelectRoute finds a Route given the input HTTP Request and a list of WebServices.
// It returns a selected Route and its containing WebService or an error indicating
// a problem.
SelectRoute(
webServices []*WebService,
httpRequest *http.Request) (selectedService *WebService, selected *Route, err error)
}
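The interface can be satisfied by user code. As an illustration only, a naive selector that simply returns the first route of the first registered WebService; this is not the selection algorithm go-restful ships with.

package main

import (
	"net/http"

	"github.com/emicklei/go-restful"
)

// naiveSelector ignores path and method matching entirely; it only
// demonstrates the shape of the RouteSelector contract.
type naiveSelector struct{}

func (naiveSelector) SelectRoute(webServices []*restful.WebService, httpRequest *http.Request) (*restful.WebService, *restful.Route, error) {
	for _, ws := range webServices {
		routes := ws.Routes()
		if len(routes) > 0 {
			return ws, &routes[0], nil
		}
	}
	return nil, nil, restful.NewError(http.StatusNotFound, "no route found")
}

A Container could be configured to use such a selector (the upstream library exposes a Router(..) setter on Container for this), though the built-in selector is normally the right choice.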

23
vendor/github.com/emicklei/go-restful/service_error.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import "fmt"
// ServiceError is a transport object to pass information about a non-Http error that occurred in a WebService while processing a request.
type ServiceError struct {
Code int
Message string
}
// NewError returns a ServiceError using the code and reason
func NewError(code int, message string) ServiceError {
return ServiceError{Code: code, Message: message}
}
// Error returns a text representation of the service error
func (s ServiceError) Error() string {
return fmt.Sprintf("[ServiceError:%v] %v", s.Code, s.Message)
}
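A small sketch of returning such an error from a route function; the getUser handler and the "user-id" parameter are hypothetical.

package main

import (
	"net/http"

	"github.com/emicklei/go-restful"
)

// getUser reports a ServiceError when the required path parameter is missing.
func getUser(req *restful.Request, resp *restful.Response) {
	id := req.PathParameter("user-id")
	if id == "" {
		err := restful.NewError(http.StatusBadRequest, "user-id is required")
		resp.WriteErrorString(err.Code, err.Error())
		return
	}
	resp.WriteAsJson(map[string]string{"id": id})
}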

View File

@ -0,0 +1,43 @@
Change history of swagger
=
2015-10-16
- add type override mechanism for swagger models (MR 254, nathanejohnson)
- replace uses of wildcard in generated apidocs (issue 251)
2015-05-25
- (api break) changed the type of Properties in Model
- (api break) changed the type of Models in ApiDeclaration
- (api break) changed the parameter type of PostBuildDeclarationMapFunc
2015-04-09
- add ModelBuildable interface for customization of Model
2015-03-17
- preserve order of Routes per WebService in Swagger listing
- fix use of $ref and type in Swagger models
- add api version to listing
2014-11-14
- operation parameters are now sorted using ordering path,query,form,header,body
2014-11-12
- respect omitempty tag value for embedded structs
- expose ApiVersion of WebService to Swagger ApiDeclaration
2014-05-29
- (api add) Ability to define custom http.Handler to serve swagger-ui static files
2014-05-04
- (fix) include model for array element type of response
2014-01-03
- (fix) do not add primitive type to the Api models
2013-11-27
- (fix) make Swagger work for WebServices with root ("/" or "") paths
2013-10-29
- (api add) package variable LogInfo to customize logging function
2013-10-15
- upgraded to spec version 1.2 (https://github.com/wordnik/swagger-core/wiki/1.2-transition)

View File

@ -0,0 +1,76 @@
How to use Swagger UI with go-restful
=
Get the Swagger UI sources (version 1.2 only)
git clone https://github.com/wordnik/swagger-ui.git
The project contains a "dist" folder.
It contains all the Swagger UI files you need.
The `index.html` has a `url` set to `http://petstore.swagger.wordnik.com/api/api-docs`.
You need to change that to match your WebService JSON endpoint, e.g. `http://localhost:8080/apidocs.json`
Now, you can install the Swagger WebService for serving the Swagger specification in JSON.
config := swagger.Config{
WebServices: restful.RegisteredWebServices(),
ApiPath: "/apidocs.json",
SwaggerPath: "/apidocs/",
SwaggerFilePath: "/Users/emicklei/Projects/swagger-ui/dist"}
swagger.InstallSwaggerService(config)
Documenting Structs
--
Currently there are two ways to document your structs for the go-restful Swagger support.
###### By using struct tags
- Use tag "description" to annotate a struct field with a description to show in the UI
- Use tag "modelDescription" to annotate the struct itself with a description to show in the UI. The tag can be added in an field of the struct and in case that there are multiple definition, they will be appended with an empty line.
###### By using the SwaggerDoc method
Here is an example with an `Address` struct and the documentation for each of the fields. The `""` is a special entry for **documenting the struct itself**.
type Address struct {
Country string `json:"country,omitempty"`
PostCode int `json:"postcode,omitempty"`
}
func (Address) SwaggerDoc() map[string]string {
return map[string]string{
"": "Address doc",
"country": "Country doc",
"postcode": "PostCode doc",
}
}
This example will generate JSON like this:
{
"Address": {
"id": "Address",
"description": "Address doc",
"properties": {
"country": {
"type": "string",
"description": "Country doc"
},
"postcode": {
"type": "integer",
"format": "int32",
"description": "PostCode doc"
}
}
}
}
**Very Important Notes:**
- `SwaggerDoc()` uses a **NON-Pointer** receiver (e.g. func (Address) and not func (*Address))
- The returned map should be keyed by the JSON name of the field (e.g. `"postcode"` and not `"PostCode"`)
Notes
--
- The Nickname of an Operation is automatically set by finding the name of the function. You can override it using RouteBuilder.Operation(..)
- The WebServices field of swagger.Config can be used to control which services you want to expose and document ; you can have multiple configs and therefore multiple endpoints.
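To tie this documentation to the routes themselves: the Swagger service reads the Doc/Param/Writes/Returns calls on each RouteBuilder. A hedged sketch reusing the `Address` struct shown above; the findAddress handler and the /addresses path are made up for illustration.

package main

import (
	"net/http"

	"github.com/emicklei/go-restful"
	"github.com/emicklei/go-restful/swagger"
)

// Address and its SwaggerDoc method are as defined above.

func findAddress(req *restful.Request, resp *restful.Response) {
	resp.WriteAsJson(Address{Country: req.PathParameter("country")})
}

func main() {
	ws := new(restful.WebService)
	ws.Path("/addresses").Produces(restful.MIME_JSON)
	ws.Route(ws.GET("/{country}").To(findAddress).
		Doc("find an address by country code").
		Param(ws.PathParameter("country", "ISO country code")).
		Writes(Address{}). // becomes the response model in the generated apidocs
		Returns(404, "Address not found", nil))
	restful.Add(ws)

	swagger.InstallSwaggerService(swagger.Config{
		WebServices: restful.RegisteredWebServices(),
		ApiPath:     "/apidocs.json",
	})
	http.ListenAndServe(":8080", nil)
}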

View File

@ -0,0 +1,64 @@
package swagger
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"encoding/json"
)
// ApiDeclarationList maintains an ordered list of ApiDeclaration.
type ApiDeclarationList struct {
List []ApiDeclaration
}
// At returns the ApiDeclaration with the given path ; ok is false if it is absent
func (l *ApiDeclarationList) At(path string) (a ApiDeclaration, ok bool) {
for _, each := range l.List {
if each.ResourcePath == path {
return each, true
}
}
return a, false
}
// Put adds or replaces an ApiDeclaration with this path
func (l *ApiDeclarationList) Put(path string, a ApiDeclaration) {
// maybe replace existing
for i, each := range l.List {
if each.ResourcePath == path {
// replace
l.List[i] = a
return
}
}
// add
l.List = append(l.List, a)
}
// Do enumerates all the ApiDeclarations, each with its assigned path
func (l *ApiDeclarationList) Do(block func(path string, decl ApiDeclaration)) {
for _, each := range l.List {
block(each.ResourcePath, each)
}
}
// MarshalJSON writes the ApiDeclarationList as if it were a map[string]ApiDeclaration
func (l ApiDeclarationList) MarshalJSON() ([]byte, error) {
var buf bytes.Buffer
encoder := json.NewEncoder(&buf)
buf.WriteString("{\n")
for i, each := range l.List {
buf.WriteString("\"")
buf.WriteString(each.ResourcePath)
buf.WriteString("\": ")
encoder.Encode(each)
if i < len(l.List)-1 {
buf.WriteString(",\n")
}
}
buf.WriteString("}")
return buf.Bytes(), nil
}

View File

@ -0,0 +1,34 @@
package swagger
import (
"net/http"
"github.com/emicklei/go-restful"
)
// PostBuildDeclarationMapFunc can be used to modify the api declaration map.
type PostBuildDeclarationMapFunc func(apiDeclarationMap *ApiDeclarationList)
type Config struct {
// url where the services are available, e.g. http://localhost:8080
// if left empty then the basePath of Swagger is taken from the actual request
WebServicesUrl string
// path where the JSON api is available, e.g. /apidocs
ApiPath string
// [optional] path where the swagger UI will be served, e.g. /swagger
SwaggerPath string
// [optional] location of folder containing Swagger HTML5 application index.html
SwaggerFilePath string
// api listing is constructed from this list of restful WebServices.
WebServices []*restful.WebService
// will serve all static content (scripts,pages,images)
StaticHandler http.Handler
// [optional] by default CORS (Cross-Origin-Resource-Sharing) is enabled.
DisableCORS bool
// Top-level API version. Is reflected in the resource listing.
ApiVersion string
// If set then call this handler after building the complete ApiDeclaration Map
PostBuildHandler PostBuildDeclarationMapFunc
// Swagger global info struct
Info Info
}
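A hedged sketch of filling this Config and registering the swagger service on a dedicated Container. The file path, version string, and the PostBuildHandler body are hypothetical; the handler here only stamps an apiVersion onto every generated declaration.

package main

import (
	"net/http"

	"github.com/emicklei/go-restful"
	"github.com/emicklei/go-restful/swagger"
)

func main() {
	container := restful.NewContainer()
	// ... add your own WebServices to the container here ...

	config := swagger.Config{
		WebServices:     container.RegisteredWebServices(),
		WebServicesUrl:  "http://localhost:8080",
		ApiPath:         "/apidocs.json",
		SwaggerPath:     "/apidocs/",
		SwaggerFilePath: "/usr/local/share/swagger-ui/dist", // hypothetical location of the UI files
		ApiVersion:      "1.0.0",
		PostBuildHandler: func(decls *swagger.ApiDeclarationList) {
			decls.Do(func(path string, decl swagger.ApiDeclaration) {
				decl.ApiVersion = "1.0.0"
				decls.Put(path, decl)
			})
		},
	}
	swagger.RegisterSwaggerService(config, container)

	// the Container implements http.Handler
	http.ListenAndServe(":8080", container)
}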

View File

@ -0,0 +1,430 @@
package swagger
import (
"encoding/json"
"reflect"
"strings"
)
// ModelBuildable is used for extending Structs that need more control over
// how the Model appears in the Swagger api declaration.
type ModelBuildable interface {
PostBuildModel(m *Model) *Model
}
type modelBuilder struct {
Models *ModelList
}
type documentable interface {
SwaggerDoc() map[string]string
}
// Check if this structure has a method with signature func (<theModel>) SwaggerDoc() map[string]string
// If it exists, retrieve the documentation and overwrite all struct tag descriptions
func getDocFromMethodSwaggerDoc2(model reflect.Type) map[string]string {
if docable, ok := reflect.New(model).Elem().Interface().(documentable); ok {
return docable.SwaggerDoc()
}
return make(map[string]string)
}
// addModelFrom creates and adds a Model to the builder and detects and calls
// the post build hook for customizations
func (b modelBuilder) addModelFrom(sample interface{}) {
if modelOrNil := b.addModel(reflect.TypeOf(sample), ""); modelOrNil != nil {
// allow customizations
if buildable, ok := sample.(ModelBuildable); ok {
modelOrNil = buildable.PostBuildModel(modelOrNil)
b.Models.Put(modelOrNil.Id, *modelOrNil)
}
}
}
func (b modelBuilder) addModel(st reflect.Type, nameOverride string) *Model {
modelName := b.keyFrom(st)
if nameOverride != "" {
modelName = nameOverride
}
// no models needed for primitive types
if b.isPrimitiveType(modelName) {
return nil
}
// see if we already have visited this model
if _, ok := b.Models.At(modelName); ok {
return nil
}
sm := Model{
Id: modelName,
Required: []string{},
Properties: ModelPropertyList{}}
// reference the model before further initializing (enables recursive structs)
b.Models.Put(modelName, sm)
// check for slice or array
if st.Kind() == reflect.Slice || st.Kind() == reflect.Array {
b.addModel(st.Elem(), "")
return &sm
}
// check for structure or primitive type
if st.Kind() != reflect.Struct {
return &sm
}
fullDoc := getDocFromMethodSwaggerDoc2(st)
modelDescriptions := []string{}
for i := 0; i < st.NumField(); i++ {
field := st.Field(i)
jsonName, modelDescription, prop := b.buildProperty(field, &sm, modelName)
if len(modelDescription) > 0 {
modelDescriptions = append(modelDescriptions, modelDescription)
}
// add if not omitted
if len(jsonName) != 0 {
// update description
if fieldDoc, ok := fullDoc[jsonName]; ok {
prop.Description = fieldDoc
}
// update Required
if b.isPropertyRequired(field) {
sm.Required = append(sm.Required, jsonName)
}
sm.Properties.Put(jsonName, prop)
}
}
// We always overwrite documentation if SwaggerDoc method exists
// "" is special for documenting the struct itself
if modelDoc, ok := fullDoc[""]; ok {
sm.Description = modelDoc
} else if len(modelDescriptions) != 0 {
sm.Description = strings.Join(modelDescriptions, "\n")
}
// update model builder with completed model
b.Models.Put(modelName, sm)
return &sm
}
func (b modelBuilder) isPropertyRequired(field reflect.StructField) bool {
required := true
if jsonTag := field.Tag.Get("json"); jsonTag != "" {
s := strings.Split(jsonTag, ",")
if len(s) > 1 && s[1] == "omitempty" {
return false
}
}
return required
}
func (b modelBuilder) buildProperty(field reflect.StructField, model *Model, modelName string) (jsonName, modelDescription string, prop ModelProperty) {
jsonName = b.jsonNameOfField(field)
if len(jsonName) == 0 {
// empty name signals skip property
return "", "", prop
}
if tag := field.Tag.Get("modelDescription"); tag != "" {
modelDescription = tag
}
prop.setPropertyMetadata(field)
if prop.Type != nil {
return jsonName, modelDescription, prop
}
fieldType := field.Type
// check if type is doing its own marshalling
marshalerType := reflect.TypeOf((*json.Marshaler)(nil)).Elem()
if fieldType.Implements(marshalerType) {
var pType = "string"
if prop.Type == nil {
prop.Type = &pType
}
if prop.Format == "" {
prop.Format = b.jsonSchemaFormat(fieldType.String())
}
return jsonName, modelDescription, prop
}
// check if annotation says it is a string
if jsonTag := field.Tag.Get("json"); jsonTag != "" {
s := strings.Split(jsonTag, ",")
if len(s) > 1 && s[1] == "string" {
stringt := "string"
prop.Type = &stringt
return jsonName, modelDescription, prop
}
}
fieldKind := fieldType.Kind()
switch {
case fieldKind == reflect.Struct:
jsonName, prop := b.buildStructTypeProperty(field, jsonName, model)
return jsonName, modelDescription, prop
case fieldKind == reflect.Slice || fieldKind == reflect.Array:
jsonName, prop := b.buildArrayTypeProperty(field, jsonName, modelName)
return jsonName, modelDescription, prop
case fieldKind == reflect.Ptr:
jsonName, prop := b.buildPointerTypeProperty(field, jsonName, modelName)
return jsonName, modelDescription, prop
case fieldKind == reflect.String:
stringt := "string"
prop.Type = &stringt
return jsonName, modelDescription, prop
case fieldKind == reflect.Map:
// if it's a map, it's unstructured, and swagger 1.2 can't handle it
objectType := "object"
prop.Type = &objectType
return jsonName, modelDescription, prop
}
if b.isPrimitiveType(fieldType.String()) {
mapped := b.jsonSchemaType(fieldType.String())
prop.Type = &mapped
prop.Format = b.jsonSchemaFormat(fieldType.String())
return jsonName, modelDescription, prop
}
modelType := fieldType.String()
prop.Ref = &modelType
if fieldType.Name() == "" { // override type of anonymous structs
nestedTypeName := modelName + "." + jsonName
prop.Ref = &nestedTypeName
b.addModel(fieldType, nestedTypeName)
}
return jsonName, modelDescription, prop
}
func hasNamedJSONTag(field reflect.StructField) bool {
parts := strings.Split(field.Tag.Get("json"), ",")
if len(parts) == 0 {
return false
}
for _, s := range parts[1:] {
if s == "inline" {
return false
}
}
return len(parts[0]) > 0
}
func (b modelBuilder) buildStructTypeProperty(field reflect.StructField, jsonName string, model *Model) (nameJson string, prop ModelProperty) {
prop.setPropertyMetadata(field)
// Check for type override in tag
if prop.Type != nil {
return jsonName, prop
}
fieldType := field.Type
// check for anonymous
if len(fieldType.Name()) == 0 {
// anonymous
anonType := model.Id + "." + jsonName
b.addModel(fieldType, anonType)
prop.Ref = &anonType
return jsonName, prop
}
if field.Name == fieldType.Name() && field.Anonymous && !hasNamedJSONTag(field) {
// embedded struct
sub := modelBuilder{new(ModelList)}
sub.addModel(fieldType, "")
subKey := sub.keyFrom(fieldType)
// merge properties from sub
subModel, _ := sub.Models.At(subKey)
subModel.Properties.Do(func(k string, v ModelProperty) {
model.Properties.Put(k, v)
// if subModel says this property is required then include it
required := false
for _, each := range subModel.Required {
if k == each {
required = true
break
}
}
if required {
model.Required = append(model.Required, k)
}
})
// add all new referenced models
sub.Models.Do(func(key string, sub Model) {
if key != subKey {
if _, ok := b.Models.At(key); !ok {
b.Models.Put(key, sub)
}
}
})
// empty name signals skip property
return "", prop
}
// simple struct
b.addModel(fieldType, "")
var pType = fieldType.String()
prop.Ref = &pType
return jsonName, prop
}
func (b modelBuilder) buildArrayTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) {
// check for type override in tags
prop.setPropertyMetadata(field)
if prop.Type != nil {
return jsonName, prop
}
fieldType := field.Type
var pType = "array"
prop.Type = &pType
isPrimitive := b.isPrimitiveType(fieldType.Elem().Name())
elemTypeName := b.getElementTypeName(modelName, jsonName, fieldType.Elem())
prop.Items = new(Item)
if isPrimitive {
mapped := b.jsonSchemaType(elemTypeName)
prop.Items.Type = &mapped
} else {
prop.Items.Ref = &elemTypeName
}
// add|overwrite model for element type
if fieldType.Elem().Kind() == reflect.Ptr {
fieldType = fieldType.Elem()
}
if !isPrimitive {
b.addModel(fieldType.Elem(), elemTypeName)
}
return jsonName, prop
}
func (b modelBuilder) buildPointerTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) {
prop.setPropertyMetadata(field)
// Check for type override in tags
if prop.Type != nil {
return jsonName, prop
}
fieldType := field.Type
// override type of pointer to list-likes
if fieldType.Elem().Kind() == reflect.Slice || fieldType.Elem().Kind() == reflect.Array {
var pType = "array"
prop.Type = &pType
isPrimitive := b.isPrimitiveType(fieldType.Elem().Elem().Name())
elemName := b.getElementTypeName(modelName, jsonName, fieldType.Elem().Elem())
if isPrimitive {
primName := b.jsonSchemaType(elemName)
prop.Items = &Item{Ref: &primName}
} else {
prop.Items = &Item{Ref: &elemName}
}
if !isPrimitive {
// add|overwrite model for element type
b.addModel(fieldType.Elem().Elem(), elemName)
}
} else {
// non-array, pointer type
var pType = b.jsonSchemaType(fieldType.String()[1:]) // no star, include pkg path
if b.isPrimitiveType(fieldType.String()[1:]) {
prop.Type = &pType
prop.Format = b.jsonSchemaFormat(fieldType.String()[1:])
return jsonName, prop
}
prop.Ref = &pType
elemName := ""
if fieldType.Elem().Name() == "" {
elemName = modelName + "." + jsonName
prop.Ref = &elemName
}
b.addModel(fieldType.Elem(), elemName)
}
return jsonName, prop
}
func (b modelBuilder) getElementTypeName(modelName, jsonName string, t reflect.Type) string {
if t.Kind() == reflect.Ptr {
return t.String()[1:]
}
if t.Name() == "" {
return modelName + "." + jsonName
}
return b.keyFrom(t)
}
func (b modelBuilder) keyFrom(st reflect.Type) string {
key := st.String()
if len(st.Name()) == 0 { // unnamed type
// Swagger UI has special meaning for [
key = strings.Replace(key, "[]", "||", -1)
}
return key
}
// see also https://golang.org/ref/spec#Numeric_types
func (b modelBuilder) isPrimitiveType(modelName string) bool {
if len(modelName) == 0 {
return false
}
return strings.Contains("uint uint8 uint16 uint32 uint64 int int8 int16 int32 int64 float32 float64 bool string byte rune time.Time", modelName)
}
// jsonNameOfField returns the name of the field as it should appear in JSON format
// An empty string indicates that this field is not part of the JSON representation
func (b modelBuilder) jsonNameOfField(field reflect.StructField) string {
if jsonTag := field.Tag.Get("json"); jsonTag != "" {
s := strings.Split(jsonTag, ",")
if s[0] == "-" {
// empty name signals skip property
return ""
} else if s[0] != "" {
return s[0]
}
}
return field.Name
}
// see also http://json-schema.org/latest/json-schema-core.html#anchor8
func (b modelBuilder) jsonSchemaType(modelName string) string {
schemaMap := map[string]string{
"uint": "integer",
"uint8": "integer",
"uint16": "integer",
"uint32": "integer",
"uint64": "integer",
"int": "integer",
"int8": "integer",
"int16": "integer",
"int32": "integer",
"int64": "integer",
"byte": "integer",
"float64": "number",
"float32": "number",
"bool": "boolean",
"time.Time": "string",
}
mapped, ok := schemaMap[modelName]
if !ok {
return modelName // use as is (custom or struct)
}
return mapped
}
func (b modelBuilder) jsonSchemaFormat(modelName string) string {
schemaMap := map[string]string{
"int": "int32",
"int32": "int32",
"int64": "int64",
"byte": "byte",
"uint": "integer",
"uint8": "byte",
"float64": "double",
"float32": "float",
"time.Time": "date-time",
"*time.Time": "date-time",
}
mapped, ok := schemaMap[modelName]
if !ok {
return "" // no format
}
return mapped
}
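Because addModelFrom checks for the ModelBuildable interface, a resource type can adjust the Model that was derived from it. A minimal sketch; the Token type and its description are made up.

package api

import "github.com/emicklei/go-restful/swagger"

// Token is a hypothetical resource type.
type Token struct {
	Value string `json:"value,omitempty"`
}

// compile-time check that Token takes part in the post-build hook
var _ swagger.ModelBuildable = Token{}

// PostBuildModel customizes the Model that the builder derived from Token:
// it documents the model and marks "value" as required even though the JSON
// tag says omitempty.
func (t Token) PostBuildModel(m *swagger.Model) *swagger.Model {
	m.Description = "an opaque API token"
	m.Required = append(m.Required, "value")
	return m
}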

View File

@ -0,0 +1,86 @@
package swagger
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"encoding/json"
)
// NamedModel associates a name with a Model (not using its Id)
type NamedModel struct {
Name string
Model Model
}
// ModelList encapsulates a list of NamedModel (association)
type ModelList struct {
List []NamedModel
}
// Put adds or replaces a Model by its name
func (l *ModelList) Put(name string, model Model) {
for i, each := range l.List {
if each.Name == name {
// replace
l.List[i] = NamedModel{name, model}
return
}
}
// add
l.List = append(l.List, NamedModel{name, model})
}
// At returns a Model by its name, ok is false if absent
func (l *ModelList) At(name string) (m Model, ok bool) {
for _, each := range l.List {
if each.Name == name {
return each.Model, true
}
}
return m, false
}
// Do enumerates all the models, each with its assigned name
func (l *ModelList) Do(block func(name string, value Model)) {
for _, each := range l.List {
block(each.Name, each.Model)
}
}
// MarshalJSON writes the ModelList as if it was a map[string]Model
func (l ModelList) MarshalJSON() ([]byte, error) {
var buf bytes.Buffer
encoder := json.NewEncoder(&buf)
buf.WriteString("{\n")
for i, each := range l.List {
buf.WriteString("\"")
buf.WriteString(each.Name)
buf.WriteString("\": ")
encoder.Encode(each.Model)
if i < len(l.List)-1 {
buf.WriteString(",\n")
}
}
buf.WriteString("}")
return buf.Bytes(), nil
}
// UnmarshalJSON reads back a ModelList. This is an expensive operation.
func (l *ModelList) UnmarshalJSON(data []byte) error {
raw := map[string]interface{}{}
json.NewDecoder(bytes.NewReader(data)).Decode(&raw)
for k, v := range raw {
// produces JSON bytes for each value
data, err := json.Marshal(v)
if err != nil {
return err
}
var m Model
json.NewDecoder(bytes.NewReader(data)).Decode(&m)
l.Put(k, m)
}
return nil
}

View File

@ -0,0 +1,66 @@
package swagger
import (
"reflect"
"strings"
)
func (prop *ModelProperty) setDescription(field reflect.StructField) {
if tag := field.Tag.Get("description"); tag != "" {
prop.Description = tag
}
}
func (prop *ModelProperty) setDefaultValue(field reflect.StructField) {
if tag := field.Tag.Get("default"); tag != "" {
prop.DefaultValue = Special(tag)
}
}
func (prop *ModelProperty) setEnumValues(field reflect.StructField) {
// We use | to separate the enum values. This value is chosen
// since it's unlikely to be useful in actual enumeration values.
if tag := field.Tag.Get("enum"); tag != "" {
prop.Enum = strings.Split(tag, "|")
}
}
func (prop *ModelProperty) setMaximum(field reflect.StructField) {
if tag := field.Tag.Get("maximum"); tag != "" {
prop.Maximum = tag
}
}
func (prop *ModelProperty) setType(field reflect.StructField) {
if tag := field.Tag.Get("type"); tag != "" {
prop.Type = &tag
}
}
func (prop *ModelProperty) setMinimum(field reflect.StructField) {
if tag := field.Tag.Get("minimum"); tag != "" {
prop.Minimum = tag
}
}
func (prop *ModelProperty) setUniqueItems(field reflect.StructField) {
tag := field.Tag.Get("unique")
switch tag {
case "true":
v := true
prop.UniqueItems = &v
case "false":
v := false
prop.UniqueItems = &v
}
}
func (prop *ModelProperty) setPropertyMetadata(field reflect.StructField) {
prop.setDescription(field)
prop.setEnumValues(field)
prop.setMinimum(field)
prop.setMaximum(field)
prop.setUniqueItems(field)
prop.setDefaultValue(field)
prop.setType(field)
}
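These setters mean the Swagger property metadata can be driven entirely by struct tags. A hedged sketch of a hypothetical type using the tags handled above (description, enum, default, minimum, maximum, unique):

package api

// Fruit is a hypothetical type whose Swagger property metadata comes from
// the struct tags read by setPropertyMetadata.
type Fruit struct {
	Name   string   `json:"name" description:"display name of the fruit"`
	Color  string   `json:"color,omitempty" enum:"red|green|yellow" default:"green"`
	Weight int      `json:"weight,omitempty" minimum:"1" maximum:"5000"`
	Tags   []string `json:"tags,omitempty" unique:"true"`
}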

View File

@ -0,0 +1,87 @@
package swagger
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"encoding/json"
)
// NamedModelProperty associates a name to a ModelProperty
type NamedModelProperty struct {
Name string
Property ModelProperty
}
// ModelPropertyList encapsulates a list of NamedModelProperty (association)
type ModelPropertyList struct {
List []NamedModelProperty
}
// At returns the ModelProperty by its name ; ok is false when absent
func (l *ModelPropertyList) At(name string) (p ModelProperty, ok bool) {
for _, each := range l.List {
if each.Name == name {
return each.Property, true
}
}
return p, false
}
// Put adds or replaces a ModelProperty with this name
func (l *ModelPropertyList) Put(name string, prop ModelProperty) {
// maybe replace existing
for i, each := range l.List {
if each.Name == name {
// replace
l.List[i] = NamedModelProperty{Name: name, Property: prop}
return
}
}
// add
l.List = append(l.List, NamedModelProperty{Name: name, Property: prop})
}
// Do enumerates all the properties, each with its assigned name
func (l *ModelPropertyList) Do(block func(name string, value ModelProperty)) {
for _, each := range l.List {
block(each.Name, each.Property)
}
}
// MarshalJSON writes the ModelPropertyList as if it was a map[string]ModelProperty
func (l ModelPropertyList) MarshalJSON() ([]byte, error) {
var buf bytes.Buffer
encoder := json.NewEncoder(&buf)
buf.WriteString("{\n")
for i, each := range l.List {
buf.WriteString("\"")
buf.WriteString(each.Name)
buf.WriteString("\": ")
encoder.Encode(each.Property)
if i < len(l.List)-1 {
buf.WriteString(",\n")
}
}
buf.WriteString("}")
return buf.Bytes(), nil
}
// UnmarshalJSON reads back a ModelPropertyList. This is an expensive operation.
func (l *ModelPropertyList) UnmarshalJSON(data []byte) error {
raw := map[string]interface{}{}
json.NewDecoder(bytes.NewReader(data)).Decode(&raw)
for k, v := range raw {
// produces JSON bytes for each value
data, err := json.Marshal(v)
if err != nil {
return err
}
var m ModelProperty
json.NewDecoder(bytes.NewReader(data)).Decode(&m)
l.Put(k, m)
}
return nil
}

View File

@ -0,0 +1,36 @@
package swagger
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import "github.com/emicklei/go-restful"
type orderedRouteMap struct {
elements map[string][]restful.Route
keys []string
}
func newOrderedRouteMap() *orderedRouteMap {
return &orderedRouteMap{
elements: map[string][]restful.Route{},
keys: []string{},
}
}
func (o *orderedRouteMap) Add(key string, route restful.Route) {
routes, ok := o.elements[key]
if ok {
routes = append(routes, route)
o.elements[key] = routes
return
}
o.elements[key] = []restful.Route{route}
o.keys = append(o.keys, key)
}
func (o *orderedRouteMap) Do(block func(key string, routes []restful.Route)) {
for _, k := range o.keys {
block(k, o.elements[k])
}
}

View File

@ -0,0 +1,184 @@
// Package swagger implements the structures of the Swagger
// https://github.com/wordnik/swagger-spec/blob/master/versions/1.2.md
package swagger
const swaggerVersion = "1.2"
// 4.3.3 Data Type Fields
type DataTypeFields struct {
Type *string `json:"type,omitempty"` // if Ref not used
Ref *string `json:"$ref,omitempty"` // if Type not used
Format string `json:"format,omitempty"`
DefaultValue Special `json:"defaultValue,omitempty"`
Enum []string `json:"enum,omitempty"`
Minimum string `json:"minimum,omitempty"`
Maximum string `json:"maximum,omitempty"`
Items *Item `json:"items,omitempty"`
UniqueItems *bool `json:"uniqueItems,omitempty"`
}
type Special string
// 4.3.4 Items Object
type Item struct {
Type *string `json:"type,omitempty"`
Ref *string `json:"$ref,omitempty"`
Format string `json:"format,omitempty"`
}
// 5.1 Resource Listing
type ResourceListing struct {
SwaggerVersion string `json:"swaggerVersion"` // e.g 1.2
Apis []Resource `json:"apis"`
ApiVersion string `json:"apiVersion"`
Info Info `json:"info"`
Authorizations []Authorization `json:"authorizations,omitempty"`
}
// 5.1.2 Resource Object
type Resource struct {
Path string `json:"path"` // relative or absolute, must start with /
Description string `json:"description"`
}
// 5.1.3 Info Object
type Info struct {
Title string `json:"title"`
Description string `json:"description"`
TermsOfServiceUrl string `json:"termsOfServiceUrl,omitempty"`
Contact string `json:"contact,omitempty"`
License string `json:"license,omitempty"`
LicenseUrl string `json:"licenseUrl,omitempty"`
}
// 5.1.5
type Authorization struct {
Type string `json:"type"`
PassAs string `json:"passAs"`
Keyname string `json:"keyname"`
Scopes []Scope `json:"scopes"`
GrantTypes []GrantType `json:"grandTypes"`
}
// 5.1.6, 5.2.11
type Scope struct {
// Required. The name of the scope.
Scope string `json:"scope"`
// Recommended. A short description of the scope.
Description string `json:"description"`
}
// 5.1.7
type GrantType struct {
Implicit Implicit `json:"implicit"`
AuthorizationCode AuthorizationCode `json:"authorization_code"`
}
// 5.1.8 Implicit Object
type Implicit struct {
// Required. The login endpoint definition.
LoginEndpoint LoginEndpoint `json:"loginEndpoint"`
// An optional alternative name to standard "access_token" OAuth2 parameter.
TokenName string `json:"tokenName"`
}
// 5.1.9 Authorization Code Object
type AuthorizationCode struct {
TokenRequestEndpoint TokenRequestEndpoint `json:"tokenRequestEndpoint"`
TokenEndpoint TokenEndpoint `json:"tokenEndpoint"`
}
// 5.1.10 Login Endpoint Object
type LoginEndpoint struct {
// Required. The URL of the authorization endpoint for the implicit grant flow. The value SHOULD be in a URL format.
Url string `json:"url"`
}
// 5.1.11 Token Request Endpoint Object
type TokenRequestEndpoint struct {
// Required. The URL of the authorization endpoint for the authentication code grant flow. The value SHOULD be in a URL format.
Url string `json:"url"`
// An optional alternative name to standard "client_id" OAuth2 parameter.
ClientIdName string `json:"clientIdName"`
// An optional alternative name to the standard "client_secret" OAuth2 parameter.
ClientSecretName string `json:"clientSecretName"`
}
// 5.1.12 Token Endpoint Object
type TokenEndpoint struct {
// Required. The URL of the token endpoint for the authentication code grant flow. The value SHOULD be in a URL format.
Url string `json:"url"`
// An optional alternative name to standard "access_token" OAuth2 parameter.
TokenName string `json:"tokenName"`
}
// 5.2 API Declaration
type ApiDeclaration struct {
SwaggerVersion string `json:"swaggerVersion"`
ApiVersion string `json:"apiVersion"`
BasePath string `json:"basePath"`
ResourcePath string `json:"resourcePath"` // must start with /
Apis []Api `json:"apis,omitempty"`
Models ModelList `json:"models,omitempty"`
Produces []string `json:"produces,omitempty"`
Consumes []string `json:"consumes,omitempty"`
Authorizations []Authorization `json:"authorizations,omitempty"`
}
// 5.2.2 API Object
type Api struct {
Path string `json:"path"` // relative or absolute, must start with /
Description string `json:"description"`
Operations []Operation `json:"operations,omitempty"`
}
// 5.2.3 Operation Object
type Operation struct {
DataTypeFields
Method string `json:"method"`
Summary string `json:"summary,omitempty"`
Notes string `json:"notes,omitempty"`
Nickname string `json:"nickname"`
Authorizations []Authorization `json:"authorizations,omitempty"`
Parameters []Parameter `json:"parameters"`
ResponseMessages []ResponseMessage `json:"responseMessages,omitempty"` // optional
Produces []string `json:"produces,omitempty"`
Consumes []string `json:"consumes,omitempty"`
Deprecated string `json:"deprecated,omitempty"`
}
// 5.2.4 Parameter Object
type Parameter struct {
DataTypeFields
ParamType string `json:"paramType"` // path,query,body,header,form
Name string `json:"name"`
Description string `json:"description"`
Required bool `json:"required"`
AllowMultiple bool `json:"allowMultiple"`
}
// 5.2.5 Response Message Object
type ResponseMessage struct {
Code int `json:"code"`
Message string `json:"message"`
ResponseModel string `json:"responseModel,omitempty"`
}
// 5.2.6, 5.2.7 Models Object
type Model struct {
Id string `json:"id"`
Description string `json:"description,omitempty"`
Required []string `json:"required,omitempty"`
Properties ModelPropertyList `json:"properties"`
SubTypes []string `json:"subTypes,omitempty"`
Discriminator string `json:"discriminator,omitempty"`
}
// 5.2.8 Properties Object
type ModelProperty struct {
DataTypeFields
Description string `json:"description,omitempty"`
}
// 5.2.10
type Authorizations map[string]Authorization

View File

@ -0,0 +1,21 @@
package swagger
type SwaggerBuilder struct {
SwaggerService
}
func NewSwaggerBuilder(config Config) *SwaggerBuilder {
return &SwaggerBuilder{*newSwaggerService(config)}
}
func (sb SwaggerBuilder) ProduceListing() ResourceListing {
return sb.SwaggerService.produceListing()
}
func (sb SwaggerBuilder) ProduceAllDeclarations() map[string]ApiDeclaration {
return sb.SwaggerService.produceAllDeclarations()
}
func (sb SwaggerBuilder) ProduceDeclarations(route string) (*ApiDeclaration, bool) {
return sb.SwaggerService.produceDeclarations(route)
}
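SwaggerBuilder makes it possible to produce the listing and declarations without serving them over HTTP, e.g. to dump the spec to stdout or a file. A minimal sketch, assuming the application's WebServices have already been registered on the DefaultContainer:

package main

import (
	"encoding/json"
	"os"

	"github.com/emicklei/go-restful"
	"github.com/emicklei/go-restful/swagger"
)

func main() {
	sb := swagger.NewSwaggerBuilder(swagger.Config{
		WebServices: restful.RegisteredWebServices(),
		ApiVersion:  "1.0.0",
	})

	enc := json.NewEncoder(os.Stdout)
	enc.Encode(sb.ProduceListing())         // the resource listing
	enc.Encode(sb.ProduceAllDeclarations()) // every ApiDeclaration, keyed by path
}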

View File

@ -0,0 +1,440 @@
package swagger
import (
"fmt"
"github.com/emicklei/go-restful"
// "github.com/emicklei/hopwatch"
"net/http"
"reflect"
"sort"
"strings"
"github.com/emicklei/go-restful/log"
)
type SwaggerService struct {
config Config
apiDeclarationMap *ApiDeclarationList
}
func newSwaggerService(config Config) *SwaggerService {
sws := &SwaggerService{
config: config,
apiDeclarationMap: new(ApiDeclarationList)}
// Build all ApiDeclarations
for _, each := range config.WebServices {
rootPath := each.RootPath()
// skip the api service itself
if rootPath != config.ApiPath {
if rootPath == "" || rootPath == "/" {
// use routes
for _, route := range each.Routes() {
entry := staticPathFromRoute(route)
_, exists := sws.apiDeclarationMap.At(entry)
if !exists {
sws.apiDeclarationMap.Put(entry, sws.composeDeclaration(each, entry))
}
}
} else { // use root path
sws.apiDeclarationMap.Put(each.RootPath(), sws.composeDeclaration(each, each.RootPath()))
}
}
}
// if specified then call the PostBuilderHandler
if config.PostBuildHandler != nil {
config.PostBuildHandler(sws.apiDeclarationMap)
}
return sws
}
// LogInfo is the function that is called when this package needs to log. It defaults to log.Printf
var LogInfo = func(format string, v ...interface{}) {
// use the restful package-wide logger
log.Printf(format, v...)
}
// InstallSwaggerService adds the WebService that provides the API documentation of all services,
// conforming to the Swagger documentation specification (https://github.com/wordnik/swagger-core/wiki).
func InstallSwaggerService(aSwaggerConfig Config) {
RegisterSwaggerService(aSwaggerConfig, restful.DefaultContainer)
}
// RegisterSwaggerService adds the WebService that provides the API documentation of all services,
// conforming to the Swagger documentation specification (https://github.com/wordnik/swagger-core/wiki).
func RegisterSwaggerService(config Config, wsContainer *restful.Container) {
sws := newSwaggerService(config)
ws := new(restful.WebService)
ws.Path(config.ApiPath)
ws.Produces(restful.MIME_JSON)
if config.DisableCORS {
ws.Filter(enableCORS)
}
ws.Route(ws.GET("/").To(sws.getListing))
ws.Route(ws.GET("/{a}").To(sws.getDeclarations))
ws.Route(ws.GET("/{a}/{b}").To(sws.getDeclarations))
ws.Route(ws.GET("/{a}/{b}/{c}").To(sws.getDeclarations))
ws.Route(ws.GET("/{a}/{b}/{c}/{d}").To(sws.getDeclarations))
ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}").To(sws.getDeclarations))
ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}").To(sws.getDeclarations))
ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}/{g}").To(sws.getDeclarations))
LogInfo("[restful/swagger] listing is available at %v%v", config.WebServicesUrl, config.ApiPath)
wsContainer.Add(ws)
// Check paths for UI serving
if config.StaticHandler == nil && config.SwaggerFilePath != "" && config.SwaggerPath != "" {
swaggerPathSlash := config.SwaggerPath
// path must end with slash /
if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] {
LogInfo("[restful/swagger] use corrected SwaggerPath ; must end with slash (/)")
swaggerPathSlash += "/"
}
LogInfo("[restful/swagger] %v%v is mapped to folder %v", config.WebServicesUrl, swaggerPathSlash, config.SwaggerFilePath)
wsContainer.Handle(swaggerPathSlash, http.StripPrefix(swaggerPathSlash, http.FileServer(http.Dir(config.SwaggerFilePath))))
//if we define a custom static handler use it
} else if config.StaticHandler != nil && config.SwaggerPath != "" {
swaggerPathSlash := config.SwaggerPath
// path must end with slash /
if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] {
LogInfo("[restful/swagger] use corrected SwaggerFilePath ; must end with slash (/)")
swaggerPathSlash += "/"
}
LogInfo("[restful/swagger] %v%v is mapped to custom Handler %T", config.WebServicesUrl, swaggerPathSlash, config.StaticHandler)
wsContainer.Handle(swaggerPathSlash, config.StaticHandler)
} else {
LogInfo("[restful/swagger] Swagger(File)Path is empty ; no UI is served")
}
}
func staticPathFromRoute(r restful.Route) string {
static := r.Path
bracket := strings.Index(static, "{")
if bracket <= 1 { // result cannot be empty
return static
}
if bracket != -1 {
static = r.Path[:bracket]
}
if strings.HasSuffix(static, "/") {
return static[:len(static)-1]
} else {
return static
}
}
func enableCORS(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
if origin := req.HeaderParameter(restful.HEADER_Origin); origin != "" {
// prevent duplicate header
if len(resp.Header().Get(restful.HEADER_AccessControlAllowOrigin)) == 0 {
resp.AddHeader(restful.HEADER_AccessControlAllowOrigin, origin)
}
}
chain.ProcessFilter(req, resp)
}
func (sws SwaggerService) getListing(req *restful.Request, resp *restful.Response) {
listing := sws.produceListing()
resp.WriteAsJson(listing)
}
func (sws SwaggerService) produceListing() ResourceListing {
listing := ResourceListing{SwaggerVersion: swaggerVersion, ApiVersion: sws.config.ApiVersion, Info: sws.config.Info}
sws.apiDeclarationMap.Do(func(k string, v ApiDeclaration) {
ref := Resource{Path: k}
if len(v.Apis) > 0 { // use description of first (could still be empty)
ref.Description = v.Apis[0].Description
}
listing.Apis = append(listing.Apis, ref)
})
return listing
}
func (sws SwaggerService) getDeclarations(req *restful.Request, resp *restful.Response) {
decl, ok := sws.produceDeclarations(composeRootPath(req))
if !ok {
resp.WriteErrorString(http.StatusNotFound, "ApiDeclaration not found")
return
}
// unless WebServicesUrl is given
if len(sws.config.WebServicesUrl) == 0 {
// update base path from the actual request
// TODO how to detect https? assume http for now
var host string
// X-Forwarded-Host or Host or Request.Host
hostvalues, ok := req.Request.Header["X-Forwarded-Host"] // apache specific?
if !ok || len(hostvalues) == 0 {
forwarded, ok := req.Request.Header["Host"] // without reverse-proxy
if !ok || len(forwarded) == 0 {
// fallback to Host field
host = req.Request.Host
} else {
host = forwarded[0]
}
} else {
host = hostvalues[0]
}
// inspect Referer for the scheme (http vs https)
scheme := "http"
if referer := req.Request.Header["Referer"]; len(referer) > 0 {
if strings.HasPrefix(referer[0], "https") {
scheme = "https"
}
}
decl.BasePath = fmt.Sprintf("%s://%s", scheme, host)
}
resp.WriteAsJson(decl)
}
func (sws SwaggerService) produceAllDeclarations() map[string]ApiDeclaration {
decls := map[string]ApiDeclaration{}
sws.apiDeclarationMap.Do(func(k string, v ApiDeclaration) {
decls[k] = v
})
return decls
}
func (sws SwaggerService) produceDeclarations(route string) (*ApiDeclaration, bool) {
decl, ok := sws.apiDeclarationMap.At(route)
if !ok {
return nil, false
}
decl.BasePath = sws.config.WebServicesUrl
return &decl, true
}
// composeDeclaration uses all routes and parameters to create an ApiDeclaration
func (sws SwaggerService) composeDeclaration(ws *restful.WebService, pathPrefix string) ApiDeclaration {
decl := ApiDeclaration{
SwaggerVersion: swaggerVersion,
BasePath: sws.config.WebServicesUrl,
ResourcePath: pathPrefix,
Models: ModelList{},
ApiVersion: ws.Version()}
// collect any path parameters
rootParams := []Parameter{}
for _, param := range ws.PathParameters() {
rootParams = append(rootParams, asSwaggerParameter(param.Data()))
}
// aggregate by path
pathToRoutes := newOrderedRouteMap()
for _, other := range ws.Routes() {
if strings.HasPrefix(other.Path, pathPrefix) {
pathToRoutes.Add(other.Path, other)
}
}
pathToRoutes.Do(func(path string, routes []restful.Route) {
api := Api{Path: strings.TrimSuffix(withoutWildcard(path), "/"), Description: ws.Documentation()}
voidString := "void"
for _, route := range routes {
operation := Operation{
Method: route.Method,
Summary: route.Doc,
Notes: route.Notes,
// Type gets overwritten if there is a write sample
DataTypeFields: DataTypeFields{Type: &voidString},
Parameters: []Parameter{},
Nickname: route.Operation,
ResponseMessages: composeResponseMessages(route, &decl)}
operation.Consumes = route.Consumes
operation.Produces = route.Produces
// share root params if any
for _, swparam := range rootParams {
operation.Parameters = append(operation.Parameters, swparam)
}
// route specific params
for _, param := range route.ParameterDocs {
operation.Parameters = append(operation.Parameters, asSwaggerParameter(param.Data()))
}
sws.addModelsFromRouteTo(&operation, route, &decl)
api.Operations = append(api.Operations, operation)
}
decl.Apis = append(decl.Apis, api)
})
return decl
}
func withoutWildcard(path string) string {
if strings.HasSuffix(path, ":*}") {
return path[0:len(path)-3] + "}"
}
return path
}
// composeResponseMessages takes the ResponseErrors (if any) and creates ResponseMessages from them.
func composeResponseMessages(route restful.Route, decl *ApiDeclaration) (messages []ResponseMessage) {
if route.ResponseErrors == nil {
return messages
}
// sort by code
codes := sort.IntSlice{}
for code := range route.ResponseErrors {
codes = append(codes, code)
}
codes.Sort()
for _, code := range codes {
each := route.ResponseErrors[code]
message := ResponseMessage{
Code: code,
Message: each.Message,
}
if each.Model != nil {
st := reflect.TypeOf(each.Model)
isCollection, st := detectCollectionType(st)
modelName := modelBuilder{}.keyFrom(st)
if isCollection {
modelName = "array[" + modelName + "]"
}
modelBuilder{&decl.Models}.addModel(st, "")
// reference the model
message.ResponseModel = modelName
}
messages = append(messages, message)
}
return
}
// addModelsFromRouteTo takes any read or write sample from the Route and creates a Swagger model from it.
func (sws SwaggerService) addModelsFromRouteTo(operation *Operation, route restful.Route, decl *ApiDeclaration) {
if route.ReadSample != nil {
sws.addModelFromSampleTo(operation, false, route.ReadSample, &decl.Models)
}
if route.WriteSample != nil {
sws.addModelFromSampleTo(operation, true, route.WriteSample, &decl.Models)
}
}
func detectCollectionType(st reflect.Type) (bool, reflect.Type) {
isCollection := false
if st.Kind() == reflect.Slice || st.Kind() == reflect.Array {
st = st.Elem()
isCollection = true
} else {
if st.Kind() == reflect.Ptr {
if st.Elem().Kind() == reflect.Slice || st.Elem().Kind() == reflect.Array {
st = st.Elem().Elem()
isCollection = true
}
}
}
return isCollection, st
}
// addModelFromSampleTo creates and adds (or overwrites) a Model from a sample resource
func (sws SwaggerService) addModelFromSampleTo(operation *Operation, isResponse bool, sample interface{}, models *ModelList) {
if isResponse {
type_, items := asDataType(sample)
operation.Type = type_
operation.Items = items
}
modelBuilder{models}.addModelFrom(sample)
}
func asSwaggerParameter(param restful.ParameterData) Parameter {
return Parameter{
DataTypeFields: DataTypeFields{
Type: &param.DataType,
Format: asFormat(param.DataType, param.DataFormat),
DefaultValue: Special(param.DefaultValue),
},
Name: param.Name,
Description: param.Description,
ParamType: asParamType(param.Kind),
Required: param.Required}
}
// Between 1 and 7 path parameters are supported
func composeRootPath(req *restful.Request) string {
path := "/" + req.PathParameter("a")
b := req.PathParameter("b")
if b == "" {
return path
}
path = path + "/" + b
c := req.PathParameter("c")
if c == "" {
return path
}
path = path + "/" + c
d := req.PathParameter("d")
if d == "" {
return path
}
path = path + "/" + d
e := req.PathParameter("e")
if e == "" {
return path
}
path = path + "/" + e
f := req.PathParameter("f")
if f == "" {
return path
}
path = path + "/" + f
g := req.PathParameter("g")
if g == "" {
return path
}
return path + "/" + g
}
func asFormat(dataType string, dataFormat string) string {
if dataFormat != "" {
return dataFormat
}
return "" // TODO
}
func asParamType(kind int) string {
switch {
case kind == restful.PathParameterKind:
return "path"
case kind == restful.QueryParameterKind:
return "query"
case kind == restful.BodyParameterKind:
return "body"
case kind == restful.HeaderParameterKind:
return "header"
case kind == restful.FormParameterKind:
return "form"
}
return ""
}
func asDataType(any interface{}) (*string, *Item) {
// If it's not a collection, return the suggested model name
st := reflect.TypeOf(any)
isCollection, st := detectCollectionType(st)
modelName := modelBuilder{}.keyFrom(st)
// if it's not a collection we are done
if !isCollection {
return &modelName, nil
}
// XXX: This is not very elegant
// We create an Item object referring to the given model
models := ModelList{}
mb := modelBuilder{&models}
mb.addModelFrom(any)
elemTypeName := mb.getElementTypeName(modelName, "", st)
item := new(Item)
if mb.isPrimitiveType(elemTypeName) {
mapped := mb.jsonSchemaType(elemTypeName)
item.Type = &mapped
} else {
item.Ref = &elemTypeName
}
tmp := "array"
return &tmp, item
}

268
vendor/github.com/emicklei/go-restful/web_service.go generated vendored Normal file
View File

@ -0,0 +1,268 @@
package restful
import (
"fmt"
"os"
"sync"
"github.com/emicklei/go-restful/log"
)
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
// WebService holds a collection of Route values that bind a Http Method + URL Path to a function.
type WebService struct {
rootPath string
pathExpr *pathExpression // cached compilation of rootPath as RegExp
routes []Route
produces []string
consumes []string
pathParameters []*Parameter
filters []FilterFunction
documentation string
apiVersion string
dynamicRoutes bool
// protects 'routes' if dynamic routes are enabled
routesLock sync.RWMutex
}
func (w *WebService) SetDynamicRoutes(enable bool) {
w.dynamicRoutes = enable
}
// compilePathExpression ensures that the path is compiled into a RegEx for those routers that need it.
func (w *WebService) compilePathExpression() {
compiled, err := newPathExpression(w.rootPath)
if err != nil {
log.Printf("[restful] invalid path:%s because:%v", w.rootPath, err)
os.Exit(1)
}
w.pathExpr = compiled
}
// ApiVersion sets the API version for documentation purposes.
func (w *WebService) ApiVersion(apiVersion string) *WebService {
w.apiVersion = apiVersion
return w
}
// Version returns the API version for documentation purposes.
func (w WebService) Version() string { return w.apiVersion }
// Path specifies the root URL template path of the WebService.
// All Routes will be relative to this path.
func (w *WebService) Path(root string) *WebService {
w.rootPath = root
if len(w.rootPath) == 0 {
w.rootPath = "/"
}
w.compilePathExpression()
return w
}
// Param adds a PathParameter to document parameters used in the root path.
func (w *WebService) Param(parameter *Parameter) *WebService {
if w.pathParameters == nil {
w.pathParameters = []*Parameter{}
}
w.pathParameters = append(w.pathParameters, parameter)
return w
}
// PathParameter creates a new Parameter of kind Path for documentation purposes.
// It is initialized as required with string as its DataType.
func (w *WebService) PathParameter(name, description string) *Parameter {
return PathParameter(name, description)
}
// PathParameter creates a new Parameter of kind Path for documentation purposes.
// It is initialized as required with string as its DataType.
func PathParameter(name, description string) *Parameter {
p := &Parameter{&ParameterData{Name: name, Description: description, Required: true, DataType: "string"}}
p.bePath()
return p
}
// QueryParameter creates a new Parameter of kind Query for documentation purposes.
// It is initialized as not required with string as its DataType.
func (w *WebService) QueryParameter(name, description string) *Parameter {
return QueryParameter(name, description)
}
// QueryParameter creates a new Parameter of kind Query for documentation purposes.
// It is initialized as not required with string as its DataType.
func QueryParameter(name, description string) *Parameter {
p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
p.beQuery()
return p
}
// BodyParameter creates a new Parameter of kind Body for documentation purposes.
// It is initialized as required without a DataType.
func (w *WebService) BodyParameter(name, description string) *Parameter {
return BodyParameter(name, description)
}
// BodyParameter creates a new Parameter of kind Body for documentation purposes.
// It is initialized as required without a DataType.
func BodyParameter(name, description string) *Parameter {
p := &Parameter{&ParameterData{Name: name, Description: description, Required: true}}
p.beBody()
return p
}
// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes.
// It is initialized as not required with string as its DataType.
func (w *WebService) HeaderParameter(name, description string) *Parameter {
return HeaderParameter(name, description)
}
// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes.
// It is initialized as not required with string as its DataType.
func HeaderParameter(name, description string) *Parameter {
p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
p.beHeader()
return p
}
// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes.
// It is initialized as required with string as its DataType.
func (w *WebService) FormParameter(name, description string) *Parameter {
return FormParameter(name, description)
}
// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes.
// It is initialized as required with string as its DataType.
func FormParameter(name, description string) *Parameter {
p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
p.beForm()
return p
}
// Route creates a new Route using the RouteBuilder and adds it to the ordered list of Routes.
func (w *WebService) Route(builder *RouteBuilder) *WebService {
w.routesLock.Lock()
defer w.routesLock.Unlock()
builder.copyDefaults(w.produces, w.consumes)
w.routes = append(w.routes, builder.Build())
return w
}
// RemoveRoute removes the Route that matches the given 'path' and 'method'
func (w *WebService) RemoveRoute(path, method string) error {
if !w.dynamicRoutes {
return fmt.Errorf("dynamic routes are not enabled.")
}
w.routesLock.Lock()
defer w.routesLock.Unlock()
newRoutes := make([]Route, (len(w.routes) - 1))
current := 0
for ix := range w.routes {
if w.routes[ix].Method == method && w.routes[ix].Path == path {
continue
}
newRoutes[current] = w.routes[ix]
current = current + 1
}
w.routes = newRoutes
return nil
}
// Method creates a new RouteBuilder and initializes its http method
func (w *WebService) Method(httpMethod string) *RouteBuilder {
return new(RouteBuilder).servicePath(w.rootPath).Method(httpMethod)
}
// Produces specifies that this WebService can produce one or more MIME types.
// Http requests must have one of these values set for the Accept header.
func (w *WebService) Produces(contentTypes ...string) *WebService {
w.produces = contentTypes
return w
}
// Consumes specifies that this WebService can consume one or more MIME types.
// Http requests must have one of these values set for the Content-Type header.
func (w *WebService) Consumes(accepts ...string) *WebService {
w.consumes = accepts
return w
}
// Routes returns the Routes associated with this WebService
func (w WebService) Routes() []Route {
if !w.dynamicRoutes {
return w.routes
}
// Make a copy of the routes slice to prevent concurrency problems
w.routesLock.RLock()
defer w.routesLock.RUnlock()
result := make([]Route, len(w.routes))
for ix := range w.routes {
result[ix] = w.routes[ix]
}
return result
}
// RootPath returns the RootPath associated with this WebService. The default is "/".
func (w WebService) RootPath() string {
return w.rootPath
}
// PathParameters returns the path parameters shared among its Routes.
func (w WebService) PathParameters() []*Parameter {
return w.pathParameters
}
// Filter adds a filter function to the chain of filters applicable to all its Routes
func (w *WebService) Filter(filter FilterFunction) *WebService {
w.filters = append(w.filters, filter)
return w
}
// Doc is used to set the documentation of this service.
func (w *WebService) Doc(plainText string) *WebService {
w.documentation = plainText
return w
}
// Documentation returns the documentation of this service.
func (w WebService) Documentation() string {
return w.documentation
}
/*
Convenience methods
*/
// HEAD is a shortcut for .Method("HEAD").Path(subPath)
func (w *WebService) HEAD(subPath string) *RouteBuilder {
return new(RouteBuilder).servicePath(w.rootPath).Method("HEAD").Path(subPath)
}
// GET is a shortcut for .Method("GET").Path(subPath)
func (w *WebService) GET(subPath string) *RouteBuilder {
return new(RouteBuilder).servicePath(w.rootPath).Method("GET").Path(subPath)
}
// POST is a shortcut for .Method("POST").Path(subPath)
func (w *WebService) POST(subPath string) *RouteBuilder {
return new(RouteBuilder).servicePath(w.rootPath).Method("POST").Path(subPath)
}
// PUT is a shortcut for .Method("PUT").Path(subPath)
func (w *WebService) PUT(subPath string) *RouteBuilder {
return new(RouteBuilder).servicePath(w.rootPath).Method("PUT").Path(subPath)
}
// PATCH is a shortcut for .Method("PATCH").Path(subPath)
func (w *WebService) PATCH(subPath string) *RouteBuilder {
return new(RouteBuilder).servicePath(w.rootPath).Method("PATCH").Path(subPath)
}
// DELETE is a shortcut for .Method("DELETE").Path(subPath)
func (w *WebService) DELETE(subPath string) *RouteBuilder {
return new(RouteBuilder).servicePath(w.rootPath).Method("DELETE").Path(subPath)
}
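
The constructors and convenience methods above are meant to be chained together with the RouteBuilder. Below is a minimal sketch of how a service might be wired up, assuming the RouteBuilder methods To and Param, the WebService helpers Path and PathParameter, and the MIME_JSON constant from elsewhere in this package (findUser is a hypothetical handler):

```go
package main

import (
	"io"
	"net/http"

	"github.com/emicklei/go-restful"
)

// findUser is a hypothetical handler; it just echoes the path parameter.
func findUser(req *restful.Request, resp *restful.Response) {
	io.WriteString(resp, req.PathParameter("id"))
}

func main() {
	ws := new(restful.WebService)
	ws.Path("/users").
		Consumes(restful.MIME_JSON).
		Produces(restful.MIME_JSON)

	// GET /users/{id}?verbose=... with documented path and query parameters.
	ws.Route(ws.GET("/{id}").
		To(findUser).
		Param(ws.PathParameter("id", "identifier of the user")).
		Param(ws.QueryParameter("verbose", "include extra detail")))

	restful.Add(ws)
	http.ListenAndServe(":8080", nil)
}
```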

View File

@ -0,0 +1,39 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"net/http"
)
// DefaultContainer is a restful.Container that uses http.DefaultServeMux
var DefaultContainer *Container
func init() {
DefaultContainer = NewContainer()
DefaultContainer.ServeMux = http.DefaultServeMux
}
// DoNotRecover controls whether panics are recovered and turned into an HTTP 500 response.
// If set to true, panics are not caught; Route functions are then responsible for handling any error situation.
// The default is false: recover from panics. This has performance implications.
// OBSOLETE; use restful.DefaultContainer.DoNotRecover(true) instead.
var DoNotRecover = false
// Add registers a new WebService and adds it to the DefaultContainer.
func Add(service *WebService) {
DefaultContainer.Add(service)
}
// Filter appends a container FilterFunction to the DefaultContainer.
// These are called before dispatching an http.Request to a WebService.
func Filter(filter FilterFunction) {
DefaultContainer.Filter(filter)
}
// RegisteredWebServices returns the collection of WebServices registered with the DefaultContainer.
func RegisteredWebServices() []*WebService {
return DefaultContainer.RegisteredWebServices()
}
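
Because DefaultContainer is bound to http.DefaultServeMux, a service registered through Add is served by the standard library server. A hedged sketch follows; the logging filter and health handler are illustrative, and To and FilterChain come from elsewhere in this package:

```go
package main

import (
	"log"
	"net/http"

	"github.com/emicklei/go-restful"
)

func main() {
	ws := new(restful.WebService)
	ws.Route(ws.GET("/healthz").To(func(req *restful.Request, resp *restful.Response) {
		resp.WriteHeader(http.StatusOK)
	}))

	// Container-level filter: runs before dispatching to any WebService.
	restful.Filter(func(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
		log.Printf("%s %s", req.Request.Method, req.Request.URL)
		chain.ProcessFilter(req, resp)
	})

	restful.Add(ws)
	log.Printf("serving %d web service(s)", len(restful.RegisteredWebServices()))

	// DefaultContainer uses http.DefaultServeMux, so the default server picks it up.
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```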

2
vendor/github.com/fsouza/go-dockerclient/.gitignore generated vendored Normal file
View File

@ -0,0 +1,2 @@
# temporary symlink for testing
testing/data/symlink

27
vendor/github.com/fsouza/go-dockerclient/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,27 @@
language: go
sudo: required
go:
- 1.4.2
- 1.5.3
- 1.6
- tip
os:
- linux
- osx
env:
- GOARCH=amd64 DOCKER_VERSION=1.8.3
- GOARCH=386 DOCKER_VERSION=1.8.3
- GOARCH=amd64 DOCKER_VERSION=1.9.1
- GOARCH=386 DOCKER_VERSION=1.9.1
- GOARCH=amd64 DOCKER_VERSION=1.10.3
- GOARCH=386 DOCKER_VERSION=1.10.3
install:
- travis_retry travis-scripts/install.bash
script:
- travis-scripts/run-tests.bash
services:
- docker
matrix:
fast_finish: true
allow_failures:
- go: tip

132
vendor/github.com/fsouza/go-dockerclient/AUTHORS generated vendored Normal file
View File

@ -0,0 +1,132 @@
# This is the official list of go-dockerclient authors for copyright purposes.
Abhishek Chanda <abhishek.becs@gmail.com>
Adam Bell-Hanssen <adamb@aller.no>
Adrien Kohlbecker <adrien.kohlbecker@gmail.com>
Aldrin Leal <aldrin@leal.eng.br>
Andreas Jaekle <andreas@jaekle.net>
Andrews Medina <andrewsmedina@gmail.com>
Andrey Sibiryov <kobolog@uber.com>
Andy Goldstein <andy.goldstein@redhat.com>
Antonio Murdaca <runcom@redhat.com>
Artem Sidorenko <artem@2realities.com>
Ben Marini <ben@remind101.com>
Ben McCann <benmccann.com>
Ben Parees <bparees@redhat.com>
Benno van den Berg <bennovandenberg@gmail.com>
Bradley Cicenas <bradley.cicenas@gmail.com>
Brendan Fosberry <brendan@codeship.com>
Brian Lalor <blalor@bravo5.org>
Brian P. Hamachek <brian@brianhama.com>
Brian Palmer <brianp@instructure.com>
Bryan Boreham <bjboreham@gmail.com>
Burke Libbey <burke@libbey.me>
Carlos Diaz-Padron <cpadron@mozilla.com>
Cesar Wong <cewong@redhat.com>
Cezar Sa Espinola <cezar.sa@corp.globo.com>
Cheah Chu Yeow <chuyeow@gmail.com>
cheneydeng <cheneydeng@qq.com>
Chris Bednarski <banzaimonkey@gmail.com>
CMGS <ilskdw@gmail.com>
Colin Hebert <hebert.colin@gmail.com>
Craig Jellick <craig@rancher.com>
Dan Williams <dcbw@redhat.com>
Daniel, Dao Quang Minh <dqminh89@gmail.com>
Daniel Garcia <daniel@danielgarcia.info>
Daniel Hiltgen <daniel.hiltgen@docker.com>
Darren Shepherd <darren@rancher.com>
Dave Choi <dave.choi@daumkakao.com>
David Huie <dahuie@gmail.com>
Dawn Chen <dawnchen@google.com>
Dinesh Subhraveti <dinesh@gemini-systems.net>
Drew Wells <drew.wells00@gmail.com>
Ed <edrocksit@gmail.com>
Elias G. Schneevoigt <eliasgs@gmail.com>
Erez Horev <erez.horev@elastifile.com>
Eric Anderson <anderson@copperegg.com>
Ewout Prangsma <ewout@prangsma.net>
Fabio Rehm <fgrehm@gmail.com>
Fatih Arslan <ftharsln@gmail.com>
Flavia Missi <flaviamissi@gmail.com>
Francisco Souza <f@souza.cc>
Frank Groeneveld <frank@frankgroeneveld.nl>
George Moura <gwmoura@gmail.com>
Grégoire Delattre <gregoire.delattre@gmail.com>
Guillermo Álvarez Fernández <guillermo@cientifico.net>
Harry Zhang <harryzhang@zju.edu.cn>
He Simei <hesimei@zju.edu.cn>
Ivan Mikushin <i.mikushin@gmail.com>
James Bardin <jbardin@litl.com>
James Nugent <james@jen20.com>
Jari Kolehmainen <jari.kolehmainen@digia.com>
Jason Wilder <jwilder@litl.com>
Jawher Moussa <jawher.moussa@gmail.com>
Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
Jeff Mitchell <jeffrey.mitchell@gmail.com>
Jeffrey Hulten <jhulten@gmail.com>
Jen Andre <jandre@gmail.com>
Jérôme Laurens <jeromelaurens@gmail.com>
Johan Euphrosine <proppy@google.com>
John Hughes <hughesj@visa.com>
Kamil Domanski <kamil@domanski.co>
Karan Misra <kidoman@gmail.com>
Ken Herner <chosenken@gmail.com>
Kim, Hirokuni <hirokuni.kim@kvh.co.jp>
Kyle Allan <kallan357@gmail.com>
Liron Levin <levinlir@gmail.com>
Lior Yankovich <lior@twistlock.com>
Liu Peng <vslene@gmail.com>
Lorenz Leutgeb <lorenz.leutgeb@gmail.com>
Lucas Clemente <lucas@clemente.io>
Lucas Weiblen <lucasweiblen@gmail.com>
Lyon Hill <lyondhill@gmail.com>
Mantas Matelis <mmatelis@coursera.org>
Martin Sweeney <martin@sweeney.io>
Máximo Cuadros Ortiz <mcuadros@gmail.com>
Michael Schmatz <michaelschmatz@gmail.com>
Michal Fojtik <mfojtik@redhat.com>
Mike Dillon <mike.dillon@synctree.com>
Mrunal Patel <mrunalp@gmail.com>
Nate Jones <nate@endot.org>
Nguyen Sy Thanh Son <sonnst@sigma-solutions.eu>
Nicholas Van Wiggeren <nvanwiggeren@digitalocean.com>
Nick Ethier <ncethier@gmail.com>
Omeid Matten <public@omeid.me>
Orivej Desh <orivej@gmx.fr>
Paul Bellamy <paul.a.bellamy@gmail.com>
Paul Morie <pmorie@gmail.com>
Paul Weil <pweil@redhat.com>
Peter Edge <peter.edge@gmail.com>
Peter Jihoon Kim <raingrove@gmail.com>
Phil Lu <lu@stackengine.com>
Philippe Lafoucrière <philippe.lafoucriere@tech-angels.com>
Rafe Colton <rafael.colton@gmail.com>
Rob Miller <rob@kalistra.com>
Robert Williamson <williamson.robert@gmail.com>
Roman Khlystik <roman.khlystik@gmail.com>
Salvador Gironès <salvadorgirones@gmail.com>
Sam Rijs <srijs@airpost.net>
Sami Wagiaalla <swagiaal@redhat.com>
Samuel Archambault <sarchambault@lapresse.ca>
Samuel Karp <skarp@amazon.com>
Silas Sewell <silas@sewell.org>
Simon Eskildsen <sirup@sirupsen.com>
Simon Menke <simon.menke@gmail.com>
Skolos <skolos@gopherlab.com>
Soulou <leo@unbekandt.eu>
Sridhar Ratnakumar <sridharr@activestate.com>
Summer Mousa <smousa@zenoss.com>
Sunjin Lee <styner32@gmail.com>
Tarsis Azevedo <tarsis@corp.globo.com>
Tim Schindler <tim@catalyst-zero.com>
Timothy St. Clair <tstclair@redhat.com>
Tobi Knaup <tobi@mesosphere.io>
Tom Wilkie <tom.wilkie@gmail.com>
Tonic <tonicbupt@gmail.com>
ttyh061 <ttyh061@gmail.com>
Victor Marmol <vmarmol@google.com>
Vincenzo Prignano <vincenzo.prignano@gmail.com>
Wiliam Souza <wiliamsouza83@gmail.com>
Ye Yin <eyniy@qq.com>
Yu, Zou <zouyu7@huawei.com>
Yuriy Bogdanov <chinsay@gmail.com>

View File

@ -0,0 +1,6 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
You can find the Docker license at the following link:
https://raw.githubusercontent.com/docker/docker/master/LICENSE

22
vendor/github.com/fsouza/go-dockerclient/LICENSE generated vendored Normal file
View File

@ -0,0 +1,22 @@
Copyright (c) 2016, go-dockerclient authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

57
vendor/github.com/fsouza/go-dockerclient/Makefile generated vendored Normal file
View File

@ -0,0 +1,57 @@
.PHONY: \
all \
vendor \
lint \
vet \
fmt \
fmtcheck \
pretest \
test \
integration \
cov \
clean
PKGS = . ./testing
all: test
vendor:
@ go get -v github.com/mjibson/party
party -d external -c -u
lint:
@ go get -v github.com/golang/lint/golint
@for file in $$(git ls-files '*.go' | grep -v 'external/'); do \
export output="$$(golint $${file} | grep -v 'type name will be used as docker.DockerInfo')"; \
[ -n "$${output}" ] && echo "$${output}" && export status=1; \
done; \
exit $${status:-0}
vet:
$(foreach pkg,$(PKGS),go vet $(pkg);)
fmt:
gofmt -s -w $(PKGS)
fmtcheck:
@ export output=$$(gofmt -s -d $(PKGS)); \
[ -n "$${output}" ] && echo "$${output}" && export status=1; \
exit $${status:-0}
pretest: lint vet fmtcheck
gotest:
$(foreach pkg,$(PKGS),go test $(pkg) || exit;)
test: pretest gotest
integration:
go test -tags docker_integration -run TestIntegration -v
cov:
@ go get -v github.com/axw/gocov/gocov
@ go get golang.org/x/tools/cmd/cover
gocov test | gocov report
clean:
$(foreach pkg,$(PKGS),go clean $(pkg) || exit;)

View File

@ -0,0 +1,105 @@
# go-dockerclient
[![Travis](https://img.shields.io/travis/fsouza/go-dockerclient.svg?style=flat-square)](https://travis-ci.org/fsouza/go-dockerclient)
[![GoDoc](https://img.shields.io/badge/api-Godoc-blue.svg?style=flat-square)](https://godoc.org/github.com/fsouza/go-dockerclient)
This package presents a client for the Docker remote API. It also provides
support for the extensions in the [Swarm API](https://docs.docker.com/swarm/swarm-api/).
This package also provides support for docker's network API, which is a simple
passthrough to the libnetwork remote API. Note that docker's network API is
only available in docker 1.8 and above, and only enabled in docker if
DOCKER_EXPERIMENTAL is defined during the docker build process.
For more details, check the [remote API documentation](http://docs.docker.com/engine/reference/api/docker_remote_api/).
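As a hedged illustration of the network passthrough (this assumes the ListNetworks call and the Name/ID fields exposed by this package, and a daemon listening on the usual Unix socket):
```go
package main

import (
	"fmt"
	"log"

	"github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}
	// ListNetworks is a thin passthrough to the libnetwork remote API.
	networks, err := client.ListNetworks()
	if err != nil {
		log.Fatal(err)
	}
	for _, network := range networks {
		fmt.Println("ID: ", network.ID, " Name: ", network.Name)
	}
}
```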
## Vendoring
If you are having issues with Go 1.5 and have `GO15VENDOREXPERIMENT` set with an application that has go-dockerclient vendored,
please update your vendoring of go-dockerclient :) We recently moved the `vendor` directory to `external` so that go-dockerclient
is compatible with this configuration. See [338](https://github.com/fsouza/go-dockerclient/issues/338) and [339](https://github.com/fsouza/go-dockerclient/pull/339)
for details.
## Example
```go
package main
import (
"fmt"
"github.com/fsouza/go-dockerclient"
)
func main() {
endpoint := "unix:///var/run/docker.sock"
client, _ := docker.NewClient(endpoint)
imgs, _ := client.ListImages(docker.ListImagesOptions{All: false})
for _, img := range imgs {
fmt.Println("ID: ", img.ID)
fmt.Println("RepoTags: ", img.RepoTags)
fmt.Println("Created: ", img.Created)
fmt.Println("Size: ", img.Size)
fmt.Println("VirtualSize: ", img.VirtualSize)
fmt.Println("ParentId: ", img.ParentID)
}
}
```
## Using with TLS
In order to instantiate the client for a TLS-enabled daemon, you should use NewTLSClient, passing the endpoint and the paths to the certificate, key, and CA files as parameters.
```go
package main
import (
"fmt"
"os"
"github.com/fsouza/go-dockerclient"
)
func main() {
endpoint := "tcp://[ip]:[port]"
path := os.Getenv("DOCKER_CERT_PATH")
ca := fmt.Sprintf("%s/ca.pem", path)
cert := fmt.Sprintf("%s/cert.pem", path)
key := fmt.Sprintf("%s/key.pem", path)
client, _ := docker.NewTLSClient(endpoint, cert, key, ca)
// use client
}
```
If using [docker-machine](https://docs.docker.com/machine/), or another application that exports the environment variables
`DOCKER_HOST`, `DOCKER_TLS_VERIFY`, and `DOCKER_CERT_PATH`, you can use NewClientFromEnv.
```go
package main
import (
"github.com/fsouza/go-dockerclient"
)
func main() {
client, _ := docker.NewClientFromEnv()
// use client
}
```
See the documentation for more details.
## Developing
All development commands can be seen in the [Makefile](Makefile).
Committed code must pass:
* [golint](https://github.com/golang/lint)
* [go vet](https://godoc.org/golang.org/x/tools/cmd/vet)
* [gofmt](https://golang.org/cmd/gofmt)
* [go test](https://golang.org/cmd/go/#hdr-Test_packages)
Running `make test` will check all of these. If your editor does not automatically call gofmt, `make fmt` will format all Go files in this repository.

Some files were not shown because too many files have changed in this diff.